diff --git a/Cargo.lock b/Cargo.lock index a7372450a..fbfadacdb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1082,6 +1082,7 @@ dependencies = [ "profiles", "score", "serde", + "sketching", "tikv-jemallocator", "tokio", "toml", @@ -2107,6 +2108,7 @@ dependencies = [ "serde", "serde_cbor", "serde_json", + "sketching", "smartstring", "smolset", "sshkeys", @@ -2117,8 +2119,6 @@ dependencies = [ "toml", "touch", "tracing", - "tracing-serde", - "tracing-subscriber", "url", "urlencoding", "users", @@ -2212,11 +2212,11 @@ dependencies = [ "score", "serde", "serde_json", + "sketching", "tokio", "tokio-util", "toml", "tracing", - "tracing-subscriber", "users", ] @@ -3398,6 +3398,7 @@ dependencies = [ "reqwest", "serde", "serde_json", + "sketching", "tide", "tide-compress", "tide-openssl", @@ -3405,7 +3406,6 @@ dependencies = [ "tokio-openssl", "tokio-util", "tracing", - "tracing-subscriber", "url", "uuid 1.1.2", "webauthn-authenticator-rs", @@ -3649,6 +3649,18 @@ dependencies = [ "event-listener", ] +[[package]] +name = "sketching" +version = "0.1.0" +dependencies = [ + "async-trait", + "num_enum", + "tide", + "tracing", + "tracing-forest", + "tracing-subscriber", +] + [[package]] name = "slab" version = "0.4.7" @@ -4185,6 +4197,19 @@ dependencies = [ "valuable", ] +[[package]] +name = "tracing-forest" +version = "0.1.4" +source = "git+https://github.com/Firstyear/tracing-forest.git?rev=18d242a4dde060c4946ade0a2c4d5be1df048aea#18d242a4dde060c4946ade0a2c4d5be1df048aea" +dependencies = [ + "smallvec", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber", + "uuid 1.1.2", +] + [[package]] name = "tracing-log" version = "0.1.3" @@ -4196,16 +4221,6 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-serde" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" -dependencies = [ - "serde", - "tracing-core", -] - [[package]] name = "tracing-subscriber" version = "0.3.15" diff --git a/Cargo.toml b/Cargo.toml index 24d0a5f3f..b6f8d022c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,6 +17,7 @@ members = [ "kanidmd/score", "orca", "profiles", + "sketching", ] exclude = [ diff --git a/kanidm_unix_int/Cargo.toml b/kanidm_unix_int/Cargo.toml index c8acf0ab4..60555664d 100644 --- a/kanidm_unix_int/Cargo.toml +++ b/kanidm_unix_int/Cargo.toml @@ -48,7 +48,7 @@ kanidm_proto = { path = "../kanidm_proto" } kanidm = { path = "../kanidmd/idm" } tracing = "^0.1.35" -tracing-subscriber = "^0.3.14" +sketching = { path = "../sketching" } toml = "^0.5.9" rpassword = "^7.0.0" diff --git a/kanidm_unix_int/src/cache_clear.rs b/kanidm_unix_int/src/cache_clear.rs index 37c83714e..fcf9bf248 100644 --- a/kanidm_unix_int/src/cache_clear.rs +++ b/kanidm_unix_int/src/cache_clear.rs @@ -30,7 +30,7 @@ async fn main() { if opt.debug { ::std::env::set_var("RUST_LOG", "kanidm=debug,kanidm_client=debug"); } - tracing_subscriber::fmt::init(); + sketching::tracing_subscriber::fmt::init(); debug!("Starting cache invalidate tool ..."); diff --git a/kanidm_unix_int/src/cache_invalidate.rs b/kanidm_unix_int/src/cache_invalidate.rs index c9b0b55c8..cd950d2b3 100644 --- a/kanidm_unix_int/src/cache_invalidate.rs +++ b/kanidm_unix_int/src/cache_invalidate.rs @@ -30,7 +30,7 @@ async fn main() { if opt.debug { ::std::env::set_var("RUST_LOG", "kanidm=debug,kanidm_client=debug"); } - tracing_subscriber::fmt::init(); + sketching::tracing_subscriber::fmt::init(); debug!("Starting cache invalidate tool ..."); diff --git 
a/kanidm_unix_int/src/daemon.rs b/kanidm_unix_int/src/daemon.rs index 8650971a3..6a9bc05e3 100644 --- a/kanidm_unix_int/src/daemon.rs +++ b/kanidm_unix_int/src/daemon.rs @@ -10,9 +10,6 @@ #![deny(clippy::needless_pass_by_value)] #![deny(clippy::trivially_copy_pass_by_ref)] -#[macro_use] -extern crate tracing; - use users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid}; use std::fs::metadata; @@ -25,10 +22,12 @@ use bytes::{BufMut, BytesMut}; use futures::SinkExt; use futures::StreamExt; use libc::umask; +use sketching::tracing_forest::{self, traits::*, util::*}; use std::error::Error; use std::io; use std::sync::Arc; use std::time::Duration; + use tokio::net::{UnixListener, UnixStream}; use tokio::sync::mpsc::{channel, Receiver, Sender}; use tokio::sync::oneshot; @@ -380,302 +379,313 @@ async fn main() { std::process::exit(1); } - tracing_subscriber::fmt::init(); + tracing_forest::worker_task() + .set_global(true) + // Fall back to stderr + .map_sender(|sender| sender.or_stderr()) + .build_on(|subscriber| subscriber + .with(EnvFilter::try_from_default_env() + .or_else(|_| EnvFilter::try_new("info")) + .expect("Failed to init envfilter") + ) + ) + .on(async { + debug!("Profile -> {}", env!("KANIDM_PROFILE_NAME")); + debug!("CPU Flags -> {}", env!("KANIDM_CPU_FLAGS")); - debug!("Profile -> {}", env!("KANIDM_PROFILE_NAME")); - debug!("CPU Flags -> {}", env!("KANIDM_CPU_FLAGS")); - - let cfg_path = Path::new("/etc/kanidm/config"); - let cfg_path_str = match cfg_path.to_str() { - Some(cps) => cps, - None => { - error!("Unable to turn cfg_path to str"); - std::process::exit(1); - } - }; - if !cfg_path.exists() { - // there's no point trying to start up if we can't read a usable config! - error!( - "Client config missing from {} - cannot start up. Quitting.", - cfg_path_str - ); - std::process::exit(1); - } - - if cfg_path.exists() { - let cfg_meta = match metadata(&cfg_path) { - Ok(v) => v, - Err(e) => { - error!("Unable to read metadata for {} - {:?}", cfg_path_str, e); - std::process::exit(1); - } - }; - if !file_permissions_readonly(&cfg_meta) { - warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", - cfg_path_str - ); - } - - if cfg_meta.uid() == cuid || cfg_meta.uid() == ceuid { - warn!("WARNING: {} owned by the current uid, which may allow file permission changes. This could be a security risk ...", - cfg_path_str - ); - } - } - - let unixd_path = Path::new(DEFAULT_CONFIG_PATH); - let unixd_path_str = match unixd_path.to_str() { - Some(cps) => cps, - None => { - error!("Unable to turn unixd_path to str"); - std::process::exit(1); - } - }; - if !unixd_path.exists() { - // there's no point trying to start up if we can't read a usable config! - error!( - "unixd config missing from {} - cannot start up. Quitting.", - unixd_path_str - ); - std::process::exit(1); - } else { - let unixd_meta = match metadata(&unixd_path) { - Ok(v) => v, - Err(e) => { - error!("Unable to read metadata for {} - {:?}", unixd_path_str, e); - std::process::exit(1); - } - }; - if !file_permissions_readonly(&unixd_meta) { - warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", - unixd_path_str); - } - - if unixd_meta.uid() == cuid || unixd_meta.uid() == ceuid { - warn!("WARNING: {} owned by the current uid, which may allow file permission changes. 
This could be a security risk ...", - unixd_path_str - ); - } - } - - // setup - let cb = match KanidmClientBuilder::new().read_options_from_optional_config(cfg_path) { - Ok(v) => v, - Err(_) => { - error!("Failed to parse {}", cfg_path_str); - std::process::exit(1); - } - }; - - let cfg = match KanidmUnixdConfig::new().read_options_from_optional_config(unixd_path) { - Ok(v) => v, - Err(_) => { - error!("Failed to parse {}", unixd_path_str); - std::process::exit(1); - } - }; - - debug!("๐Ÿงน Cleaning up sockets from previous invocations"); - rm_if_exist(cfg.sock_path.as_str()); - rm_if_exist(cfg.task_sock_path.as_str()); - - let cb = cb.connect_timeout(cfg.conn_timeout); - - let rsclient = match cb.build() { - Ok(rsc) => rsc, - Err(_e) => { - error!("Failed to build async client"); - std::process::exit(1); - } - }; - - // Check the pb path will be okay. - if cfg.db_path != "" { - let db_path = PathBuf::from(cfg.db_path.as_str()); - // We only need to check the parent folder path permissions as the db itself may not exist yet. - if let Some(db_parent_path) = db_path.parent() { - if !db_parent_path.exists() { - error!( - "Refusing to run, DB folder {} does not exist", - db_parent_path - .to_str() - .unwrap_or_else(|| "") - ); - std::process::exit(1); - } - - let db_par_path_buf = db_parent_path.to_path_buf(); - - let i_meta = match metadata(&db_par_path_buf) { - Ok(v) => v, - Err(e) => { - error!( - "Unable to read metadata for {} - {:?}", - db_par_path_buf - .to_str() - .unwrap_or_else(|| ""), - e - ); + let cfg_path = Path::new("/etc/kanidm/config"); + let cfg_path_str = match cfg_path.to_str() { + Some(cps) => cps, + None => { + error!("Unable to turn cfg_path to str"); std::process::exit(1); } }; - - if !i_meta.is_dir() { + if !cfg_path.exists() { + // there's no point trying to start up if we can't read a usable config! error!( - "Refusing to run - DB folder {} may not be a directory", - db_par_path_buf - .to_str() - .unwrap_or_else(|| "") + "Client config missing from {} - cannot start up. Quitting.", + cfg_path_str ); std::process::exit(1); } - if !file_permissions_readonly(&i_meta) { - warn!("WARNING: DB folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", db_par_path_buf.to_str() - .unwrap_or_else(|| "") - ); - } - if i_meta.mode() & 0o007 != 0 { - warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str() - .unwrap_or_else(|| "") - ); - } - } - - // check to see if the db's already there - if db_path.exists() { - if !db_path.is_file() { - error!( - "Refusing to run - DB path {} already exists and is not a file.", - db_path.to_str().unwrap_or_else(|| "") - ); - std::process::exit(1); - }; - - match metadata(&db_path) { - Ok(v) => v, - Err(e) => { - error!( - "Unable to read metadata for {} - {:?}", - db_path.to_str().unwrap_or_else(|| ""), - e - ); - std::process::exit(1); - } - }; - // TODO: permissions dance to enumerate the user's ability to write to the file? ref #456 - r2d2 will happily keep trying to do things without bailing. 
- }; - } - - let cl_inner = match CacheLayer::new( - cfg.db_path.as_str(), // The sqlite db path - cfg.cache_timeout, - rsclient, - cfg.pam_allowed_login_groups.clone(), - cfg.default_shell.clone(), - cfg.home_prefix.clone(), - cfg.home_attr, - cfg.home_alias, - cfg.uid_attr_map, - cfg.gid_attr_map, - ) - .await - { - Ok(c) => c, - Err(_e) => { - error!("Failed to build cache layer."); - std::process::exit(1); - } - }; - - let cachelayer = Arc::new(cl_inner); - - // Set the umask while we open the path for most clients. - let before = unsafe { umask(0) }; - let listener = match UnixListener::bind(cfg.sock_path.as_str()) { - Ok(l) => l, - Err(_e) => { - error!("Failed to bind unix socket."); - std::process::exit(1); - } - }; - // Setup the root-only socket. Take away all others. - let _ = unsafe { umask(0o0077) }; - let task_listener = match UnixListener::bind(cfg.task_sock_path.as_str()) { - Ok(l) => l, - Err(_e) => { - error!("Failed to bind unix socket."); - std::process::exit(1); - } - }; - - // Undo it. - let _ = unsafe { umask(before) }; - - let (task_channel_tx, mut task_channel_rx) = channel(16); - let task_channel_tx = Arc::new(task_channel_tx); - - let task_channel_tx_cln = task_channel_tx.clone(); - - tokio::spawn(async move { - loop { - match task_listener.accept().await { - Ok((socket, _addr)) => { - // Did it come from root? - if let Ok(ucred) = socket.peer_cred() { - if ucred.uid() == 0 { - // all good! - } else { - // move along. - debug!("Task handler not running as root, ignoring ..."); - continue; - } - } else { - // move along. - debug!("Task handler not running as root, ignoring ..."); - continue; - }; - debug!("A task handler has connected."); - // It did? Great, now we can wait and spin on that one - // client. - if let Err(e) = - handle_task_client(socket, &task_channel_tx, &mut task_channel_rx).await - { - error!("Task client error occured; error = {:?}", e); + if cfg_path.exists() { + let cfg_meta = match metadata(&cfg_path) { + Ok(v) => v, + Err(e) => { + error!("Unable to read metadata for {} - {:?}", cfg_path_str, e); + std::process::exit(1); } - // If they DC we go back to accept. + }; + if !file_permissions_readonly(&cfg_meta) { + warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", + cfg_path_str + ); } - Err(err) => { - error!("Task Accept error -> {:?}", err); + + if cfg_meta.uid() == cuid || cfg_meta.uid() == ceuid { + warn!("WARNING: {} owned by the current uid, which may allow file permission changes. This could be a security risk ...", + cfg_path_str + ); } } - // done - } - }); - // TODO: Setup a task that handles pre-fetching here. + let unixd_path = Path::new(DEFAULT_CONFIG_PATH); + let unixd_path_str = match unixd_path.to_str() { + Some(cps) => cps, + None => { + error!("Unable to turn unixd_path to str"); + std::process::exit(1); + } + }; + if !unixd_path.exists() { + // there's no point trying to start up if we can't read a usable config! + error!( + "unixd config missing from {} - cannot start up. Quitting.", + unixd_path_str + ); + std::process::exit(1); + } else { + let unixd_meta = match metadata(&unixd_path) { + Ok(v) => v, + Err(e) => { + error!("Unable to read metadata for {} - {:?}", unixd_path_str, e); + std::process::exit(1); + } + }; + if !file_permissions_readonly(&unixd_meta) { + warn!("permissions on {} may not be secure. Should be readonly to running uid. 
This could be a security risk ...", + unixd_path_str); + } - let server = async move { - loop { - let tc_tx = task_channel_tx_cln.clone(); - match listener.accept().await { - Ok((socket, _addr)) => { - let cachelayer_ref = cachelayer.clone(); - tokio::spawn(async move { - if let Err(e) = handle_client(socket, cachelayer_ref.clone(), &tc_tx).await - { - error!("an error occured; error = {:?}", e); + if unixd_meta.uid() == cuid || unixd_meta.uid() == ceuid { + warn!("WARNING: {} owned by the current uid, which may allow file permission changes. This could be a security risk ...", + unixd_path_str + ); + } + } + + // setup + let cb = match KanidmClientBuilder::new().read_options_from_optional_config(cfg_path) { + Ok(v) => v, + Err(_) => { + error!("Failed to parse {}", cfg_path_str); + std::process::exit(1); + } + }; + + let cfg = match KanidmUnixdConfig::new().read_options_from_optional_config(unixd_path) { + Ok(v) => v, + Err(_) => { + error!("Failed to parse {}", unixd_path_str); + std::process::exit(1); + } + }; + + debug!("๐Ÿงน Cleaning up sockets from previous invocations"); + rm_if_exist(cfg.sock_path.as_str()); + rm_if_exist(cfg.task_sock_path.as_str()); + + let cb = cb.connect_timeout(cfg.conn_timeout); + + let rsclient = match cb.build() { + Ok(rsc) => rsc, + Err(_e) => { + error!("Failed to build async client"); + std::process::exit(1); + } + }; + + // Check the pb path will be okay. + if cfg.db_path != "" { + let db_path = PathBuf::from(cfg.db_path.as_str()); + // We only need to check the parent folder path permissions as the db itself may not exist yet. + if let Some(db_parent_path) = db_path.parent() { + if !db_parent_path.exists() { + error!( + "Refusing to run, DB folder {} does not exist", + db_parent_path + .to_str() + .unwrap_or_else(|| "") + ); + std::process::exit(1); + } + + let db_par_path_buf = db_parent_path.to_path_buf(); + + let i_meta = match metadata(&db_par_path_buf) { + Ok(v) => v, + Err(e) => { + error!( + "Unable to read metadata for {} - {:?}", + db_par_path_buf + .to_str() + .unwrap_or_else(|| ""), + e + ); + std::process::exit(1); } - }); - } - Err(err) => { - error!("Accept error -> {:?}", err); + }; + + if !i_meta.is_dir() { + error!( + "Refusing to run - DB folder {} may not be a directory", + db_par_path_buf + .to_str() + .unwrap_or_else(|| "") + ); + std::process::exit(1); + } + if !file_permissions_readonly(&i_meta) { + warn!("WARNING: DB folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", db_par_path_buf.to_str() + .unwrap_or_else(|| "") + ); + } + + if i_meta.mode() & 0o007 != 0 { + warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str() + .unwrap_or_else(|| "") + ); + } } + + // check to see if the db's already there + if db_path.exists() { + if !db_path.is_file() { + error!( + "Refusing to run - DB path {} already exists and is not a file.", + db_path.to_str().unwrap_or_else(|| "") + ); + std::process::exit(1); + }; + + match metadata(&db_path) { + Ok(v) => v, + Err(e) => { + error!( + "Unable to read metadata for {} - {:?}", + db_path.to_str().unwrap_or_else(|| ""), + e + ); + std::process::exit(1); + } + }; + // TODO: permissions dance to enumerate the user's ability to write to the file? ref #456 - r2d2 will happily keep trying to do things without bailing. 
+ }; } - } - }; - info!("Server started ..."); + let cl_inner = match CacheLayer::new( + cfg.db_path.as_str(), // The sqlite db path + cfg.cache_timeout, + rsclient, + cfg.pam_allowed_login_groups.clone(), + cfg.default_shell.clone(), + cfg.home_prefix.clone(), + cfg.home_attr, + cfg.home_alias, + cfg.uid_attr_map, + cfg.gid_attr_map, + ) + .await + { + Ok(c) => c, + Err(_e) => { + error!("Failed to build cache layer."); + std::process::exit(1); + } + }; - server.await; + let cachelayer = Arc::new(cl_inner); + + // Set the umask while we open the path for most clients. + let before = unsafe { umask(0) }; + let listener = match UnixListener::bind(cfg.sock_path.as_str()) { + Ok(l) => l, + Err(_e) => { + error!("Failed to bind unix socket."); + std::process::exit(1); + } + }; + // Setup the root-only socket. Take away all others. + let _ = unsafe { umask(0o0077) }; + let task_listener = match UnixListener::bind(cfg.task_sock_path.as_str()) { + Ok(l) => l, + Err(_e) => { + error!("Failed to bind unix socket."); + std::process::exit(1); + } + }; + + // Undo it. + let _ = unsafe { umask(before) }; + + let (task_channel_tx, mut task_channel_rx) = channel(16); + let task_channel_tx = Arc::new(task_channel_tx); + + let task_channel_tx_cln = task_channel_tx.clone(); + + tokio::spawn(async move { + loop { + match task_listener.accept().await { + Ok((socket, _addr)) => { + // Did it come from root? + if let Ok(ucred) = socket.peer_cred() { + if ucred.uid() == 0 { + // all good! + } else { + // move along. + debug!("Task handler not running as root, ignoring ..."); + continue; + } + } else { + // move along. + debug!("Task handler not running as root, ignoring ..."); + continue; + }; + debug!("A task handler has connected."); + // It did? Great, now we can wait and spin on that one + // client. + if let Err(e) = + handle_task_client(socket, &task_channel_tx, &mut task_channel_rx).await + { + error!("Task client error occured; error = {:?}", e); + } + // If they DC we go back to accept. + } + Err(err) => { + error!("Task Accept error -> {:?}", err); + } + } + // done + } + }); + + // TODO: Setup a task that handles pre-fetching here. 
+ + let server = async move { + loop { + let tc_tx = task_channel_tx_cln.clone(); + match listener.accept().await { + Ok((socket, _addr)) => { + let cachelayer_ref = cachelayer.clone(); + tokio::spawn(async move { + if let Err(e) = handle_client(socket, cachelayer_ref.clone(), &tc_tx).await + { + error!("an error occured; error = {:?}", e); + } + }); + } + Err(err) => { + error!("Accept error -> {:?}", err); + } + } + } + }; + + info!("Server started ..."); + + server.await; + }) + .await; } diff --git a/kanidm_unix_int/src/daemon_status.rs b/kanidm_unix_int/src/daemon_status.rs index 8e0699d57..a7a69b48a 100644 --- a/kanidm_unix_int/src/daemon_status.rs +++ b/kanidm_unix_int/src/daemon_status.rs @@ -31,7 +31,7 @@ fn main() { if opt.debug { ::std::env::set_var("RUST_LOG", "kanidm=debug,kanidm_client=debug"); } - tracing_subscriber::fmt::init(); + sketching::tracing_subscriber::fmt::init(); trace!("Starting cache status tool ..."); diff --git a/kanidm_unix_int/src/db.rs b/kanidm_unix_int/src/db.rs index e2de56e8a..b4744c5f8 100644 --- a/kanidm_unix_int/src/db.rs +++ b/kanidm_unix_int/src/db.rs @@ -724,7 +724,7 @@ mod tests { #[tokio::test] async fn test_cache_db_account_basic() { - let _ = tracing_subscriber::fmt::try_init(); + let _ = sketching::test_init(); let db = Db::new("").expect("failed to create."); let dbtxn = db.write().await; assert!(dbtxn.migrate().is_ok()); @@ -808,7 +808,7 @@ mod tests { #[tokio::test] async fn test_cache_db_group_basic() { - let _ = tracing_subscriber::fmt::try_init(); + let _ = sketching::test_init(); let db = Db::new("").expect("failed to create."); let dbtxn = db.write().await; assert!(dbtxn.migrate().is_ok()); @@ -883,7 +883,7 @@ mod tests { #[tokio::test] async fn test_cache_db_account_group_update() { - let _ = tracing_subscriber::fmt::try_init(); + let _ = sketching::test_init(); let db = Db::new("").expect("failed to create."); let dbtxn = db.write().await; assert!(dbtxn.migrate().is_ok()); @@ -951,7 +951,7 @@ mod tests { #[tokio::test] async fn test_cache_db_account_password() { - let _ = tracing_subscriber::fmt::try_init(); + let _ = sketching::test_init(); let db = Db::new("").expect("failed to create."); let dbtxn = db.write().await; assert!(dbtxn.migrate().is_ok()); @@ -1000,7 +1000,7 @@ mod tests { #[tokio::test] async fn test_cache_db_group_rename_duplicate() { - let _ = tracing_subscriber::fmt::try_init(); + let _ = sketching::test_init(); let db = Db::new("").expect("failed to create."); let dbtxn = db.write().await; assert!(dbtxn.migrate().is_ok()); @@ -1055,7 +1055,7 @@ mod tests { #[tokio::test] async fn test_cache_db_account_rename_duplicate() { - let _ = tracing_subscriber::fmt::try_init(); + let _ = sketching::test_init(); let db = Db::new("").expect("failed to create."); let dbtxn = db.write().await; assert!(dbtxn.migrate().is_ok()); diff --git a/kanidm_unix_int/src/ssh_authorizedkeys.rs b/kanidm_unix_int/src/ssh_authorizedkeys.rs index 61975d94e..2e9aba2dd 100644 --- a/kanidm_unix_int/src/ssh_authorizedkeys.rs +++ b/kanidm_unix_int/src/ssh_authorizedkeys.rs @@ -31,7 +31,7 @@ async fn main() { if opt.debug { ::std::env::set_var("RUST_LOG", "kanidm=debug,kanidm_client=debug"); } - tracing_subscriber::fmt::init(); + sketching::tracing_subscriber::fmt::init(); debug!("Starting authorized keys tool ..."); diff --git a/kanidm_unix_int/src/tasks_daemon.rs b/kanidm_unix_int/src/tasks_daemon.rs index 666012cc6..c9adeb3a6 100644 --- a/kanidm_unix_int/src/tasks_daemon.rs +++ b/kanidm_unix_int/src/tasks_daemon.rs @@ -10,9 +10,6 @@ 
#![deny(clippy::needless_pass_by_value)] #![deny(clippy::trivially_copy_pass_by_ref)] -#[macro_use] -extern crate tracing; - use users::{get_effective_gid, get_effective_uid}; use std::os::unix::fs::symlink; @@ -23,6 +20,7 @@ use std::ffi::CString; use bytes::{BufMut, BytesMut}; use futures::SinkExt; use futures::StreamExt; +use sketching::tracing_forest::{self, traits::*, util::*}; use std::fs; use std::io; use std::path::Path; @@ -213,49 +211,61 @@ async fn main() { std::process::exit(1); } - tracing_subscriber::fmt::init(); - - let unixd_path = Path::new(DEFAULT_CONFIG_PATH); - let unixd_path_str = match unixd_path.to_str() { - Some(cps) => cps, - None => { - error!("Unable to turn unixd_path to str"); - std::process::exit(1); - } - }; - - let cfg = match KanidmUnixdConfig::new().read_options_from_optional_config(unixd_path) { - Ok(v) => v, - Err(_) => { - error!("Failed to parse {}", unixd_path_str); - std::process::exit(1); - } - }; - - let task_sock_path = cfg.task_sock_path.clone(); - debug!("Attempting to use {} ...", task_sock_path); - - let server = async move { - loop { - info!("Attempting to connect to kanidm_unixd ..."); - // Try to connect to the daemon. - match UnixStream::connect(&task_sock_path).await { - // Did we connect? - Ok(stream) => { - info!("Found kanidm_unixd, waiting for tasks ..."); - // Yep! Now let the main handler do it's job. - // If it returns (dc, etc, then we loop and try again). - handle_tasks(stream, &cfg.home_prefix).await; + tracing_forest::worker_task() + .set_global(true) + // Fall back to stderr + .map_sender(|sender| sender.or_stderr()) + .build_on(|subscriber| { + subscriber.with( + EnvFilter::try_from_default_env() + .or_else(|_| EnvFilter::try_new("info")) + .expect("Failed to init envfilter"), + ) + }) + .on(async { + let unixd_path = Path::new(DEFAULT_CONFIG_PATH); + let unixd_path_str = match unixd_path.to_str() { + Some(cps) => cps, + None => { + error!("Unable to turn unixd_path to str"); + std::process::exit(1); } - Err(e) => { - error!("Unable to find kanidm_unixd, sleeping ..."); - debug!("\\---> {:?}", e); - // Back off. - time::sleep(Duration::from_millis(5000)).await; - } - } - } - }; + }; - server.await; + let cfg = match KanidmUnixdConfig::new().read_options_from_optional_config(unixd_path) { + Ok(v) => v, + Err(_) => { + error!("Failed to parse {}", unixd_path_str); + std::process::exit(1); + } + }; + + let task_sock_path = cfg.task_sock_path.clone(); + debug!("Attempting to use {} ...", task_sock_path); + + let server = async move { + loop { + info!("Attempting to connect to kanidm_unixd ..."); + // Try to connect to the daemon. + match UnixStream::connect(&task_sock_path).await { + // Did we connect? + Ok(stream) => { + info!("Found kanidm_unixd, waiting for tasks ..."); + // Yep! Now let the main handler do it's job. + // If it returns (dc, etc, then we loop and try again). + handle_tasks(stream, &cfg.home_prefix).await; + } + Err(e) => { + error!("Unable to find kanidm_unixd, sleeping ..."); + debug!("\\---> {:?}", e); + // Back off. 
+ time::sleep(Duration::from_millis(5000)).await; + } + } + } + }; + + server.await; + }) + .await; } diff --git a/kanidm_unix_int/src/test_auth.rs b/kanidm_unix_int/src/test_auth.rs index 216e51bf6..adb84a75f 100644 --- a/kanidm_unix_int/src/test_auth.rs +++ b/kanidm_unix_int/src/test_auth.rs @@ -25,7 +25,7 @@ async fn main() { if opt.debug { ::std::env::set_var("RUST_LOG", "kanidm=debug,kanidm_client=debug"); } - tracing_subscriber::fmt::init(); + sketching::tracing_subscriber::fmt::init(); debug!("Starting pam auth tester tool ..."); diff --git a/kanidm_unix_int/tests/cache_layer_test.rs b/kanidm_unix_int/tests/cache_layer_test.rs index fee15425a..0b1d17cc5 100644 --- a/kanidm_unix_int/tests/cache_layer_test.rs +++ b/kanidm_unix_int/tests/cache_layer_test.rs @@ -7,7 +7,6 @@ use kanidm::audit::LogLevel; use kanidm::config::{Configuration, IntegrationTestConfig, ServerRole}; use score::create_server_core; -use kanidm::tracing_tree; use kanidm_unix_common::cache::{CacheLayer, Id}; use kanidm_unix_common::constants::{ DEFAULT_GID_ATTR_MAP, DEFAULT_HOME_ALIAS, DEFAULT_HOME_ATTR, DEFAULT_HOME_PREFIX, @@ -46,7 +45,7 @@ where } async fn setup_test(fix_fn: Fixture) -> (CacheLayer, KanidmClient) { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); let mut counter = 0; let port = loop { diff --git a/kanidmd/daemon/Cargo.toml b/kanidmd/daemon/Cargo.toml index 02b8de4d2..4f325d1cd 100644 --- a/kanidmd/daemon/Cargo.toml +++ b/kanidmd/daemon/Cargo.toml @@ -20,6 +20,8 @@ path = "src/main.rs" kanidm = { path = "../idm" } kanidm_proto = { path = "../../kanidm_proto" } score = { path = "../score" } +sketching = { path = "../../sketching" } + clap = { version = "^3.2", features = ["derive", "env"] } serde = { version = "^1.0.142", features = ["derive"] } tokio = { version = "^1.20.0", features = ["rt-multi-thread", "macros", "signal"] } diff --git a/kanidmd/daemon/run_insecure_dev_server.sh b/kanidmd/daemon/run_insecure_dev_server.sh index db1c8061c..af12e64ec 100755 --- a/kanidmd/daemon/run_insecure_dev_server.sh +++ b/kanidmd/daemon/run_insecure_dev_server.sh @@ -25,4 +25,4 @@ if [ -n "${1}" ]; then fi #shellcheck disable=SC2086 -RUST_LOG=debug cargo run --bin kanidmd -- ${COMMAND} -c "${CONFIG_FILE}" +cargo run --bin kanidmd -- ${COMMAND} -c "${CONFIG_FILE}" diff --git a/kanidmd/daemon/src/main.rs b/kanidmd/daemon/src/main.rs index b72537a05..3905dcd4c 100644 --- a/kanidmd/daemon/src/main.rs +++ b/kanidmd/daemon/src/main.rs @@ -30,9 +30,10 @@ use std::path::Path; use std::path::PathBuf; use std::str::FromStr; +use sketching::tracing_forest::{self, traits::*, util::*}; + use kanidm::audit::LogLevel; use kanidm::config::{Configuration, OnlineBackup, ServerRole}; -use kanidm::tracing_tree; #[cfg(not(target_family = "windows"))] use kanidm::utils::file_permissions_readonly; use score::{ @@ -167,274 +168,286 @@ fn get_user_details_windows() { ); } -#[tokio::main] +#[tokio::main(flavor = "multi_thread")] async fn main() { - tracing_tree::main_init(); + tracing_forest::worker_task() + .set_global(true) + .set_tag(sketching::event_tagger) + // Fall back to stderr + .map_sender(|sender| sender.or_stderr()) + .build_on(|subscriber| subscriber + .with(EnvFilter::try_from_default_env() + .or_else(|_| EnvFilter::try_new("info")) + .expect("Failed to init envfilter") + ) + ) + .on(async { + // Get information on the windows username + #[cfg(target_family = "windows")] + get_user_details_windows(); - // Get information on the windows username - #[cfg(target_family = "windows")] - 
get_user_details_windows(); + // Get info about who we are. + #[cfg(target_family = "unix")] + let (cuid, ceuid) = get_user_details_unix(); - // Get info about who we are. - #[cfg(target_family = "unix")] - let (cuid, ceuid) = get_user_details_unix(); + // Read cli args, determine if we should backup/restore + let opt = KanidmdParser::parse(); - // Read cli args, determine if we should backup/restore - let opt = KanidmdParser::parse(); - - let mut config = Configuration::new(); - // Check the permissions are OK. - #[cfg(target_family = "unix")] - { - let cfg_meta = read_file_metadata(&(opt.commands.commonopt().config_path)); - - #[cfg(target_family = "unix")] - if !file_permissions_readonly(&cfg_meta) { - eprintln!("WARNING: permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", - opt.commands.commonopt().config_path.to_str().unwrap_or("invalid file path")); - } - - #[cfg(target_family = "unix")] - if cfg_meta.mode() & 0o007 != 0 { - eprintln!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", - opt.commands.commonopt().config_path.to_str().unwrap_or("invalid file path") - ); - } - - #[cfg(target_family = "unix")] - if cfg_meta.uid() == cuid || cfg_meta.uid() == ceuid { - eprintln!("WARNING: {} owned by the current uid, which may allow file permission changes. This could be a security risk ...", - opt.commands.commonopt().config_path.to_str().unwrap_or("invalid file path") - ); - } - } - - // Read our config - let sconfig = match ServerConfig::new(&(opt.commands.commonopt().config_path)) { - Ok(c) => c, - Err(e) => { - eprintln!("Config Parse failure {:?}", e); - std::process::exit(1); - } - }; - // Apply the file requirements - let ll = sconfig - .log_level - .map(|ll| match LogLevel::from_str(ll.as_str()) { - Ok(v) => v as u32, - Err(e) => { - eprintln!("{:?}", e); - std::process::exit(1); - } - }); - - // Check the permissions of the files from the configuration. - - let db_path = PathBuf::from(sconfig.db_path.as_str()); - // We can't check the db_path permissions because it may not exist yet! - if let Some(db_parent_path) = db_path.parent() { - if !db_parent_path.exists() { - eprintln!( - "DB folder {} may not exist, server startup may FAIL!", - db_parent_path.to_str().unwrap_or("invalid file path") - ); - } - - let db_par_path_buf = db_parent_path.to_path_buf(); - let i_meta = read_file_metadata(&db_par_path_buf); - if !i_meta.is_dir() { - eprintln!( - "ERROR: Refusing to run - DB folder {} may not be a directory", - db_par_path_buf.to_str().unwrap_or("invalid file path") - ); - std::process::exit(1); - } - - // TODO: windows support for DB folder permissions checks - #[cfg(target_family = "unix")] - { - if !file_permissions_readonly(&i_meta) { - eprintln!("WARNING: DB folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", db_par_path_buf.to_str().unwrap_or("invalid file path")); - } - if i_meta.mode() & 0o007 != 0 { - eprintln!("WARNING: DB folder {} has 'everyone' permission bits in the mode. 
This could be a security risk ...", db_par_path_buf.to_str().unwrap_or("invalid file path")); - } - } - } - - config.update_log_level(ll); - config.update_db_path(&sconfig.db_path.as_str()); - config.update_db_fs_type(&sconfig.db_fs_type); - config.update_origin(&sconfig.origin.as_str()); - config.update_domain(&sconfig.domain.as_str()); - config.update_db_arc_size(sconfig.db_arc_size); - config.update_role(sconfig.role); - config.update_output_mode(opt.commands.commonopt().output_mode.to_owned().into()); - - // Apply any cli overrides, normally debug level. - if let Some(dll) = opt.commands.commonopt().debug.as_ref() { - config.update_log_level(Some(dll.clone() as u32)); - } - - // ::std::env::set_var("RUST_LOG", "tide=info,kanidm=info,webauthn=debug"); - // env_logger::builder() - // .format_timestamp(None) - // .format_level(false) - // .init(); - - match &opt.commands { - KanidmdOpt::Server(_sopt) | KanidmdOpt::ConfigTest(_sopt) => { - let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest(_)); - if config_test { - eprintln!("Running in server configuration test mode ..."); - } else { - eprintln!("Running in server mode ..."); - }; - - // configuration options that only relate to server mode - config.update_tls(&sconfig.tls_chain, &sconfig.tls_key); - config.update_bind(&sconfig.bindaddress); - config.update_ldapbind(&sconfig.ldapbindaddress); - config.update_online_backup(&sconfig.online_backup); - - if let Some(i_str) = &(sconfig.tls_chain) { - let i_path = PathBuf::from(i_str.as_str()); - // TODO: windows support for DB folder permissions checks - #[cfg(not(target_family = "unix"))] - eprintln!("WARNING: permissions checks on windows aren't implemented, cannot check TLS Key at {:?}", i_path); + let mut config = Configuration::new(); + // Check the permissions are OK. + #[cfg(target_family = "unix")] + { + let cfg_meta = read_file_metadata(&(opt.commands.commonopt().config_path)); #[cfg(target_family = "unix")] - { - let i_meta = read_file_metadata(&i_path); - if !file_permissions_readonly(&i_meta) { - eprintln!("WARNING: permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", i_str); - } + if !file_permissions_readonly(&cfg_meta) { + eprintln!("WARNING: permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", + opt.commands.commonopt().config_path.to_str().unwrap_or("invalid file path")); + } + + #[cfg(target_family = "unix")] + if cfg_meta.mode() & 0o007 != 0 { + eprintln!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", + opt.commands.commonopt().config_path.to_str().unwrap_or("invalid file path") + ); + } + + #[cfg(target_family = "unix")] + if cfg_meta.uid() == cuid || cfg_meta.uid() == ceuid { + eprintln!("WARNING: {} owned by the current uid, which may allow file permission changes. 
This could be a security risk ...", + opt.commands.commonopt().config_path.to_str().unwrap_or("invalid file path") + ); } } - if let Some(i_str) = &(sconfig.tls_key) { - let i_path = PathBuf::from(i_str.as_str()); - // TODO: windows support for DB folder permissions checks - #[cfg(not(target_family = "unix"))] - eprintln!("WARNING: permissions checks on windows aren't implemented, cannot check TLS Key at {:?}", i_path); + // Read our config + let sconfig = match ServerConfig::new(&(opt.commands.commonopt().config_path)) { + Ok(c) => c, + Err(e) => { + eprintln!("Config Parse failure {:?}", e); + std::process::exit(1); + } + }; + // Apply the file requirements + let ll = sconfig + .log_level + .map(|ll| match LogLevel::from_str(ll.as_str()) { + Ok(v) => v as u32, + Err(e) => { + eprintln!("{:?}", e); + std::process::exit(1); + } + }); + + // Check the permissions of the files from the configuration. + + let db_path = PathBuf::from(sconfig.db_path.as_str()); + // We can't check the db_path permissions because it may not exist yet! + if let Some(db_parent_path) = db_path.parent() { + if !db_parent_path.exists() { + eprintln!( + "DB folder {} may not exist, server startup may FAIL!", + db_parent_path.to_str().unwrap_or("invalid file path") + ); + } + + let db_par_path_buf = db_parent_path.to_path_buf(); + let i_meta = read_file_metadata(&db_par_path_buf); + if !i_meta.is_dir() { + eprintln!( + "ERROR: Refusing to run - DB folder {} may not be a directory", + db_par_path_buf.to_str().unwrap_or("invalid file path") + ); + std::process::exit(1); + } // TODO: windows support for DB folder permissions checks + #[cfg(target_family = "unix")] + { + if !file_permissions_readonly(&i_meta) { + eprintln!("WARNING: DB folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", db_par_path_buf.to_str().unwrap_or("invalid file path")); + } if i_meta.mode() & 0o007 != 0 { + eprintln!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str().unwrap_or("invalid file path")); + } + } + } + config.update_log_level(ll); + config.update_db_path(&sconfig.db_path.as_str()); + config.update_db_fs_type(&sconfig.db_fs_type); + config.update_origin(&sconfig.origin.as_str()); + config.update_domain(&sconfig.domain.as_str()); + config.update_db_arc_size(sconfig.db_arc_size); + config.update_role(sconfig.role); + config.update_output_mode(opt.commands.commonopt().output_mode.to_owned().into()); + + // Apply any cli overrides, normally debug level. 
+ if let Some(dll) = opt.commands.commonopt().debug.as_ref() { + config.update_log_level(Some(dll.clone() as u32)); + } + + // ::std::env::set_var("RUST_LOG", "tide=info,kanidm=info,webauthn=debug"); + // env_logger::builder() + // .format_timestamp(None) + // .format_level(false) + // .init(); + + match &opt.commands { + KanidmdOpt::Server(_sopt) | KanidmdOpt::ConfigTest(_sopt) => { + let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest(_)); + if config_test { + eprintln!("Running in server configuration test mode ..."); + } else { + eprintln!("Running in server mode ..."); + }; + + // configuration options that only relate to server mode + config.update_tls(&sconfig.tls_chain, &sconfig.tls_key); + config.update_bind(&sconfig.bindaddress); + config.update_ldapbind(&sconfig.ldapbindaddress); + config.update_online_backup(&sconfig.online_backup); + + if let Some(i_str) = &(sconfig.tls_chain) { + let i_path = PathBuf::from(i_str.as_str()); + // TODO: windows support for DB folder permissions checks + #[cfg(not(target_family = "unix"))] + eprintln!("WARNING: permissions checks on windows aren't implemented, cannot check TLS Key at {:?}", i_path); + + #[cfg(target_family = "unix")] + { + let i_meta = read_file_metadata(&i_path); + if !file_permissions_readonly(&i_meta) { + eprintln!("WARNING: permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", i_str); + } } - Err(_) => { - eprintln!("Invalid signal received, shutting down as a precaution ..."); + } + + if let Some(i_str) = &(sconfig.tls_key) { + let i_path = PathBuf::from(i_str.as_str()); + // TODO: windows support for DB folder permissions checks + #[cfg(not(target_family = "unix"))] + eprintln!("WARNING: permissions checks on windows aren't implemented, cannot check TLS Key at {:?}", i_path); + + // TODO: windows support for DB folder permissions checks + #[cfg(target_family = "unix")] + { + let i_meta = read_file_metadata(&i_path); + if !file_permissions_readonly(&i_meta) { + eprintln!("WARNING: permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", i_str); + } + if i_meta.mode() & 0o007 != 0 { + eprintln!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", i_str); + } } - }, - Err(_) => { - eprintln!("Failed to start server core!"); - // We may need to return an exit code here, but that may take some re-architecting - // to ensure we drop everything cleanly. - return; + } + + let sctx = create_server_core(config, config_test).await; + if !config_test { + match sctx { + Ok(_sctx) => match tokio::signal::ctrl_c().await { + Ok(_) => { + eprintln!("Ctrl-C received, shutting down"); + } + Err(_) => { + eprintln!("Invalid signal received, shutting down as a precaution ..."); + } + }, + Err(_) => { + eprintln!("Failed to start server core!"); + // We may need to return an exit code here, but that may take some re-architecting + // to ensure we drop everything cleanly. 
+ return; + } + } + eprintln!("stopped ๐Ÿ›‘ "); } } - eprintln!("stopped ๐Ÿ›‘ "); + KanidmdOpt::Database { + commands: DbCommands::Backup(bopt), + } => { + eprintln!("Running in backup mode ..."); + let p = match bopt.path.to_str() { + Some(p) => p, + None => { + eprintln!("Invalid backup path"); + std::process::exit(1); + } + }; + backup_server_core(&config, p); + } + KanidmdOpt::Database { + commands: DbCommands::Restore(ropt), + } => { + eprintln!("Running in restore mode ..."); + let p = match ropt.path.to_str() { + Some(p) => p, + None => { + eprintln!("Invalid restore path"); + std::process::exit(1); + } + }; + restore_server_core(&config, p); + } + KanidmdOpt::Database { + commands: DbCommands::Verify(_vopt), + } => { + eprintln!("Running in db verification mode ..."); + verify_server_core(&config); + } + KanidmdOpt::RecoverAccount(raopt) => { + eprintln!("Running account recovery ..."); + recover_account_core(&config, &raopt.name); + } + KanidmdOpt::Database { + commands: DbCommands::Reindex(_copt), + } => { + eprintln!("Running in reindex mode ..."); + reindex_server_core(&config); + } + KanidmdOpt::DbScan { + commands: DbScanOpt::ListIndexes(_), + } => { + eprintln!("๐Ÿ‘€ db scan - list indexes"); + dbscan_list_indexes_core(&config); + } + KanidmdOpt::DbScan { + commands: DbScanOpt::ListId2Entry(_), + } => { + eprintln!("๐Ÿ‘€ db scan - list id2entry"); + dbscan_list_id2entry_core(&config); + } + KanidmdOpt::DbScan { + commands: DbScanOpt::ListIndexAnalysis(_), + } => { + eprintln!("๐Ÿ‘€ db scan - list index analysis"); + dbscan_list_index_analysis_core(&config); + } + KanidmdOpt::DbScan { + commands: DbScanOpt::ListIndex(dopt), + } => { + eprintln!("๐Ÿ‘€ db scan - list index content - {}", dopt.index_name); + dbscan_list_index_core(&config, dopt.index_name.as_str()); + } + KanidmdOpt::DbScan { + commands: DbScanOpt::GetId2Entry(dopt), + } => { + eprintln!("๐Ÿ‘€ db scan - get id2 entry - {}", dopt.id); + dbscan_get_id2entry_core(&config, dopt.id); + } + KanidmdOpt::DomainSettings { + commands: DomainSettingsCmds::DomainChange(_dopt), + } => { + eprintln!("Running in domain name change mode ... 
this may take a long time ..."); + domain_rename_core(&config); + } + KanidmdOpt::Database { + commands: DbCommands::Vacuum(_copt), + } => { + eprintln!("Running in vacuum mode ..."); + vacuum_server_core(&config); + } } - } - KanidmdOpt::Database { - commands: DbCommands::Backup(bopt), - } => { - eprintln!("Running in backup mode ..."); - let p = match bopt.path.to_str() { - Some(p) => p, - None => { - eprintln!("Invalid backup path"); - std::process::exit(1); - } - }; - backup_server_core(&config, p); - } - KanidmdOpt::Database { - commands: DbCommands::Restore(ropt), - } => { - eprintln!("Running in restore mode ..."); - let p = match ropt.path.to_str() { - Some(p) => p, - None => { - eprintln!("Invalid restore path"); - std::process::exit(1); - } - }; - restore_server_core(&config, p); - } - KanidmdOpt::Database { - commands: DbCommands::Verify(_vopt), - } => { - eprintln!("Running in db verification mode ..."); - verify_server_core(&config); - } - KanidmdOpt::RecoverAccount(raopt) => { - eprintln!("Running account recovery ..."); - recover_account_core(&config, &raopt.name); - } - KanidmdOpt::Database { - commands: DbCommands::Reindex(_copt), - } => { - eprintln!("Running in reindex mode ..."); - reindex_server_core(&config); - } - KanidmdOpt::DbScan { - commands: DbScanOpt::ListIndexes(_), - } => { - eprintln!("๐Ÿ‘€ db scan - list indexes"); - dbscan_list_indexes_core(&config); - } - KanidmdOpt::DbScan { - commands: DbScanOpt::ListId2Entry(_), - } => { - eprintln!("๐Ÿ‘€ db scan - list id2entry"); - dbscan_list_id2entry_core(&config); - } - KanidmdOpt::DbScan { - commands: DbScanOpt::ListIndexAnalysis(_), - } => { - eprintln!("๐Ÿ‘€ db scan - list index analysis"); - dbscan_list_index_analysis_core(&config); - } - KanidmdOpt::DbScan { - commands: DbScanOpt::ListIndex(dopt), - } => { - eprintln!("๐Ÿ‘€ db scan - list index content - {}", dopt.index_name); - dbscan_list_index_core(&config, dopt.index_name.as_str()); - } - KanidmdOpt::DbScan { - commands: DbScanOpt::GetId2Entry(dopt), - } => { - eprintln!("๐Ÿ‘€ db scan - get id2 entry - {}", dopt.id); - dbscan_get_id2entry_core(&config, dopt.id); - } - KanidmdOpt::DomainSettings { - commands: DomainSettingsCmds::DomainChange(_dopt), - } => { - eprintln!("Running in domain name change mode ... 
this may take a long time ..."); - domain_rename_core(&config); - } - KanidmdOpt::Database { - commands: DbCommands::Vacuum(_copt), - } => { - eprintln!("Running in vacuum mode ..."); - vacuum_server_core(&config); - } - } + }) + .await; } diff --git a/kanidmd/idm/Cargo.toml b/kanidmd/idm/Cargo.toml index 818a507ab..774647cb9 100644 --- a/kanidmd/idm/Cargo.toml +++ b/kanidmd/idm/Cargo.toml @@ -54,9 +54,10 @@ tokio = { version = "^1.20.0", features = ["net", "sync", "time"] } tokio-util = { version = "^0.7.3", features = ["codec"] } toml = "^0.5.9" touch = "^0.0.1" -tracing = { version = "^0.1.35", features = ["attributes", "max_level_trace", "release_max_level_debug"] } -tracing-serde = "^0.1.3" -tracing-subscriber = { version = "^0.3.14", features = ["env-filter"] } + +sketching = { path = "../../sketching" } +tracing = { version = "^0.1.35", features = ["attributes"] } + url = { version = "^2.2.2", features = ["serde"] } urlencoding = "2.1.0" uuid = { version = "^1.1.2", features = ["serde", "v4" ] } diff --git a/kanidmd/idm/src/access.rs b/kanidmd/idm/src/access.rs index 09e0e07a1..030117cdb 100644 --- a/kanidmd/idm/src/access.rs +++ b/kanidmd/idm/src/access.rs @@ -2529,7 +2529,7 @@ mod tests { #[test] fn test_access_effective_permission_check_1() { - let _ = crate::tracing_tree::test_init(); + let _ = sketching::test_init(); let admin = unsafe { Identity::from_impersonate_entry_ser(JSON_ADMIN_V1) }; @@ -2567,7 +2567,7 @@ mod tests { #[test] fn test_access_effective_permission_check_2() { - let _ = crate::tracing_tree::test_init(); + let _ = sketching::test_init(); let admin = unsafe { Identity::from_impersonate_entry_ser(JSON_ADMIN_V1) }; diff --git a/kanidmd/idm/src/be/idl_arc_sqlite.rs b/kanidmd/idm/src/be/idl_arc_sqlite.rs index cab563f29..b7d4ce589 100644 --- a/kanidmd/idm/src/be/idl_arc_sqlite.rs +++ b/kanidmd/idm/src/be/idl_arc_sqlite.rs @@ -707,7 +707,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { self.idl_cache.iter_mut_dirty().for_each(|(k, maybe_idl)| { if let Some(idl) = maybe_idl { if idl.maybe_compress() { - filter_info!(?k, "Compressed idl"); + filter_trace!(?k, "Compressed idl"); } } }) diff --git a/kanidmd/idm/src/be/idl_sqlite.rs b/kanidmd/idm/src/be/idl_sqlite.rs index fd8dfb813..f5c539e16 100644 --- a/kanidmd/idm/src/be/idl_sqlite.rs +++ b/kanidmd/idm/src/be/idl_sqlite.rs @@ -1412,7 +1412,7 @@ mod tests { #[test] fn test_idl_sqlite_verify() { - let _ = crate::tracing_tree::test_init(); + let _ = sketching::test_init(); let cfg = BackendConfig::new_test(); let be = IdlSqlite::new(&cfg, false).unwrap(); let be_w = be.write(); diff --git a/kanidmd/idm/src/be/mod.rs b/kanidmd/idm/src/be/mod.rs index 26253b563..0936a9977 100644 --- a/kanidmd/idm/src/be/mod.rs +++ b/kanidmd/idm/src/be/mod.rs @@ -1792,7 +1792,7 @@ mod tests { macro_rules! run_test { ($test_fn:expr) => {{ - let _ = crate::tracing_tree::test_init(); + let _ = sketching::test_init(); // This is a demo idxmeta, purely for testing. let idxmeta = vec![ diff --git a/kanidmd/idm/src/filter.rs b/kanidmd/idm/src/filter.rs index df0ccf868..7f8ed02ed 100644 --- a/kanidmd/idm/src/filter.rs +++ b/kanidmd/idm/src/filter.rs @@ -1375,7 +1375,7 @@ mod tests { #[test] fn test_filter_optimise() { - let _ = tracing_subscriber::fmt().try_init(); + let _ = sketching::test_init(); // Given sets of "optimisable" filters, optimise them. 
filter_optimise_assert!( f_and(vec![f_and(vec![f_eq( diff --git a/kanidmd/idm/src/idm/authsession.rs b/kanidmd/idm/src/idm/authsession.rs index cd252d69b..d53a4941f 100644 --- a/kanidmd/idm/src/idm/authsession.rs +++ b/kanidmd/idm/src/idm/authsession.rs @@ -7,7 +7,6 @@ use crate::idm::account::Account; use crate::idm::delayed::BackupCodeRemoval; use crate::idm::AuthState; use crate::prelude::*; -use crate::tracing_tree; use hashbrown::HashSet; use kanidm_proto::v1::OperationError; use kanidm_proto::v1::{AuthAllowed, AuthCredential, AuthMech, AuthType}; @@ -745,13 +744,16 @@ impl AuthSession { // Can't `unwrap` the uuid until full integration, because some unit tests // call functions that call this indirectly without opening a span first, // and this returns `None` when not in a span (and panics if the tree isn't initialized). - let tracing_id = tracing_tree::operation_id().unwrap_or_else(|| { - admin_warn!("Recoverable - Invalid Tracing Operation ID State"); - Uuid::new_v4() - }); + let session_id = Uuid::new_v4(); + security_info!( + "Starting session {} for {} {}", + session_id, + self.account.spn, + self.account.uuid + ); let uat = self .account - .to_userauthtoken(tracing_id, *time, auth_type) + .to_userauthtoken(session_id, *time, auth_type) .ok_or(OperationError::InvalidState)?; let jwt = Jws { inner: uat }; @@ -840,7 +842,6 @@ mod tests { use crate::idm::delayed::DelayedAction; use crate::idm::AuthState; use crate::prelude::*; - use crate::tracing_tree; use hashbrown::HashSet; pub use std::collections::BTreeSet as Set; @@ -874,7 +875,8 @@ mod tests { #[test] fn test_idm_authsession_anonymous_auth_mech() { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); + let webauthn = create_webauthn(); let anon_account = entry_str_to_account!(JSON_ANONYMOUS_V1); @@ -935,7 +937,7 @@ mod tests { #[test] fn test_idm_authsession_simple_password_mech() { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); let webauthn = create_webauthn(); // create the ent let mut account = entry_str_to_account!(JSON_ADMIN_V1); @@ -988,7 +990,7 @@ mod tests { #[test] fn test_idm_authsession_simple_password_badlist() { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); let jws_signer = create_jwt_signer(); let webauthn = create_webauthn(); // create the ent @@ -1076,7 +1078,7 @@ mod tests { #[test] fn test_idm_authsession_totp_password_mech() { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); let webauthn = create_webauthn(); let jws_signer = create_jwt_signer(); // create the ent @@ -1231,7 +1233,7 @@ mod tests { #[test] fn test_idm_authsession_password_mfa_badlist() { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); let webauthn = create_webauthn(); let jws_signer = create_jwt_signer(); // create the ent @@ -1391,7 +1393,7 @@ mod tests { #[test] fn test_idm_authsession_webauthn_only_mech() { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); let (async_tx, mut async_rx) = unbounded(); let ts = duration_from_epoch_now(); // create the ent @@ -1522,7 +1524,7 @@ mod tests { #[test] fn test_idm_authsession_webauthn_password_mech() { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); let (async_tx, mut async_rx) = unbounded(); let ts = duration_from_epoch_now(); // create the ent @@ -1692,7 +1694,7 @@ mod tests { #[test] fn test_idm_authsession_webauthn_password_totp_mech() { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); let (async_tx, mut async_rx) = 
unbounded(); let ts = duration_from_epoch_now(); // create the ent @@ -1929,7 +1931,7 @@ mod tests { #[test] fn test_idm_authsession_backup_code_mech() { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); let jws_signer = create_jwt_signer(); let webauthn = create_webauthn(); // create the ent diff --git a/kanidmd/idm/src/idm/server.rs b/kanidmd/idm/src/idm/server.rs index c14cc8a6f..0698b64b0 100644 --- a/kanidmd/idm/src/idm/server.rs +++ b/kanidmd/idm/src/idm/server.rs @@ -24,7 +24,6 @@ use crate::idm::unix::{UnixGroup, UnixUserAccount}; use crate::idm::AuthState; use crate::ldap::LdapBoundToken; use crate::prelude::*; -use crate::tracing_tree; use crate::utils::{ backup_code_from_random, password_from_random, readable_password_from_random, uuid_from_duration, Sid, @@ -954,16 +953,19 @@ impl<'a> IdmServerAuthTransaction<'a> { return Ok(None); } - let tracing_id = tracing_tree::operation_id().unwrap_or_else(|| { - admin_warn!("Recoverable - Invalid Tracing Operation ID State"); - Uuid::new_v4() - }); + let session_id = Uuid::new_v4(); + security_info!( + "Starting session {} for {} {}", + session_id, + account.spn, + account.uuid + ); // Account must be anon, so we can gen the uat. Ok(Some(LdapBoundToken { uuid: UUID_ANONYMOUS, effective_uat: account - .to_userauthtoken(tracing_id, ct, AuthType::Anonymous) + .to_userauthtoken(session_id, ct, AuthType::Anonymous) .ok_or(OperationError::InvalidState) .map_err(|e| { admin_error!("Unable to generate effective_uat -> {:?}", e); @@ -1037,16 +1039,19 @@ impl<'a> IdmServerAuthTransaction<'a> { let anon_account = Account::try_from_entry_ro(anon_entry.as_ref(), &mut self.qs_read)?; - let tracing_id = tracing_tree::operation_id().unwrap_or_else(|| { - admin_warn!("Recoverable - Invalid Tracing Operation ID State"); - Uuid::new_v4() - }); + let session_id = Uuid::new_v4(); + security_info!( + "Starting session {} for {} {}", + session_id, + account.spn, + account.uuid + ); Ok(Some(LdapBoundToken { spn: account.spn, uuid: account.uuid, effective_uat: anon_account - .to_userauthtoken(tracing_id, ct, AuthType::UnixPassword) + .to_userauthtoken(session_id, ct, AuthType::UnixPassword) .ok_or(OperationError::InvalidState) .map_err(|e| { admin_error!("Unable to generate effective_uat -> {:?}", e); diff --git a/kanidmd/idm/src/lib.rs b/kanidmd/idm/src/lib.rs index 45084447c..b39cfe641 100644 --- a/kanidmd/idm/src/lib.rs +++ b/kanidmd/idm/src/lib.rs @@ -25,6 +25,9 @@ extern crate tracing; #[macro_use] extern crate lazy_static; +// #[macro_use] +// extern crate sketching; + // This has to be before 'be' so the import order works #[macro_use] pub mod macros; @@ -66,9 +69,6 @@ pub mod prelude { pub use url::Url; pub use uuid::Uuid; - pub use crate::tagged_event; - pub use crate::tracing_tree::EventTag; - pub use crate::constants::*; pub use crate::filter::{ f_and, f_andnot, f_eq, f_id, f_inc, f_lt, f_or, f_pres, f_self, f_spn_name, f_sub, @@ -92,11 +92,10 @@ pub mod prelude { ValueSetSecret, ValueSetSpn, ValueSetSyntax, ValueSetT, ValueSetUint32, ValueSetUtf8, ValueSetUuid, }; - pub use crate::{ + pub use sketching::{ admin_debug, admin_error, admin_info, admin_warn, filter_error, filter_info, filter_trace, filter_warn, perf_trace, request_error, request_info, request_trace, request_warn, - security_access, security_critical, security_error, security_info, spanned, + security_access, security_critical, security_error, security_info, spanned, tagged_event, + EventTag, }; } - -pub mod tracing_tree; diff --git a/kanidmd/idm/src/macros.rs 
b/kanidmd/idm/src/macros.rs index 552e1d2a7..88d16bccf 100644 --- a/kanidmd/idm/src/macros.rs +++ b/kanidmd/idm/src/macros.rs @@ -1,16 +1,6 @@ macro_rules! setup_test { () => {{ - /* - use env_logger; - ::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug"); - let _ = env_logger::builder() - .format_timestamp(None) - .format_level(false) - .is_test(true) - .try_init(); - */ - - let _ = crate::tracing_tree::test_init(); + let _ = sketching::test_init(); // Create an in memory BE let schema_outer = Schema::new().expect("Failed to init schema"); @@ -32,16 +22,7 @@ macro_rules! setup_test { use crate::utils::duration_from_epoch_now; use async_std::task; - /* - use env_logger; - ::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug"); - let _ = env_logger::builder() - .format_timestamp(None) - .format_level(false) - .is_test(true) - .try_init(); - */ - let _ = crate::tracing_tree::test_init(); + let _ = sketching::test_init(); // Create an in memory BE let schema_outer = Schema::new().expect("Failed to init schema"); @@ -75,16 +56,7 @@ macro_rules! run_test_no_init { use crate::schema::Schema; use crate::utils::duration_from_epoch_now; - let _ = crate::tracing_tree::test_init(); - /* - use env_logger; - ::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug"); - let _ = env_logger::builder() - .format_timestamp(None) - .format_level(false) - .is_test(true) - .try_init(); - */ + let _ = sketching::test_init(); let schema_outer = Schema::new().expect("Failed to init schema"); let idxmeta = { @@ -117,16 +89,7 @@ macro_rules! run_test { #[allow(unused_imports)] use crate::utils::duration_from_epoch_now; - let _ = crate::tracing_tree::test_init(); - /* - use env_logger; - ::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug"); - let _ = env_logger::builder() - .format_timestamp(None) - .format_level(false) - .is_test(true) - .try_init(); - */ + let _ = sketching::test_init(); let test_server = setup_test!(); @@ -198,7 +161,7 @@ macro_rules! run_idm_test_inner { #[cfg(test)] macro_rules! run_idm_test { ($test_fn:expr) => {{ - let _ = crate::tracing_tree::test_init(); + let _ = sketching::test_init(); run_idm_test_inner!($test_fn); }}; } @@ -211,7 +174,7 @@ where &crate::idm::server::IdmServerDelayed, ), { - let _ = crate::tracing_tree::test_level(tracing::Level::ERROR); + let _ = sketching::test_init(); let _ = run_idm_test_inner!(test_fn); } @@ -382,7 +345,7 @@ macro_rules! run_delete_test { #[cfg(test)] macro_rules! 
run_entrychangelog_test { ($test_fn:expr) => {{ - let _ = crate::tracing_tree::test_init(); + let _ = sketching::test_init(); let schema_outer = Schema::new().expect("Failed to init schema"); let schema_txn = schema_outer.read(); diff --git a/kanidmd/idm/src/schema.rs b/kanidmd/idm/src/schema.rs index 5de25cd06..08f2404cc 100644 --- a/kanidmd/idm/src/schema.rs +++ b/kanidmd/idm/src/schema.rs @@ -1847,7 +1847,7 @@ mod tests { #[test] fn test_schema_entries() { - let _ = crate::tracing_tree::test_init(); + let _ = sketching::test_init(); // Given an entry, assert it's schema is valid // We do let schema_outer = Schema::new().expect("failed to create schema"); diff --git a/kanidmd/idm/src/server.rs b/kanidmd/idm/src/server.rs index c2e11f399..d4211738a 100644 --- a/kanidmd/idm/src/server.rs +++ b/kanidmd/idm/src/server.rs @@ -1892,8 +1892,9 @@ impl<'a> QueryServerWriteTransaction<'a> { let res: Result>, OperationError> = candidates .into_iter() - .map(|e| { - e.validate(&self.schema) + .map(|entry| { + entry + .validate(&self.schema) .map_err(|e| { admin_error!( "Schema Violation in validation of modify_pre_apply {:?}", @@ -1901,7 +1902,7 @@ impl<'a> QueryServerWriteTransaction<'a> { ); OperationError::SchemaViolation(e) }) - .map(|e| e.seal(&self.schema)) + .map(|entry| entry.seal(&self.schema)) }) .collect(); diff --git a/kanidmd/idm/src/tracing_tree/formatter.rs b/kanidmd/idm/src/tracing_tree/formatter.rs deleted file mode 100644 index 54ab6c794..000000000 --- a/kanidmd/idm/src/tracing_tree/formatter.rs +++ /dev/null @@ -1,325 +0,0 @@ -use super::subscriber::{TreeEvent, TreeProcessed, TreeSpanProcessed}; -use super::EventTag; -use serde::ser::{Serialize, SerializeMap, SerializeStruct}; -use std::fmt::{self, Write as _}; -use std::io::{self, Write as _}; -use tracing_serde::AsSerde; -use tracing_subscriber::registry::ScopeFromRoot; -use tracing_subscriber::Registry; - -#[derive(Clone, Copy, Debug)] -pub enum LogFmt { - Json, - Pretty, -} - -const EVENT_UUID: &str = "00000000-0000-0000-0000-000000000000"; - -impl LogFmt { - pub(crate) fn format(self, processed_logs: &TreeProcessed) -> Vec { - match self { - LogFmt::Json => format_json(processed_logs), - LogFmt::Pretty => format_pretty(processed_logs), - } - } -} - -fn format_json(processed_logs: &TreeProcessed) -> Vec { - fn fmt_rec<'a>( - tree: &TreeProcessed, - spans: &'a mut Vec<&'static str>, - uuid: Option<&'a str>, - mut writer: &mut Vec, - ) -> io::Result<()> { - match tree { - TreeProcessed::Event(event) => { - struct SerializeFields<'a> { - values: &'a Vec<(&'static str, String)>, - } - - impl<'a> Serialize for SerializeFields<'a> { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - let mut model = serializer.serialize_map(Some(self.values.len()))?; - for (key, value) in self.values.iter() { - model.serialize_entry(key, value)?; - } - model.end() - } - } - - struct SerializeEvent<'a> { - event: &'a TreeEvent, - uuid: &'a str, - spans: &'a mut Vec<&'static str>, - } - - impl<'a> Serialize for SerializeEvent<'a> { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - let mut model = serializer.serialize_struct("event", 8)?; - model.serialize_field("uuid", self.uuid)?; - model.serialize_field("timestamp", &self.event.timestamp.to_rfc3339())?; - model.serialize_field("level", &self.event.level.as_serde())?; - model.serialize_field("message", &self.event.message)?; - model.serialize_field("log-type", "event")?; - model.serialize_field("tag", 
&self.event.tag.map(EventTag::pretty))?; - model.serialize_field( - "fields", - &SerializeFields { - values: &self.event.values, - }, - )?; - model.serialize_field("spans", self.spans)?; - model.end() - } - } - - let serialize_event = SerializeEvent { - event, - uuid: uuid.unwrap_or(EVENT_UUID), - spans, - }; - - serde_json::to_writer(&mut writer, &serialize_event).map_err(io::Error::from)?; - writeln!(writer) - } - TreeProcessed::Span(span) => { - struct SerializeSpan<'a> { - span: &'a TreeSpanProcessed, - uuid: &'a str, - } - - impl<'a> Serialize for SerializeSpan<'a> { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - let mut model = serializer.serialize_struct("event", 7)?; - model.serialize_field("uuid", self.uuid)?; - model.serialize_field("timestamp", &self.span.timestamp.to_rfc3339())?; - model.serialize_field("level", "TRACE")?; - model.serialize_field("message", &self.span.name)?; - model.serialize_field("log-type", "span")?; - model.serialize_field("nanos-nested", &self.span.nested_duration)?; - model.serialize_field("nanos-total", &self.span.total_duration)?; - model.end() - } - } - - #[allow(clippy::expect_used)] - let uuid = span - .uuid - .as_deref() - .or(uuid) - .expect("Span has no associated UUID, this is a bug"); - - let serialize_span = SerializeSpan { span, uuid }; - - serde_json::to_writer(&mut writer, &serialize_span).map_err(io::Error::from)?; - writeln!(writer)?; - - // format stuff in child spans - spans.push(span.name); - for logs in span.processed_buf.iter() { - fmt_rec(logs, spans, Some(uuid), writer)?; - } - spans.pop(); - Ok(()) - } - } - } - - let mut writer = vec![]; - let mut spans = vec![]; - #[allow(clippy::expect_used)] - fmt_rec(processed_logs, &mut spans, None, &mut writer).expect("Write failed"); - writer -} - -fn format_pretty(processed_logs: &TreeProcessed) -> Vec { - #[derive(Clone, Copy)] - enum Fill { - Void, - Line, - Fork, - Turn, - } - - impl fmt::Display for Fill { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - use Fill::*; - f.write_str(match self { - Void => " ", - Line => "โ”‚ ", - Fork => "โ”โ” ", - Turn => "โ”•โ” ", - }) - } - } - - fn fmt_rec( - tree: &TreeProcessed, - indent: &mut Vec, - uuid: Option<&str>, - root_duration: Option, - writer: &mut Vec, - ) -> io::Result<()> { - use Fill::*; - match tree { - TreeProcessed::Event(event) => { - let uuid = uuid.unwrap_or(EVENT_UUID); - - let timestamp_fmt = event.timestamp.to_rfc3339(); - - write!(writer, "{} {} {:<8} ", uuid, timestamp_fmt, event.level)?; - - for fill in indent.iter() { - write!(writer, "{}", fill)?; - } - - write!( - writer, - "{} [{}]: {}", - event.emoji(), - event.tag(), - event.message - )?; - - for (field, value) in event.values.iter() { - write!(writer, " | {}: {}", field, value)?; - } - - writeln!(writer) - } - TreeProcessed::Span(span) => { - #[allow(clippy::expect_used)] - let uuid = span - .uuid - .as_deref() - .or(uuid) - .expect("Span has no associated UUID, this is a bug"); - - let timestamp_fmt = span.timestamp.to_rfc3339(); - - let total_duration = span.total_duration as f64; - - let root_duration = root_duration.unwrap_or(total_duration); - - let total_load = 100.0 * total_duration / root_duration; - - struct DurationDisplay(f64); - - // This is straight up stolen from chrono - impl fmt::Display for DurationDisplay { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut t = self.0; - for unit in ["ns", "ยตs", "ms", "s"].iter() { - if t < 10.0 { - return write!(f, "{:.2}{}", t, unit); - } else 
if t < 100.0 { - return write!(f, "{:.1}{}", t, unit); - } else if t < 1000.0 { - return write!(f, "{:.0}{}", t, unit); - } - t /= 1000.0; - } - write!(f, "{:.0}s", t * 1000.0) - } - } - - write!(writer, "{} {} TRACE ", uuid, timestamp_fmt)?; - - for fill in indent.iter() { - write!(writer, "{}", fill)?; - } - - write!( - writer, - "{} [ {} | ", - span.name, - DurationDisplay(total_duration) - )?; - - if span.nested_duration > 0 { - let direct_load = - 100.0 * (total_duration - span.nested_duration as f64) / root_duration; - write!(writer, "{:.3}% / ", direct_load)?; - } - - writeln!(writer, "{:.3}% ]", total_load)?; - - if let Some((last, remaining)) = span.processed_buf.split_last() { - // This span has children - // This is for what wraps the left of this span - match indent.last_mut() { - Some(f @ Turn) => *f = Void, - Some(f @ Fork) => *f = Line, - _ => {} - } - - // Need to extend by one - indent.push(Fork); - for logs in remaining { - // Reset to Fork - if let Some(fill) = indent.last_mut() { - *fill = Fork; - } - fmt_rec(logs, indent, Some(uuid), Some(root_duration), writer)?; - } - - // Last child, set to Turn - if let Some(fill) = indent.last_mut() { - *fill = Turn; - } - fmt_rec(last, indent, Some(uuid), Some(root_duration), writer)?; - - indent.pop(); - } else { - // this span has no children - } - - Ok(()) - } - } - } - - let mut writer = vec![]; - let mut indent = vec![]; - #[allow(clippy::expect_used)] - fmt_rec(processed_logs, &mut indent, None, None, &mut writer).expect("Write failed"); - writer -} - -pub(super) fn format_immediate_event( - event: &TreeEvent, - maybe_scope: Option>, -) -> Result { - let mut writer = String::new(); - - write!( - writer, - "{} {em} [{}] {em}", - event.timestamp.to_rfc3339(), - event.level, - em = event.emoji() - )?; - - if let Some(scope) = maybe_scope { - for span in scope { - write!(writer, "๐Ÿ”น{}", span.name())?; - } - } - - write!(writer, ": {}", event.message)?; - - for (key, value) in event.values.iter() { - write!(writer, " | {}: {}", key, value)?; - } - - Ok(writer) -} diff --git a/kanidmd/idm/src/tracing_tree/mod.rs b/kanidmd/idm/src/tracing_tree/mod.rs deleted file mode 100644 index 679bc92fc..000000000 --- a/kanidmd/idm/src/tracing_tree/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -mod event_tag; -mod formatter; -mod macros; -mod middleware; -mod processor; -mod subscriber; -mod timings; - -pub use event_tag::EventTag; -pub use middleware::TreeMiddleware; -pub use subscriber::{ - main_init, operation_id, test_init, test_level, TreePreProcessed, TreeSubscriber, -}; diff --git a/kanidmd/idm/src/tracing_tree/processor.rs b/kanidmd/idm/src/tracing_tree/processor.rs deleted file mode 100644 index 14438bd89..000000000 --- a/kanidmd/idm/src/tracing_tree/processor.rs +++ /dev/null @@ -1,34 +0,0 @@ -use super::TreePreProcessed; -use tokio::sync::mpsc::UnboundedSender; - -pub trait Processor: 'static { - fn process(&self, preprocessed: TreePreProcessed); -} - -pub struct ExportProcessor { - sender: UnboundedSender, -} - -pub struct TestProcessor {} - -impl ExportProcessor { - pub fn with_sender(sender: UnboundedSender) -> Self { - ExportProcessor { sender } - } -} - -impl Processor for ExportProcessor { - fn process(&self, preprocessed: TreePreProcessed) { - #[allow(clippy::expect_used)] - self.sender - .send(preprocessed) - .expect("Processing channel has been closed, cannot log events."); - } -} - -impl Processor for TestProcessor { - fn process(&self, preprocessed: TreePreProcessed) { - #[allow(clippy::expect_used)] - 
preprocessed.process().expect("Failed to write logs"); - } -} diff --git a/kanidmd/idm/src/tracing_tree/subscriber.rs b/kanidmd/idm/src/tracing_tree/subscriber.rs deleted file mode 100644 index fe47cfe03..000000000 --- a/kanidmd/idm/src/tracing_tree/subscriber.rs +++ /dev/null @@ -1,583 +0,0 @@ -use std::any::TypeId; -use std::convert::TryFrom; -use std::fmt; -use std::fs::OpenOptions; -use std::io::{self, Write as _}; -use std::path::PathBuf; -use std::time::Duration; - -use chrono::{DateTime, Utc}; -use futures::Future; -use tokio::sync::mpsc::unbounded_channel; -use tokio::sync::mpsc::UnboundedSender; -use tokio::task::JoinHandle; -use tracing::dispatcher::SetGlobalDefaultError; -use tracing::field::{Field, Visit}; -use tracing::span::{Attributes, Record}; -use tracing::{Event, Id, Level, Metadata, Subscriber}; -use tracing_subscriber::filter::EnvFilter; -use tracing_subscriber::layer::{Context, Layered, SubscriberExt}; -use tracing_subscriber::registry::{LookupSpan, Registry, Scope, SpanRef}; -use tracing_subscriber::Layer; -use uuid::Uuid; - -use crate::tracing_tree::processor::TestProcessor; - -use super::formatter::LogFmt; -use super::processor::{ExportProcessor, Processor}; -use super::timings::Timer; -use super::EventTag; - -pub struct TreeSubscriber
<P> { - inner: Layered<TreeLayer<P>, Registry>, -} - -struct TreeLayer<P>
{ - fmt: LogFmt, - processor: P, -} - -#[derive(Debug)] -pub(crate) struct TreeEvent { - pub timestamp: DateTime, - pub message: String, - pub level: Level, - pub tag: Option, - pub values: Vec<(&'static str, String)>, -} - -#[derive(Debug)] -struct TreeSpan { - pub timestamp: DateTime, - pub name: &'static str, - pub buf: Vec, - pub uuid: Option, - pub out: TreeIo, -} - -#[derive(Debug)] -enum Tree { - Event(TreeEvent), - Span(TreeSpan, Duration), -} - -#[derive(Debug)] -pub struct TreePreProcessed { - fmt: LogFmt, - logs: Tree, -} - -#[derive(Debug)] -pub enum TreeIo { - Stdout, - Stderr, - File(PathBuf), -} - -pub(crate) struct TreeSpanProcessed { - pub timestamp: DateTime, - pub name: &'static str, - pub processed_buf: Vec, - pub uuid: Option, - pub out: TreeIo, - pub nested_duration: u64, - pub total_duration: u64, -} - -pub(crate) enum TreeProcessed { - Event(TreeEvent), - Span(TreeSpanProcessed), -} - -impl TreeSubscriber { - fn new_with(fmt: LogFmt, sender: UnboundedSender) -> Self { - let layer = TreeLayer { - fmt, - processor: ExportProcessor::with_sender(sender), - }; - - TreeSubscriber { - inner: Registry::default().with(layer), - } - } - - pub fn new(fmt: LogFmt) -> (Self, impl Future) { - let (log_tx, mut log_rx) = unbounded_channel(); - let subscriber = TreeSubscriber::new_with(fmt, log_tx); - let logger = async move { - while let Some(processor) = log_rx.recv().await { - #[allow(clippy::expect_used)] - processor.process().expect("Failed to write logs"); - } - }; - - (subscriber, logger) - } - - // These are the preferred constructors. - #[allow(dead_code)] - pub fn json() -> (Self, impl Future) { - TreeSubscriber::new(LogFmt::Json) - } - - #[allow(dead_code)] - pub fn pretty() -> (Self, impl Future) { - TreeSubscriber::new(LogFmt::Pretty) - } -} - -impl TreeSubscriber
<P>
{ - #[allow(dead_code)] - pub fn thread_operation_id(&self) -> Option { - let current = self.inner.current_span(); - // If there's no current span, we short-circuit. - let id = current.id()?; - #[allow(clippy::expect_used)] - let span = self - .inner - .span(id) - .expect("The subscriber doesn't have data for an existing span, this is a bug"); - - span.scope().into_iter().find_map(|span| { - let extensions = span.extensions(); - // If `uuid` is `None`, then we keep searching. - #[allow(clippy::expect_used)] - let uuid = extensions - .get::() - .expect("Span buffer not found, this is a bug") - .uuid - .as_ref()?; - // TODO: make spans store UUID's as a u128 or 2 u64's - #[allow(clippy::expect_used)] - Some(Uuid::parse_str(uuid.as_str()).expect("Unable to parse UUID, this is a bug")) - }) - } -} - -impl Subscriber for TreeSubscriber
<P>
{ - fn enabled(&self, metadata: &Metadata) -> bool { - self.inner.enabled(metadata) - } - - fn max_level_hint(&self) -> Option { - self.inner.max_level_hint() - } - - fn new_span(&self, span: &Attributes) -> Id { - self.inner.new_span(span) - } - - fn record(&self, span: &Id, values: &Record) { - self.inner.record(span, values) - } - - fn record_follows_from(&self, span: &Id, follows: &Id) { - self.inner.record_follows_from(span, follows) - } - - fn event(&self, event: &Event) { - self.inner.event(event) - } - - fn enter(&self, span: &Id) { - self.inner.enter(span) - } - - fn exit(&self, span: &Id) { - self.inner.exit(span) - } - - fn clone_span(&self, id: &Id) -> Id { - self.inner.clone_span(id) - } - - fn try_close(&self, id: Id) -> bool { - self.inner.try_close(id) - } - - unsafe fn downcast_raw(&self, id: TypeId) -> Option<*const ()> { - // Allows us to access this or nested subscribers from dispatch - if id == TypeId::of::() { - Some(self as *const Self as *const ()) - } else { - self.inner.downcast_raw(id) - } - } -} - -impl TreeLayer
<P>
{ - fn log_to_parent(&self, logs: Tree, parent: Option>) { - match parent { - // The parent exists- write to them - #[allow(clippy::expect_used)] - Some(span) => span - .extensions_mut() - .get_mut::() - .expect("Log buffer not found, this is a bug") - .log(logs), - // The parent doesn't exist- send to formatter - None => self.processor.process(TreePreProcessed { - fmt: self.fmt, - logs, - }), - } - } -} - -impl Layer for TreeLayer
<P>
{ - fn on_new_span(&self, attrs: &Attributes, id: &Id, ctx: Context) { - #[allow(clippy::expect_used)] - let span = ctx.span(id).expect("Span not found, this is a bug"); - - let name = attrs.metadata().name(); - let mut uuid = None; - let mut out = TreeIo::Stdout; - - attrs.record( - &mut |field: &Field, value: &dyn fmt::Debug| match field.name() { - "uuid" => { - uuid = Some(format!("{:?}", value)); - } - "output" if ctx.lookup_current().is_none() => { - out = match format!("{:?}", value).as_str() { - "console stdout" => TreeIo::Stdout, - "console stderr" => TreeIo::Stderr, - path => TreeIo::File(PathBuf::from(path)), - }; - } - _ => {} - }, - ); - - // Take provided ID, or make a fresh one if there's no parent span. - let uuid = uuid.or_else(|| { - ctx.lookup_current() - .is_none() - .then(|| Uuid::new_v4().to_string()) - }); - - let mut extensions = span.extensions_mut(); - - extensions.insert(TreeSpan::new(name, uuid, out)); - extensions.insert(Timer::new()); - } - - fn on_event(&self, event: &Event, ctx: Context) { - let (tree_event, immediate) = TreeEvent::parse(event); - - if immediate { - use super::formatter::format_immediate_event; - let maybe_scope = ctx.event_scope(event).map(Scope::from_root); - #[allow(clippy::expect_used)] - let formatted_event = format_immediate_event(&tree_event, maybe_scope) - .expect("Formatting immediate event failed"); - eprintln!("{}", formatted_event); - } - - self.log_to_parent(Tree::Event(tree_event), ctx.event_span(event)); - } - - fn on_enter(&self, id: &Id, ctx: Context) { - #[allow(clippy::expect_used)] - ctx.span(id) - .expect("Span not found, this is a bug") - .extensions_mut() - .get_mut::() - .expect("Timer not found, this is a bug") - .unpause(); - } - - fn on_exit(&self, id: &Id, ctx: Context) { - #[allow(clippy::expect_used)] - ctx.span(id) - .expect("Span not found, this is a bug") - .extensions_mut() - .get_mut::() - .expect("Timer not found, this is a bug") - .pause(); - } - - fn on_close(&self, id: Id, ctx: Context) { - #[allow(clippy::expect_used)] - let span = ctx.span(&id).expect("Span not found, this is a bug"); - - let mut extensions = span.extensions_mut(); - - #[allow(clippy::expect_used)] - let span_buf = extensions - .remove::() - .expect("Span buffer not found, this is a bug"); - - #[allow(clippy::expect_used)] - let duration = extensions - .remove::() - .expect("Timer not found, this is a bug") - .duration(); - - let logs = Tree::Span(span_buf, duration); - - self.log_to_parent(logs, span.parent()); - } -} - -impl TreeEvent { - fn parse(event: &Event) -> (Self, bool) { - let timestamp = Utc::now(); - let level = *event.metadata().level(); - - struct Visitor { - message: String, - tag: Option, - values: Vec<(&'static str, String)>, - immediate: bool, - } - - impl Visit for Visitor { - fn record_u64(&mut self, field: &Field, value: u64) { - if field.name() == "event_tag_id" { - let tag = EventTag::try_from(value).unwrap_or_else(|_| { - error!("Invalid `event_tag_id`: {}, this is a bug", value); - std::process::exit(1) - }); - self.tag = Some(tag); - } else { - self.record_debug(field, &value) - } - } - - fn record_bool(&mut self, field: &Field, value: bool) { - if field.name() == "immediate" { - self.immediate = value; - } else { - self.record_debug(field, &value) - } - } - - fn record_debug(&mut self, field: &Field, value: &dyn fmt::Debug) { - if field.name() == "message" { - use fmt::Write; - #[allow(clippy::expect_used)] - write!(self.message, "{:?}", value).expect("Write failed"); - } else { - 
self.values.push((field.name(), format!("{:?}", value))); - } - } - } - - let mut v = Visitor { - message: String::new(), - tag: None, - values: vec![], - immediate: false, - }; - - event.record(&mut v); - - let Visitor { - message, - tag, - values, - immediate, - } = v; - - ( - TreeEvent { - timestamp, - message, - level, - tag, - values, - }, - immediate, - ) - } - - pub(super) fn emoji(&self) -> &'static str { - self.tag - .map(EventTag::emoji) - .unwrap_or_else(|| match self.level { - Level::ERROR => "๐Ÿšจ", - Level::WARN => "โš ๏ธ ", - Level::INFO => " ", - Level::DEBUG => "๐Ÿ›", - Level::TRACE => "๐Ÿ“", - }) - } - - pub(super) fn tag(&self) -> &'static str { - self.tag - .map(EventTag::pretty) - .unwrap_or_else(|| match self.level { - Level::ERROR => "error", - Level::WARN => "warn", - Level::INFO => "info", - Level::DEBUG => "debug", - Level::TRACE => "trace", - }) - } -} - -impl TreeSpan { - fn new(name: &'static str, uuid: Option, out: TreeIo) -> Self { - TreeSpan { - timestamp: Utc::now(), - name, - buf: vec![], - uuid, - out, - } - } - - fn log(&mut self, logs: Tree) { - self.buf.push(logs) - } -} - -impl Tree { - pub fn process(self) -> TreeProcessed { - match self { - Tree::Event(event) => TreeProcessed::Event(event), - Tree::Span(span_buf, duration) => { - let mut processed_buf = vec![]; - - let nested_duration = span_buf - .buf - .into_iter() - .map(|logs| { - let processed = logs.process(); - - let duration = match processed { - TreeProcessed::Span(ref span) => span.total_duration, - _ => 0, - }; - - // Side effect: Push processed logs to processed_buf - processed_buf.push(processed); - - duration - }) - .sum::(); - - TreeProcessed::Span(TreeSpanProcessed { - timestamp: span_buf.timestamp, - name: span_buf.name, - processed_buf, - uuid: span_buf.uuid, - out: span_buf.out, - nested_duration, - total_duration: duration.as_nanos() as u64, - }) - } - } - } -} - -impl TreePreProcessed { - pub fn process(self) -> io::Result<()> { - let processed_logs = self.logs.process(); - let formatted_logs = self.fmt.format(&processed_logs); - - let buf = &formatted_logs[..]; - - match processed_logs.tree_io() { - TreeIo::Stdout => { - // BUG - we can't write to stdout/err directly because this breaks - // cargo test capturing of io. - // io::stdout().write_all(buf) - - match std::str::from_utf8(buf) { - Ok(s) => print!("{}", s), - Err(e) => eprintln!("CRITICAL - UNABLE TO PRINT BUFFER -> {:?}", e), - } - Ok(()) - } - TreeIo::Stderr => { - // io::stderr().write_all(buf) - - match std::str::from_utf8(buf) { - Ok(s) => eprint!("{}", s), - Err(e) => eprintln!("CRITICAL - UNABLE TO PRINT BUFFER -> {:?}", e), - } - Ok(()) - } - TreeIo::File(ref path) => OpenOptions::new() - .create(true) - .append(true) - .write(true) - .open(path)? - .write_all(buf), - } - } -} - -impl TreeProcessed { - fn tree_io(self) -> TreeIo { - match self { - TreeProcessed::Event(_) => TreeIo::Stdout, - TreeProcessed::Span(TreeSpanProcessed { out, .. }) => out, - } - } -} - -// Returns the UUID of the threads current span operation, or None if not in any spans. 
-#[allow(dead_code)] -pub fn operation_id() -> Option { - tracing::dispatcher::get_default(|dispatch| { - // Try to find the release subscriber - #[allow(clippy::expect_used)] - dispatch - .downcast_ref::>() - .map(TreeSubscriber::::thread_operation_id) - .or_else(|| { - // Try to find the testing subscriber - dispatch - .downcast_ref::>() - .map(TreeSubscriber::::thread_operation_id) - }) - .expect("operation_id only works for `TreeSubscriber`'s!") - }) -} - -pub fn main_init() -> JoinHandle<()> { - let (subscriber, logger) = TreeSubscriber::pretty(); - #[allow(clippy::expect_used)] - let subscriber = EnvFilter::try_from_default_env() - .or_else(|_| EnvFilter::try_new("info")) - .expect("Failed to init envfilter") - .with_subscriber(subscriber); - #[allow(clippy::expect_used)] - tracing::subscriber::set_global_default(subscriber) - .expect("๐Ÿšจ๐Ÿšจ๐Ÿšจ Global subscriber already set, this is a bug ๐Ÿšจ๐Ÿšจ๐Ÿšจ"); - tokio::spawn(logger) -} - -// This should be used in testing only, because it processes logs on the working thread. -// The main benefit is that this makes testing much easier, since it can be called in -// every test without worring about a processing thread in a test holding an `UnboundedReceiver` -// and then getting dropped, making the global subscriber panic on further attempts to send logs. -#[allow(dead_code)] -pub fn test_init() -> Result<(), SetGlobalDefaultError> { - let subscriber = TreeSubscriber { - inner: Registry::default().with(TreeLayer { - fmt: LogFmt::Pretty, - processor: TestProcessor {}, - }), - }; - - tracing::subscriber::set_global_default(subscriber) -} - -#[allow(dead_code)] -pub fn test_level(level: tracing::Level) -> Result<(), SetGlobalDefaultError> { - let subscriber = TreeSubscriber { - inner: Registry::default().with(TreeLayer { - fmt: LogFmt::Pretty, - processor: TestProcessor {}, - }), - }; - - let subscriber = - tracing_subscriber::filter::LevelFilter::from_level(level).with_subscriber(subscriber); - - tracing::subscriber::set_global_default(subscriber) -} diff --git a/kanidmd/idm/src/tracing_tree/timings.rs b/kanidmd/idm/src/tracing_tree/timings.rs deleted file mode 100644 index 1cdf98bfe..000000000 --- a/kanidmd/idm/src/tracing_tree/timings.rs +++ /dev/null @@ -1,28 +0,0 @@ -use std::time::{Duration, Instant}; - -pub struct Timer { - duration: Duration, - start: Instant, -} - -impl Timer { - pub fn new() -> Self { - Timer { - duration: Duration::default(), - start: Instant::now(), - } - } - - pub fn pause(&mut self) { - let stop = Instant::now(); - self.duration += stop - self.start; - } - - pub fn unpause(&mut self) { - self.start = Instant::now(); - } - - pub fn duration(self) -> Duration { - self.duration - } -} diff --git a/kanidmd/score/Cargo.toml b/kanidmd/score/Cargo.toml index d66feced1..655b05c7f 100644 --- a/kanidmd/score/Cargo.toml +++ b/kanidmd/score/Cargo.toml @@ -19,7 +19,6 @@ compact_jwt = "^0.2.3" futures-util = "^0.3.21" http-types = "^2.12.0" kanidm = { path = "../idm" } -kanidm_client = { path = "../../kanidm_client" } kanidm_proto = { path = "../../kanidm_proto" } ldap3_proto = "^0.2.3" libc = "^0.2.126" @@ -27,6 +26,7 @@ openssl = "^0.10.41" regex = "1.5.6" serde = { version = "^1.0.142", features = ["derive"] } serde_json = "^1.0.83" +sketching = { path = "../../sketching" } tide = "^0.16.0" # I tried including brotli and it didn't work, including "default" pulls a mime-type list from the internet on build tide-compress = { version = "0.10.6", default-features = false, features = [ "deflate", "gzip", "regex-check" ] } @@ 
-41,12 +41,8 @@ uuid = { version = "^1.1.2", features = ["serde", "v4" ] } profiles = { path = "../../profiles" } [dev-dependencies] -tracing-subscriber = "^0.3.14" -# tokio = { version = "1", features = ["rt", "net", "time", "macros", "sync", "signal"] } -# kanidm = { path = "../kanidmd" } -# score = { path = "../kanidmd/score" } +kanidm_client = { path = "../../kanidm_client" } futures = "^0.3.21" -# async-std = { version = "1.6", features = ["tokio1"] } webauthn-authenticator-rs = "0.4.2-beta.3" oauth2_ext = { package = "oauth2", version = "^4.1.0", default-features = false } diff --git a/kanidmd/score/src/https/mod.rs b/kanidmd/score/src/https/mod.rs index b0dc82642..403b57f10 100644 --- a/kanidmd/score/src/https/mod.rs +++ b/kanidmd/score/src/https/mod.rs @@ -16,7 +16,6 @@ use kanidm::actors::v1_write::QueryServerWriteV1; use kanidm::config::{ServerRole, TlsConfiguration}; use kanidm::prelude::*; use kanidm::status::StatusActor; -use kanidm::tracing_tree::TreeMiddleware; use serde::Serialize; use std::fs::canonicalize; use std::path::PathBuf; @@ -135,7 +134,7 @@ impl RequestExtensions for tide::Request { } fn new_eventid(&self) -> (Uuid, String) { - let eventid = kanidm::tracing_tree::operation_id().unwrap(); + let eventid = sketching::tracing_forest::id(); let hv = eventid.as_hyphenated().to_string(); (eventid, hv) } @@ -349,7 +348,7 @@ pub fn create_https_server( }); // Add middleware? - tserver.with(TreeMiddleware::with_stdout()); + tserver.with(sketching::middleware::TreeMiddleware::default()); // tserver.with(tide::log::LogMiddleware::new()); // We do not force a session ttl, because we validate this elsewhere in usage. tserver.with( diff --git a/kanidmd/score/src/ldaps.rs b/kanidmd/score/src/ldaps.rs index 4d281f0c3..2616c19e3 100644 --- a/kanidmd/score/src/ldaps.rs +++ b/kanidmd/score/src/ldaps.rs @@ -35,7 +35,7 @@ async fn client_process_msg( protomsg: LdapMsg, qe_r_ref: &'static QueryServerReadV1, ) -> Option { - let eventid = kanidm::tracing_tree::operation_id().unwrap(); + let eventid = sketching::tracing_forest::id(); security_info!( client_ip = %client_address.ip(), client_port = %client_address.port(), diff --git a/kanidmd/score/tests/common.rs b/kanidmd/score/tests/common.rs index 5a1618661..c5f7bc7eb 100644 --- a/kanidmd/score/tests/common.rs +++ b/kanidmd/score/tests/common.rs @@ -3,7 +3,6 @@ use std::sync::atomic::{AtomicU16, Ordering}; use kanidm::audit::LogLevel; use kanidm::config::{Configuration, IntegrationTestConfig, ServerRole}; -use kanidm::tracing_tree; use kanidm_client::{KanidmClient, KanidmClientBuilder}; use score::create_server_core; use tokio::task; @@ -25,7 +24,7 @@ pub fn is_free_port(port: u16) -> bool { // allowed because the use of this function is behind a test gate #[allow(dead_code)] pub async fn setup_async_test() -> KanidmClient { - let _ = tracing_tree::test_init(); + let _ = sketching::test_init(); let mut counter = 0; let port = loop { diff --git a/kanidmd/score/tests/https_middleware.rs b/kanidmd/score/tests/https_middleware.rs index ae659da35..d81f0f5ad 100644 --- a/kanidmd/score/tests/https_middleware.rs +++ b/kanidmd/score/tests/https_middleware.rs @@ -5,7 +5,6 @@ use crate::common::{ADMIN_TEST_PASSWORD, ADMIN_TEST_USER, PORT_ALLOC}; use kanidm::audit::LogLevel; use kanidm::config::{Configuration, IntegrationTestConfig, ServerRole}; -use kanidm::tracing_tree; use score::create_server_core; use tokio::task; @@ -14,7 +13,7 @@ use crate::common::is_free_port; #[tokio::test] async fn test_https_middleware_headers() { // tests stuff - let _ 
= tracing_tree::test_init(); + let _ = sketching::test_init(); let mut counter = 0; let port = loop { diff --git a/sketching/Cargo.toml b/sketching/Cargo.toml new file mode 100644 index 000000000..7ae8ae692 --- /dev/null +++ b/sketching/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "sketching" +version = "0.1.0" +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +async-trait = "^0.1.57" +tide = "^0.16.0" +num_enum = "^0.5.7" + +tracing = { version = "^0.1.35", features = ["attributes", "max_level_trace", "release_max_level_debug"] } +tracing-subscriber = { version = "^0.3.14", features = ["env-filter"] } + +# tracing-forest = { version = "0.1.4", features = ["uuid", "smallvec", "tokio", "env-filter"] } +tracing-forest = { git = "https://github.com/Firstyear/tracing-forest.git", rev = "18d242a4dde060c4946ade0a2c4d5be1df048aea", features = ["uuid", "smallvec", "tokio", "env-filter"] } + diff --git a/kanidmd/idm/src/tracing_tree/event_tag.rs b/sketching/src/lib.rs similarity index 77% rename from kanidmd/idm/src/tracing_tree/event_tag.rs rename to sketching/src/lib.rs index 9b53b087b..039fbf89c 100644 --- a/kanidmd/idm/src/tracing_tree/event_tag.rs +++ b/sketching/src/lib.rs @@ -1,4 +1,31 @@ +#![deny(warnings)] +#![warn(unused_extern_crates)] + use num_enum::{IntoPrimitive, TryFromPrimitive}; +use tracing_forest::{util::*, Tag}; + +pub mod macros; +pub mod middleware; + +pub use tracing; +pub use tracing_forest; +pub use tracing_subscriber; + +pub fn test_init() -> () { + // tracing_subscriber::fmt::try_init() + let _ = tracing_forest::test_init(); + /* + let _ = Registry::default().with(ForestLayer::new( + TestCapturePrinter::new(), + NoTag, + )).try_init(); + */ +} + +/// This is for tagging events. Currently not wired in. +pub fn event_tagger(_event: &Event) -> Option { + None +} #[derive(Debug, Clone, Copy, IntoPrimitive, TryFromPrimitive)] #[repr(u64)] diff --git a/kanidmd/idm/src/tracing_tree/macros.rs b/sketching/src/macros.rs similarity index 89% rename from kanidmd/idm/src/tracing_tree/macros.rs rename to sketching/src/macros.rs index 380c12e34..4dafb1521 100644 --- a/kanidmd/idm/src/tracing_tree/macros.rs +++ b/sketching/src/macros.rs @@ -2,14 +2,14 @@ macro_rules! spanned { ($name:expr, $code:block) => {{ // Block: can short circuit outer function - use tracing::trace_span; - let _entered_span = trace_span!($name).entered(); + use tracing::debug_span; + let _entered_span = debug_span!($name).entered(); $code }}; ($name:expr, || $code:block) => {{ // Closure: cannot short circuit outer function - use tracing::trace_span; - let _entered_span = trace_span!($name).entered(); + use tracing::debug_span; + let _entered_span = debug_span!($name).entered(); (|| $code)() }}; } @@ -100,6 +100,11 @@ macro_rules! filter_info { ($($arg:tt)*) => { tagged_event!(INFO, EventTag::FilterInfo, $($arg)*) } } +#[macro_export] +macro_rules! filter_debug { + ($($arg:tt)*) => { tagged_event!(DEBUG, EventTag::FilterTrace, $($arg)*) } +} + #[macro_export] macro_rules! 
filter_trace { ($($arg:tt)*) => { tagged_event!(TRACE, EventTag::FilterTrace, $($arg)*) } diff --git a/kanidmd/idm/src/tracing_tree/middleware.rs b/sketching/src/middleware.rs similarity index 67% rename from kanidmd/idm/src/tracing_tree/middleware.rs rename to sketching/src/middleware.rs index b4d353bef..b20582950 100644 --- a/kanidmd/idm/src/tracing_tree/middleware.rs +++ b/sketching/src/middleware.rs @@ -1,45 +1,23 @@ -use std::path::PathBuf; - -use super::subscriber::TreeIo; -use crate::prelude::*; +use crate::{request_error, request_info, request_warn, security_info}; use tide::{self, Middleware, Next, Request}; use tracing::{self, instrument}; -// Modeled after: -// https://docs.rs/tide/0.16.0/src/tide/log/middleware.rs.html#23-96 +use crate::*; -pub struct TreeMiddleware { - output: TreeIo, +pub struct TreeMiddleware {} + +impl Default for TreeMiddleware { + fn default() -> Self { + TreeMiddleware {} + } } impl TreeMiddleware { - #[allow(dead_code)] - pub fn with_stdout() -> Self { - TreeMiddleware { - output: TreeIo::Stdout, - } - } - - #[allow(dead_code)] - pub fn with_stderr() -> Self { - TreeMiddleware { - output: TreeIo::Stderr, - } - } - - #[allow(dead_code)] - pub fn with_file(path: &str) -> Self { - TreeMiddleware { - output: TreeIo::File(PathBuf::from(path)), - } - } - - #[instrument(name = "tide-request", skip(self, req, next, output), fields(%output))] + #[instrument(name = "tide-request", skip(self, req, next))] async fn log<'a, State: Clone + Send + Sync + 'static>( &'a self, mut req: Request, next: Next<'a, State>, - output: &str, ) -> tide::Result { struct TreeMiddlewareFinished; @@ -111,20 +89,6 @@ impl TreeMiddleware { #[async_trait::async_trait] impl Middleware for TreeMiddleware { async fn handle(&self, req: Request, next: Next<'_, State>) -> tide::Result { - let output = match self.output { - TreeIo::Stdout => "console stdout", - TreeIo::Stderr => "console stderr", - TreeIo::File(ref path) => path.to_str().unwrap_or_else(|| { - eprintln!("File path isn't UTF-8, cannot write logs to: {:#?}", path); - std::process::exit(1); - // warn!( - // "File path isn't UTF-8, logging to stderr instead: {:#?}", - // path - // ); - // "console stderr" - }), - }; - - self.log(req, next, output).await + self.log(req, next).await } }
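
Usage sketch (illustrative, not part of the patch): with the new `sketching` crate as a path dependency, a downstream test can initialise logging the same way the kanidmd tests in this diff do, via `sketching::test_init()` and the re-exported `tracing` crate. The module, test, and span names below are hypothetical.

#[cfg(test)]
mod sketching_usage {
    use sketching::tracing::{debug, debug_span};

    #[test]
    fn logs_under_the_forest_subscriber() {
        // Install the tracing-forest test subscriber, mirroring the
        // `let _ = sketching::test_init();` calls in the tests above.
        let _ = sketching::test_init();

        // Keep the entered span alive for the duration of the test body so
        // the event below is recorded inside it.
        let _span = debug_span!("example_operation").entered();
        debug!("event emitted through the tracing crate re-exported by sketching");
    }
}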