diff --git a/Cargo.lock b/Cargo.lock index 559753f57..47ce9e2cf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -365,9 +365,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "0.4.6" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b585a98a234c46fc563103e9278c9391fde1f4e6850334da895d27edb9580f62" +checksum = "4d25d88fd6b8041580a654f9d0c581a047baee2b3efee13275f2fc392fc75034" [[package]] name = "arrayref" @@ -383,9 +383,9 @@ checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" [[package]] name = "async-trait" -version = "0.1.31" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26c4f3195085c36ea8d24d32b2f828d23296a9370a28aa39d111f6f16bef9f3b" +checksum = "89cb5d814ab2a47fd66d3266e9efccb53ca4c740b7451043b8ffcf9a6208f3f8" dependencies = [ "proc-macro2", "quote", @@ -556,9 +556,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5356f1d23ee24a1f785a56d1d1a5f0fd5b0f6a0c0fb2412ce11da71649ab78f6" +checksum = "2e8c087f005730276d1096a652e92a8bacee2e2472bcc9715a74d2bec38b5820" [[package]] name = "byte-tools" @@ -647,10 +647,11 @@ dependencies = [ [[package]] name = "concread" -version = "0.1.12" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f9eb637262518ed76134ca6ce47088c60e514c330f89f32d88ac72b10458bb" +checksum = "7ca3d5adc408121d96cb5e3ca77656ca0b6e96429c244086d300ca9ed6a2fd1d" dependencies = [ + "crossbeam", "crossbeam-epoch", "crossbeam-utils", "num", @@ -701,9 +702,9 @@ dependencies = [ [[package]] name = "copyless" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff9c56c9fb2a49c05ef0e431485a22400af20d33226dc0764d891d09e724127" +checksum = "a2df960f5d869b2dd8532793fde43eb5427cceb126c929747a26823ab0eeb536" [[package]] name = "core-foundation" @@ -950,11 +951,10 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa0b23de8fd801745c471deffa6e12d248f962c9fd4b4c33787b055599bde7b" +checksum = "8e93d7f5705de3e49895a2b5e0b8855a1c27f080192ae9c32a6432d50741a57a" dependencies = [ - "cfg-if", "libc", "redox_users", "winapi 0.3.8", @@ -1420,9 +1420,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "076f042c5b7b98f31d205f1249267e12a6518c1481e9dae9764af19b707d2292" +checksum = "c398b2b113b55809ceb9ee3e753fcbac793f1956663f3c36549c1346015c2afe" dependencies = [ "autocfg", ] @@ -1499,9 +1499,11 @@ dependencies = [ "crossbeam", "env_logger", "futures", + "futures-util", "idlset", "kanidm_proto", "lazy_static", + "ldap3_server", "libsqlite3-sys", "log", "num_cpus", @@ -1520,6 +1522,8 @@ dependencies = [ "structopt", "time 0.1.43", "tokio", + "tokio-openssl", + "tokio-util 0.2.0", "uuid", "zxcvbn", ] @@ -1623,6 +1627,28 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lber" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a749954d43fcfb8d4381aa0c6cf291065053e0590d622f4f830393a9bd8278a5" +dependencies = [ + "byteorder", + "bytes", + "nom 2.2.1", 
+] + +[[package]] +name = "ldap3_server" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b32e8fda95d5e76c853678774ba647797b13632e41fbc7cfd9afe0869a232d" +dependencies = [ + "bytes", + "lber", + "tokio-util 0.2.0", +] + [[package]] name = "libc" version = "0.2.71" @@ -1768,7 +1794,7 @@ checksum = "f5e374eff525ce1c5b7687c4cef63943e7686524a387933ad27ca7ec43779cb3" dependencies = [ "log", "mio", - "miow 0.3.4", + "miow 0.3.5", "winapi 0.3.8", ] @@ -1797,9 +1823,9 @@ dependencies = [ [[package]] name = "miow" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22dfdd1d51b2639a5abd17ed07005c3af05fb7a2a3b1a1d0d7af1000a520c1c7" +checksum = "07b88fb9795d4d36d62a012dfbf49a8f5cf12751f36d31a9dbe66d528e58979e" dependencies = [ "socket2", "winapi 0.3.8", @@ -1840,6 +1866,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" +[[package]] +name = "nom" +version = "2.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf51a729ecf40266a2368ad335a5fdde43471f545a967109cd62146ecf8b66ff" + [[package]] name = "nom" version = "4.2.3" @@ -1997,9 +2029,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.57" +version = "0.9.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7410fef80af8ac071d4f63755c0ab89ac3df0fd1ea91f1d1f37cf5cec4395990" +checksum = "a842db4709b604f0fe5d1170ae3565899be2ad3d9cbc72dedc789ac0511f78de" dependencies = [ "autocfg", "cc", @@ -2045,9 +2077,9 @@ dependencies = [ [[package]] name = "paste" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53181dcd37421c08d3b69f887784956674d09c3f9a47a04fece2b130a5b346b" +checksum = "d508492eeb1e5c38ee696371bf7b9fc33c83d46a7d451606b96458fbbbdc2dec" dependencies = [ "paste-impl", "proc-macro-hack", @@ -2055,9 +2087,9 @@ dependencies = [ [[package]] name = "paste-impl" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ca490fa1c034a71412b4d1edcb904ec5a0981a4426c9eb2128c0fda7a68d17" +checksum = "84f328a6a63192b333fce5fbb4be79db6758a4d518dfac6d54412f1492f72d32" dependencies = [ "proc-macro-hack", "proc-macro2", @@ -2079,18 +2111,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "0.4.17" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc93aeee735e60ecb40cf740eb319ff23eab1c5748abfdb5c180e4ce49f7791" +checksum = "e75373ff9037d112bb19bc61333a06a159eaeb217660dcfbea7d88e1db823919" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "0.4.17" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e58db2081ba5b4c93bd6be09c40fd36cb9193a8336c384f3b40012e531aa7e40" +checksum = "10b4b44893d3c370407a1d6a5cfde7c41ae0478e31c516c85f67eb3adc51be6d" dependencies = [ "proc-macro2", "quote", @@ -2099,9 +2131,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9df32da11d84f3a7d70205549562966279adb900e080fad3dccd8e64afccf0ad" +checksum = "282adbf10f2698a7a77f8e983a74b2d18176c19a7fd32a45446139ae7b02b715" 
[[package]] name = "pin-utils" @@ -2167,9 +2199,9 @@ checksum = "7e0456befd48169b9f13ef0f0ad46d492cf9d2dbb918bcf38e01eed4ce3ec5e4" [[package]] name = "proc-macro-nested" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" +checksum = "0afe1bd463b9e9ed51d0e0f0b50b6b146aec855c56fd182bb242388710a9b6de" [[package]] name = "proc-macro2" @@ -2201,9 +2233,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" +checksum = "aa563d17ecb180e500da1cfd2b028310ac758de548efdd203e18f283af693f37" dependencies = [ "proc-macro2", ] @@ -2578,9 +2610,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.53" +version = "1.0.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993948e75b189211a9b31a7528f950c6adc21f9720b6438ff80a7fa2f864cea2" +checksum = "cfe4c1f6427dbc29329c6288e9e748b8b8e0ea42a0aab733e887fa72c22e965d" dependencies = [ "itoa", "ryu", @@ -2683,9 +2715,12 @@ dependencies = [ [[package]] name = "standback" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47e4b8c631c998468961a9ea159f064c5c8499b95b5e4a34b77849d45949d540" +checksum = "b0437cfb83762844799a60e1e3b489d5ceb6a650fbacb86437badc1b6d87b246" +dependencies = [ + "version_check 0.9.2", +] [[package]] name = "stdweb" @@ -2790,9 +2825,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" +checksum = "b834f2d66f734cb897113e34aaff2f1ab4719ca946f9a7358dba8f8064148701" dependencies = [ "proc-macro2", "quote", @@ -3175,7 +3210,7 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2ca2a14bc3fc5b64d188b087a7d3a927df87b152e941ccfbc66672e20c467ae" dependencies = [ - "nom", + "nom 4.2.3", "proc-macro2", "quote", "syn", @@ -3316,9 +3351,9 @@ dependencies = [ [[package]] name = "widestring" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effc0e4ff8085673ea7b9b2e3c73f6bd4d118810c9009ed8f1e16bd96c331db6" +checksum = "a763e303c0e0f23b0da40888724762e802a8ffefbc22de4127ef42493c2ea68c" [[package]] name = "winapi" diff --git a/Cargo.toml b/Cargo.toml index 6b8a5beca..2c9af436c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,7 @@ +[profile.release] +debug = true + [workspace] members = [ "kanidm_proto", diff --git a/kanidm_client/tests/common.rs b/kanidm_client/tests/common.rs index 05cd193f9..eb949f7d4 100644 --- a/kanidm_client/tests/common.rs +++ b/kanidm_client/tests/common.rs @@ -28,23 +28,24 @@ pub fn run_test(test_fn: fn(KanidmClient) -> ()) { admin_password: ADMIN_TEST_PASSWORD.to_string(), }); + // Setup the config ... let mut config = Configuration::new(); config.address = format!("127.0.0.1:{}", port); config.secure_cookies = false; config.integration_test_config = Some(int_config); - // Setup the config ... - thread::spawn(move || { // Spawn a thread for the test runner, this should have a unique // port.... 
- System::run(move || { - let sctx = create_server_core(config); + let system = System::new("test-rctx"); - // This appears to be bind random ... - // let srv = srv.bind("127.0.0.1:0").unwrap(); + let rctx = async move { + let sctx = create_server_core(config).await; let _ = tx.send(sctx); - }) - .expect("unable to start system"); + }; + + Arbiter::spawn(rctx); + + system.run().expect("Failed to start thread"); }); let sctx = rx.recv().unwrap().expect("failed to start ctx"); System::set_current(sctx.current()); diff --git a/kanidm_proto/src/v1.rs b/kanidm_proto/src/v1.rs index 30d643e6d..a9854a696 100644 --- a/kanidm_proto/src/v1.rs +++ b/kanidm_proto/src/v1.rs @@ -152,6 +152,7 @@ pub struct UserAuthToken { // may depend on the client application. // pub expiry: DateTime, pub name: String, + pub spn: String, pub displayname: String, pub uuid: String, pub application: Option, @@ -163,6 +164,7 @@ pub struct UserAuthToken { impl fmt::Display for UserAuthToken { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { writeln!(f, "name: {}", self.name)?; + writeln!(f, "spn: {}", self.spn)?; writeln!(f, "display: {}", self.displayname)?; writeln!(f, "uuid: {}", self.uuid)?; writeln!(f, "groups: {:?}", self.groups)?; diff --git a/kanidm_unix_int/tests/cache_layer_test.rs b/kanidm_unix_int/tests/cache_layer_test.rs index 07f5251fd..e5718553f 100644 --- a/kanidm_unix_int/tests/cache_layer_test.rs +++ b/kanidm_unix_int/tests/cache_layer_test.rs @@ -28,22 +28,26 @@ fn run_test(fix_fn: fn(&KanidmClient) -> (), test_fn: fn(CacheLayer, KanidmAsync admin_password: ADMIN_TEST_PASSWORD.to_string(), }); + // Setup the config ... let mut config = Configuration::new(); config.address = format!("127.0.0.1:{}", port); config.secure_cookies = false; config.integration_test_config = Some(int_config); - // Setup the config ... - thread::spawn(move || { // Spawn a thread for the test runner, this should have a unique // port.... - System::run(move || { - let sctx = create_server_core(config); + let system = System::new("test-rctx"); + + let rctx = async move { + let sctx = create_server_core(config).await; let _ = tx.send(sctx); - }) - .expect("Failed to start system"); + }; + + Arbiter::spawn(rctx); + + system.run().expect("Failed to start thread"); }); - let sctx = rx.recv().unwrap().expect("Failed to start server core"); + let sctx = rx.recv().unwrap().expect("failed to start ctx"); System::set_current(sctx.current()); // Setup the client, and the address we selected. 
diff --git a/kanidmd/Cargo.toml b/kanidmd/Cargo.toml index 0230ad59f..45d8d20ee 100644 --- a/kanidmd/Cargo.toml +++ b/kanidmd/Cargo.toml @@ -77,6 +77,12 @@ idlset = { version = "0.1" , features = ["use_smallvec"] } zxcvbn = "2.0" base64 = "0.12" +ldap3_server = "0.1" +# ldap3_server = { path = "../../ldap3_server" } +futures-util = "0.3" +tokio-util = { version = "0.2", features = ["codec"] } +tokio-openssl = "0.4" + [features] default = [ "libsqlite3-sys/bundled", "openssl/vendored" ] diff --git a/kanidmd/Dockerfile b/kanidmd/Dockerfile index 06fe8baf5..25464200b 100644 --- a/kanidmd/Dockerfile +++ b/kanidmd/Dockerfile @@ -42,7 +42,7 @@ RUN zypper mr -d repo-non-oss && \ COPY --from=builder /usr/src/kanidm/target/release/kanidmd /sbin/ -EXPOSE 8443 +EXPOSE 8443 3636 VOLUME /data ENV RUST_BACKTRACE 1 diff --git a/kanidmd/src/lib/access.rs b/kanidmd/src/lib/access.rs index 6a06a8384..f7f6989d3 100644 --- a/kanidmd/src/lib/access.rs +++ b/kanidmd/src/lib/access.rs @@ -359,136 +359,138 @@ pub trait AccessControlsTransaction { se: &SearchEvent, entries: Vec>, ) -> Result>, OperationError> { - lsecurity_access!(audit, "Access check for event: {:?}", se); + lperf_segment!(audit, "access::search_filter_entries", || { + lsecurity_access!(audit, "Access check for event: {:?}", se); - // If this is an internal search, return our working set. - let rec_entry: &Entry = match &se.event.origin { - EventOrigin::Internal => { - lsecurity_access!(audit, "Internal operation, bypassing access check"); - // No need to check ACS - return Ok(entries); - } - EventOrigin::User(e) => &e, - }; + // If this is an internal search, return our working set. + let rec_entry: &Entry = match &se.event.origin { + EventOrigin::Internal => { + lsecurity_access!(audit, "Internal operation, bypassing access check"); + // No need to check ACS + return Ok(entries); + } + EventOrigin::User(e) => &e, + }; - // Some useful references we'll use for the remainder of the operation - let search_state = self.get_search(); + // Some useful references we'll use for the remainder of the operation + let search_state = self.get_search(); - // First get the set of acps that apply to this receiver - let related_acp: Vec<&AccessControlSearch> = search_state - .iter() - .filter_map(|(_, acs)| { - // Now resolve the receiver filter - // Okay, so in filter resolution, the primary error case - // is that we have a non-user in the event. We have already - // checked for this above BUT we should still check here - // properly just in case. - // - // In this case, we assume that if the event is internal - // that the receiver can NOT match because it has no selfuuid - // and can as a result, never return true. This leads to this - // acp not being considered in that case ... which should never - // happen because we already bypassed internal ops above! - // - // A possible solution is to change the filter resolve function - // such that it takes an entry, rather than an event, but that - // would create issues in search. - let f_val = acs.acp.receiver.clone(); - match f_val.resolve(&se.event, None) { - Ok(f_res) => { - if rec_entry.entry_match_no_index(&f_res) { - Some(acs) - } else { + // First get the set of acps that apply to this receiver + let related_acp: Vec<&AccessControlSearch> = search_state + .iter() + .filter_map(|(_, acs)| { + // Now resolve the receiver filter + // Okay, so in filter resolution, the primary error case + // is that we have a non-user in the event. 
We have already + // checked for this above BUT we should still check here + // properly just in case. + // + // In this case, we assume that if the event is internal + // that the receiver can NOT match because it has no selfuuid + // and can as a result, never return true. This leads to this + // acp not being considered in that case ... which should never + // happen because we already bypassed internal ops above! + // + // A possible solution is to change the filter resolve function + // such that it takes an entry, rather than an event, but that + // would create issues in search. + let f_val = acs.acp.receiver.clone(); + match f_val.resolve(&se.event, None) { + Ok(f_res) => { + if rec_entry.entry_match_no_index(&f_res) { + Some(acs) + } else { + None + } + } + Err(e) => { + ladmin_error!( + audit, + "A internal filter was passed for resolution!?!? {:?}", + e + ); None } } - Err(e) => { - ladmin_error!( - audit, - "A internal filter was passed for resolution!?!? {:?}", - e - ); - None - } - } - }) - .collect(); + }) + .collect(); - related_acp.iter().for_each(|racp| { - lsecurity_access!(audit, "Related acs -> {:?}", racp.acp.name); - }); + related_acp.iter().for_each(|racp| { + lsecurity_access!(audit, "Related acs -> {:?}", racp.acp.name); + }); - // Get the set of attributes requested by this se filter. This is what we are - // going to access check. - let requested_attrs: BTreeSet<&str> = se.filter_orig.get_attr_set(); + // Get the set of attributes requested by this se filter. This is what we are + // going to access check. + let requested_attrs: BTreeSet<&str> = se.filter_orig.get_attr_set(); - // For each entry - let allowed_entries: Vec> = entries - .into_iter() - .filter(|e| { - // For each acp - let allowed_attrs: BTreeSet<&str> = related_acp - .iter() - .filter_map(|acs| { - let f_val = acs.acp.targetscope.clone(); - match f_val.resolve(&se.event, None) { - Ok(f_res) => { - // if it applies - if e.entry_match_no_index(&f_res) { - lsecurity_access!( + // For each entry + let allowed_entries: Vec> = entries + .into_iter() + .filter(|e| { + // For each acp + let allowed_attrs: BTreeSet<&str> = related_acp + .iter() + .filter_map(|acs| { + let f_val = acs.acp.targetscope.clone(); + match f_val.resolve(&se.event, None) { + Ok(f_res) => { + // if it applies + if e.entry_match_no_index(&f_res) { + lsecurity_access!( + audit, + "entry {:?} matches acs {:?}", + e.get_uuid(), + acs + ); + // add search_attrs to allowed. + let r: Vec<&str> = + acs.attrs.iter().map(|s| s.as_str()).collect(); + Some(r) + } else { + lsecurity_access!( + audit, + "entry {:?} DOES NOT match acs {:?}", + e.get_uuid(), + acs + ); + None + } + } + Err(e) => { + ladmin_error!( audit, - "entry {:?} matches acs {:?}", - e.get_uuid(), - acs - ); - // add search_attrs to allowed. - let r: Vec<&str> = - acs.attrs.iter().map(|s| s.as_str()).collect(); - Some(r) - } else { - lsecurity_access!( - audit, - "entry {:?} DOES NOT match acs {:?}", - e.get_uuid(), - acs + "A internal filter was passed for resolution!?!? {:?}", + e ); None } } - Err(e) => { - ladmin_error!( - audit, - "A internal filter was passed for resolution!?!? 
{:?}", - e - ); - None - } - } - }) - .flatten() - .collect(); + }) + .flatten() + .collect(); - lsecurity_access!(audit, "-- for entry --> {:?}", e.get_uuid()); - lsecurity_access!(audit, "allowed attributes --> {:?}", allowed_attrs); - lsecurity_access!(audit, "requested attributes --> {:?}", requested_attrs); + lsecurity_access!(audit, "-- for entry --> {:?}", e.get_uuid()); + lsecurity_access!(audit, "allowed attributes --> {:?}", allowed_attrs); + lsecurity_access!(audit, "requested attributes --> {:?}", requested_attrs); - // is attr set a subset of allowed set? - // true -> entry is allowed in result set - // false -> the entry is not allowed to be searched by this entity, so is - // excluded. - let decision = requested_attrs.is_subset(&allowed_attrs); - lsecurity_access!(audit, "search attr decision --> {:?}", decision); - decision - }) - .collect(); + // is attr set a subset of allowed set? + // true -> entry is allowed in result set + // false -> the entry is not allowed to be searched by this entity, so is + // excluded. + let decision = requested_attrs.is_subset(&allowed_attrs); + lsecurity_access!(audit, "search attr decision --> {:?}", decision); + decision + }) + .collect(); - if allowed_entries.len() > 0 { - lsecurity_access!(audit, "allowed {} entries ✅", allowed_entries.len()); - } else { - lsecurity_access!(audit, "denied ❌"); - } + if allowed_entries.len() > 0 { + lsecurity_access!(audit, "allowed {} entries ✅", allowed_entries.len()); + } else { + lsecurity_access!(audit, "denied ❌"); + } - Ok(allowed_entries) + Ok(allowed_entries) + }) } fn search_filter_entry_attributes( @@ -497,161 +499,163 @@ pub trait AccessControlsTransaction { se: &SearchEvent, entries: Vec>, ) -> Result>, OperationError> { - /* - * Super similar to above (could even re-use some parts). Given a set of entries, - * reduce the attribute sets on them to "what is visible". This is ONLY called on - * the server edge, such that clients only see what they can, but internally, - * impersonate and such actually still get the whole entry back as not to break - * modify and co. - */ - lsecurity_access!(audit, "Access check and reduce for event: {:?}", se); + lperf_segment!(audit, "access::search_filter_entry_attributes", || { + /* + * Super similar to above (could even re-use some parts). Given a set of entries, + * reduce the attribute sets on them to "what is visible". This is ONLY called on + * the server edge, such that clients only see what they can, but internally, + * impersonate and such actually still get the whole entry back as not to break + * modify and co. + */ + lsecurity_access!(audit, "Access check and reduce for event: {:?}", se); - // If this is an internal search, do nothing. How this occurs in this - // interface is beyond me .... - let rec_entry: &Entry = match &se.event.origin { - EventOrigin::Internal => { - if cfg!(test) { - lsecurity_access!(audit, "TEST: Internal search in external interface - allowing due to cfg test ..."); - // In tests we just push everything back. - return Ok(entries - .into_iter() - .map(|e| unsafe { e.into_reduced() }) - .collect()); - } else { - // In production we can't risk leaking data here, so we return - // empty sets. - ladmin_error!(audit, "IMPOSSIBLE STATE: Internal search in external interface?! Returning empty for safety."); - // No need to check ACS - return Ok(Vec::new()); + // If this is an internal search, do nothing. How this occurs in this + // interface is beyond me .... 
+ let rec_entry: &Entry = match &se.event.origin { + EventOrigin::Internal => { + if cfg!(test) { + lsecurity_access!(audit, "TEST: Internal search in external interface - allowing due to cfg test ..."); + // In tests we just push everything back. + return Ok(entries + .into_iter() + .map(|e| unsafe { e.into_reduced() }) + .collect()); + } else { + // In production we can't risk leaking data here, so we return + // empty sets. + ladmin_error!(audit, "IMPOSSIBLE STATE: Internal search in external interface?! Returning empty for safety."); + // No need to check ACS + return Ok(Vec::new()); + } } - } - EventOrigin::User(e) => &e, - }; + EventOrigin::User(e) => &e, + }; - // Some useful references we'll use for the remainder of the operation - let search_state = self.get_search(); + // Some useful references we'll use for the remainder of the operation + let search_state = self.get_search(); - // Get the relevant acps for this receiver. - let related_acp: Vec<&AccessControlSearch> = search_state - .iter() - .filter_map(|(_, acs)| { - let f_val = acs.acp.receiver.clone(); - match f_val.resolve(&se.event, None) { - Ok(f_res) => { - // Is our user covered by this acs? - if rec_entry.entry_match_no_index(&f_res) { - // If so, let's check if the attr request is relevant. + // Get the relevant acps for this receiver. + let related_acp: Vec<&AccessControlSearch> = search_state + .iter() + .filter_map(|(_, acs)| { + let f_val = acs.acp.receiver.clone(); + match f_val.resolve(&se.event, None) { + Ok(f_res) => { + // Is our user covered by this acs? + if rec_entry.entry_match_no_index(&f_res) { + // If so, let's check if the attr request is relevant. - // If we have a requested attr set, are any of them - // in the attrs this acs covers? - let acs_target_attrs = match &se.attrs { - Some(r_attrs) => acs.attrs.intersection(r_attrs).count(), - // All attrs requested, do nothing. - None => acs.attrs.len(), - }; + // If we have a requested attr set, are any of them + // in the attrs this acs covers? + let acs_target_attrs = match &se.attrs { + Some(r_attrs) => acs.attrs.intersection(r_attrs).count(), + // All attrs requested, do nothing. + None => acs.attrs.len(), + }; - // There is nothing in the ACS (not possible) or - // no overlap between the requested set and this acs, so it's - // not worth evaling. - if acs_target_attrs == 0 { - None + // There is nothing in the ACS (not possible) or + // no overlap between the requested set and this acs, so it's + // not worth evaling. + if acs_target_attrs == 0 { + None + } else { + Some(acs) + } } else { - Some(acs) + None } - } else { + } + Err(e) => { + ladmin_error!( + audit, + "A internal filter was passed for resolution!?!? {:?}", + e + ); None } } - Err(e) => { - ladmin_error!( - audit, - "A internal filter was passed for resolution!?!? 
{:?}", - e - ); - None - } - } - }) - .collect(); + }) + .collect(); - related_acp.iter().for_each(|racp| { - lsecurity_access!(audit, "Related acs -> {:?}", racp.acp.name); - }); + related_acp.iter().for_each(|racp| { + lsecurity_access!(audit, "Related acs -> {:?}", racp.acp.name); + }); - // Build a reference set from the req_attrs - let req_attrs: Option> = se - .attrs - .as_ref() - .map(|vs| vs.iter().map(|s| s.as_str()).collect()); + // Build a reference set from the req_attrs + let req_attrs: Option> = se + .attrs + .as_ref() + .map(|vs| vs.iter().map(|s| s.as_str()).collect()); - // For each entry - let allowed_entries: Vec> = entries - .into_iter() - .map(|e| { - // Get the set of attributes you can see - let allowed_attrs: BTreeSet<&str> = related_acp - .iter() - .filter_map(|acs| { - let f_val = acs.acp.targetscope.clone(); - match f_val.resolve(&se.event, None) { - Ok(f_res) => { - // if it applies - if e.entry_match_no_index(&f_res) { - lsecurity_access!( + // For each entry + let allowed_entries: Vec> = entries + .into_iter() + .map(|e| { + // Get the set of attributes you can see + let allowed_attrs: BTreeSet<&str> = related_acp + .iter() + .filter_map(|acs| { + let f_val = acs.acp.targetscope.clone(); + match f_val.resolve(&se.event, None) { + Ok(f_res) => { + // if it applies + if e.entry_match_no_index(&f_res) { + lsecurity_access!( + audit, + "entry {:?} matches acs {:?}", + e.get_uuid(), + acs + ); + // add search_attrs to allowed. + let r: Vec<&str> = + acs.attrs.iter().map(|s| s.as_str()).collect(); + Some(r) + } else { + lsecurity_access!( + audit, + "entry {:?} DOES NOT match acs {:?}", + e.get_uuid(), + acs + ); + None + } + } + Err(e) => { + ladmin_error!( audit, - "entry {:?} matches acs {:?}", - e.get_uuid(), - acs - ); - // add search_attrs to allowed. - let r: Vec<&str> = - acs.attrs.iter().map(|s| s.as_str()).collect(); - Some(r) - } else { - lsecurity_access!( - audit, - "entry {:?} DOES NOT match acs {:?}", - e.get_uuid(), - acs + "A internal filter was passed for resolution!?!? {:?}", + e ); None } } - Err(e) => { - ladmin_error!( - audit, - "A internal filter was passed for resolution!?!? {:?}", - e - ); - None - } - } - }) - .flatten() - .collect(); + }) + .flatten() + .collect(); - // Remove all others that are present on the entry. - lsecurity_access!(audit, "-- for entry --> {:?}", e.get_uuid()); - lsecurity_access!(audit, "requested attributes --> {:?}", req_attrs); - lsecurity_access!(audit, "allowed attributes --> {:?}", allowed_attrs); + // Remove all others that are present on the entry. + lsecurity_access!(audit, "-- for entry --> {:?}", e.get_uuid()); + lsecurity_access!(audit, "requested attributes --> {:?}", req_attrs); + lsecurity_access!(audit, "allowed attributes --> {:?}", allowed_attrs); - // Remove anything that wasn't requested. - let f_allowed_attrs: BTreeSet<&str> = match &req_attrs { - Some(v) => allowed_attrs.intersection(&v).copied().collect(), - None => allowed_attrs, - }; + // Remove anything that wasn't requested. + let f_allowed_attrs: BTreeSet<&str> = match &req_attrs { + Some(v) => allowed_attrs.intersection(&v).copied().collect(), + None => allowed_attrs, + }; - // Now purge the attrs that are NOT in this. - e.reduce_attributes(f_allowed_attrs) - }) - .collect(); + // Now purge the attrs that are NOT in this. 
+ e.reduce_attributes(f_allowed_attrs) + }) + .collect(); - lsecurity_access!( - audit, - "attribute set reduced on {} entries", - allowed_entries.len() - ); - Ok(allowed_entries) + lsecurity_access!( + audit, + "attribute set reduced on {} entries", + allowed_entries.len() + ); + Ok(allowed_entries) + }) } fn modify_allow_operation( @@ -660,198 +664,205 @@ pub trait AccessControlsTransaction { me: &ModifyEvent, entries: &[Entry], ) -> Result { - lsecurity_access!(audit, "Access check for event: {:?}", me); + lperf_segment!(audit, "access::modify_allow_operation", || { + lsecurity_access!(audit, "Access check for event: {:?}", me); - let rec_entry: &Entry = match &me.event.origin { - EventOrigin::Internal => { - lsecurity_access!(audit, "Internal operation, bypassing access check"); - // No need to check ACS - return Ok(true); - } - EventOrigin::User(e) => &e, - }; - - // Some useful references we'll use for the remainder of the operation - let modify_state = self.get_modify(); - - // Pre-check if the no-no purge class is present - let disallow = me.modlist.iter().fold(false, |acc, m| { - if acc { - acc - } else { - match m { - Modify::Purged(a) => a == "class", - _ => false, + let rec_entry: &Entry = match &me.event.origin { + EventOrigin::Internal => { + lsecurity_access!(audit, "Internal operation, bypassing access check"); + // No need to check ACS + return Ok(true); } - } - }); - if disallow { - lsecurity_access!(audit, "Disallowing purge class in modification"); - return Ok(false); - } + EventOrigin::User(e) => &e, + }; - // Find the acps that relate to the caller. - let related_acp: Vec<&AccessControlModify> = modify_state - .iter() - .filter_map(|(_, acs)| { - let f_val = acs.acp.receiver.clone(); - match f_val.resolve(&me.event, None) { - Ok(f_res) => { - if rec_entry.entry_match_no_index(&f_res) { - Some(acs) + // Some useful references we'll use for the remainder of the operation + let modify_state = self.get_modify(); + + // Pre-check if the no-no purge class is present + let disallow = me.modlist.iter().fold(false, |acc, m| { + if acc { + acc + } else { + match m { + Modify::Purged(a) => a == "class", + _ => false, + } + } + }); + if disallow { + lsecurity_access!(audit, "Disallowing purge class in modification"); + return Ok(false); + } + + // Find the acps that relate to the caller. + let related_acp: Vec<&AccessControlModify> = modify_state + .iter() + .filter_map(|(_, acs)| { + let f_val = acs.acp.receiver.clone(); + match f_val.resolve(&me.event, None) { + Ok(f_res) => { + if rec_entry.entry_match_no_index(&f_res) { + Some(acs) + } else { + None + } + } + Err(e) => { + ladmin_error!( + audit, + "A internal filter was passed for resolution!?!? {:?}", + e + ); + None + } + } + }) + .collect(); + + related_acp.iter().for_each(|racp| { + lsecurity_access!(audit, "Related acs -> {:?}", racp.acp.name); + }); + + // build two sets of "requested pres" and "requested rem" + let requested_pres: BTreeSet<&str> = me + .modlist + .iter() + .filter_map(|m| match m { + Modify::Present(a, _) => Some(a.as_str()), + _ => None, + }) + .collect(); + + let requested_rem: BTreeSet<&str> = me + .modlist + .iter() + .filter_map(|m| match m { + Modify::Removed(a, _) => Some(a.as_str()), + Modify::Purged(a) => Some(a.as_str()), + _ => None, + }) + .collect(); + + // Build the set of classes that we to work on, only in terms of "addition". To remove + // I think we have no limit, but ... william of the future may find a problem with this + // policy. 
+ let requested_classes: BTreeSet<&str> = me + .modlist + .iter() + .filter_map(|m| match m { + Modify::Present(a, v) => { + if a.as_str() == "class" { + // Here we have an option<&str> which could mean there is a risk of + // a malicious entity attempting to trick us by masking class mods + // in non-iutf8 types. However, the server first won't respect their + // existance, and second, we would have failed the mod at schema checking + // earlier in the process as these were not correctly type. As a result + // we can trust these to be correct here and not to be "None". + Some(v.to_str_unwrap()) } else { None } } - Err(e) => { - ladmin_error!( - audit, - "A internal filter was passed for resolution!?!? {:?}", - e - ); - None + Modify::Removed(a, v) => { + if a.as_str() == "class" { + Some(v.to_str_unwrap()) + } else { + None + } } - } - }) - .collect(); + _ => None, + }) + .collect(); - related_acp.iter().for_each(|racp| { - lsecurity_access!(audit, "Related acs -> {:?}", racp.acp.name); - }); + lsecurity_access!(audit, "Requested present set: {:?}", requested_pres); + lsecurity_access!(audit, "Requested remove set: {:?}", requested_rem); + lsecurity_access!(audit, "Requested class set: {:?}", requested_classes); - // build two sets of "requested pres" and "requested rem" - let requested_pres: BTreeSet<&str> = me - .modlist - .iter() - .filter_map(|m| match m { - Modify::Present(a, _) => Some(a.as_str()), - _ => None, - }) - .collect(); - - let requested_rem: BTreeSet<&str> = me - .modlist - .iter() - .filter_map(|m| match m { - Modify::Removed(a, _) => Some(a.as_str()), - Modify::Purged(a) => Some(a.as_str()), - _ => None, - }) - .collect(); - - // Build the set of classes that we to work on, only in terms of "addition". To remove - // I think we have no limit, but ... william of the future may find a problem with this - // policy. - let requested_classes: BTreeSet<&str> = me - .modlist - .iter() - .filter_map(|m| match m { - Modify::Present(a, v) => { - if a.as_str() == "class" { - // Here we have an option<&str> which could mean there is a risk of - // a malicious entity attempting to trick us by masking class mods - // in non-iutf8 types. However, the server first won't respect their - // existance, and second, we would have failed the mod at schema checking - // earlier in the process as these were not correctly type. As a result - // we can trust these to be correct here and not to be "None". - Some(v.to_str_unwrap()) - } else { - None - } - } - Modify::Removed(a, v) => { - if a.as_str() == "class" { - Some(v.to_str_unwrap()) - } else { - None - } - } - _ => None, - }) - .collect(); - - lsecurity_access!(audit, "Requested present set: {:?}", requested_pres); - lsecurity_access!(audit, "Requested remove set: {:?}", requested_rem); - lsecurity_access!(audit, "Requested class set: {:?}", requested_classes); - - let r = entries.iter().fold(true, |acc, e| { - if !acc { - false - } else { - // For this entry, find the acp's that apply to it from the - // set that apply to the entry that is performing the operation - let scoped_acp: Vec<&AccessControlModify> = related_acp - .iter() - .filter_map(|acm: &&AccessControlModify| { - // We are continually compiling and using these - // in a tight loop, so this is a possible oppurtunity - // to cache or handle these filters better - filter compiler - // cache maybe? 
- let f_val = acm.acp.targetscope.clone(); - match f_val.resolve(&me.event, None) { - Ok(f_res) => { - if e.entry_match_no_index(&f_res) { - Some(*acm) - } else { + let r = entries.iter().fold(true, |acc, e| { + if !acc { + false + } else { + // For this entry, find the acp's that apply to it from the + // set that apply to the entry that is performing the operation + let scoped_acp: Vec<&AccessControlModify> = related_acp + .iter() + .filter_map(|acm: &&AccessControlModify| { + // We are continually compiling and using these + // in a tight loop, so this is a possible oppurtunity + // to cache or handle these filters better - filter compiler + // cache maybe? + let f_val = acm.acp.targetscope.clone(); + match f_val.resolve(&me.event, None) { + Ok(f_res) => { + if e.entry_match_no_index(&f_res) { + Some(*acm) + } else { + None + } + } + Err(e) => { + ladmin_error!( + audit, + "A internal filter was passed for resolution!?!? {:?}", + e + ); None } } - Err(e) => { - ladmin_error!( - audit, - "A internal filter was passed for resolution!?!? {:?}", - e - ); - None - } - } - }) - .collect(); - // Build the sets of classes, pres and rem we are allowed to modify, extend - // or use based on the set of matched acps. - let allowed_pres: BTreeSet<&str> = scoped_acp - .iter() - .flat_map(|acp| acp.presattrs.iter().map(|v| v.as_str())) - .collect(); + }) + .collect(); + // Build the sets of classes, pres and rem we are allowed to modify, extend + // or use based on the set of matched acps. + let allowed_pres: BTreeSet<&str> = scoped_acp + .iter() + .flat_map(|acp| acp.presattrs.iter().map(|v| v.as_str())) + .collect(); - let allowed_rem: BTreeSet<&str> = scoped_acp - .iter() - .flat_map(|acp| acp.remattrs.iter().map(|v| v.as_str())) - .collect(); + let allowed_rem: BTreeSet<&str> = scoped_acp + .iter() + .flat_map(|acp| acp.remattrs.iter().map(|v| v.as_str())) + .collect(); - let allowed_classes: BTreeSet<&str> = scoped_acp - .iter() - .flat_map(|acp| acp.classes.iter().map(|v| v.as_str())) - .collect(); + let allowed_classes: BTreeSet<&str> = scoped_acp + .iter() + .flat_map(|acp| acp.classes.iter().map(|v| v.as_str())) + .collect(); - // Now check all the subsets are true. Remember, purge class - // is already checked above. - let mut result = true; - if !requested_pres.is_subset(&allowed_pres) { - lsecurity_access!(audit, "requested_pres is not a subset of allowed"); - lsecurity_access!(audit, "{:?} !⊆ {:?}", requested_pres, allowed_pres); - result = false; - } - if !requested_rem.is_subset(&allowed_rem) { - lsecurity_access!(audit, "requested_rem is not a subset of allowed"); - lsecurity_access!(audit, "{:?} !⊆ {:?}", requested_rem, allowed_rem); - result = false; - } - if !requested_classes.is_subset(&allowed_classes) { - lsecurity_access!(audit, "requested_classes is not a subset of allowed"); - lsecurity_access!(audit, "{:?} !⊆ {:?}", requested_classes, allowed_classes); - result = false; - } - lsecurity_access!(audit, "passed pres, rem, classes check."); - result - } // if acc == false - }); - if r { - lsecurity_access!(audit, "allowed ✅"); - } else { - lsecurity_access!(audit, "denied ❌"); - } - Ok(r) + // Now check all the subsets are true. Remember, purge class + // is already checked above. 
+ let mut result = true; + if !requested_pres.is_subset(&allowed_pres) { + lsecurity_access!(audit, "requested_pres is not a subset of allowed"); + lsecurity_access!(audit, "{:?} !⊆ {:?}", requested_pres, allowed_pres); + result = false; + } + if !requested_rem.is_subset(&allowed_rem) { + lsecurity_access!(audit, "requested_rem is not a subset of allowed"); + lsecurity_access!(audit, "{:?} !⊆ {:?}", requested_rem, allowed_rem); + result = false; + } + if !requested_classes.is_subset(&allowed_classes) { + lsecurity_access!(audit, "requested_classes is not a subset of allowed"); + lsecurity_access!( + audit, + "{:?} !⊆ {:?}", + requested_classes, + allowed_classes + ); + result = false; + } + lsecurity_access!(audit, "passed pres, rem, classes check."); + result + } // if acc == false + }); + if r { + lsecurity_access!(audit, "allowed ✅"); + } else { + lsecurity_access!(audit, "denied ❌"); + } + Ok(r) + }) } fn create_allow_operation( @@ -860,170 +871,172 @@ pub trait AccessControlsTransaction { ce: &CreateEvent, entries: &[Entry], ) -> Result { - lsecurity_access!(audit, "Access check for event: {:?}", ce); + lperf_segment!(audit, "access::create_allow_operation", || { + lsecurity_access!(audit, "Access check for event: {:?}", ce); - let rec_entry: &Entry = match &ce.event.origin { - EventOrigin::Internal => { - lsecurity_access!(audit, "Internal operation, bypassing access check"); - // No need to check ACS - return Ok(true); - } - EventOrigin::User(e) => &e, - }; + let rec_entry: &Entry = match &ce.event.origin { + EventOrigin::Internal => { + lsecurity_access!(audit, "Internal operation, bypassing access check"); + // No need to check ACS + return Ok(true); + } + EventOrigin::User(e) => &e, + }; - // Some useful references we'll use for the remainder of the operation - let create_state = self.get_create(); + // Some useful references we'll use for the remainder of the operation + let create_state = self.get_create(); - // Find the acps that relate to the caller. - let related_acp: Vec<&AccessControlCreate> = create_state - .iter() - .filter_map(|(_, acs)| { - let f_val = acs.acp.receiver.clone(); - match f_val.resolve(&ce.event, None) { - Ok(f_res) => { - if rec_entry.entry_match_no_index(&f_res) { - Some(acs) - } else { + // Find the acps that relate to the caller. + let related_acp: Vec<&AccessControlCreate> = create_state + .iter() + .filter_map(|(_, acs)| { + let f_val = acs.acp.receiver.clone(); + match f_val.resolve(&ce.event, None) { + Ok(f_res) => { + if rec_entry.entry_match_no_index(&f_res) { + Some(acs) + } else { + None + } + } + Err(e) => { + ladmin_error!( + audit, + "A internal filter was passed for resolution!?!? {:?}", + e + ); None } } - Err(e) => { - ladmin_error!( - audit, - "A internal filter was passed for resolution!?!? {:?}", - e - ); - None - } - } - }) - .collect(); + }) + .collect(); - lsecurity_access!(audit, "Related acc -> {:?}", related_acp); + lsecurity_access!(audit, "Related acc -> {:?}", related_acp); - // For each entry - let r = entries.iter().fold(true, |acc, e| { - if !acc { - // We have already failed, move on. - false - } else { - // Build the set of requested classes and attrs here. 
- let create_attrs: BTreeSet<&str> = e.get_ava_names(); - // If this is empty, we make an empty set, which is fine because - // the empty class set despite matching is_subset, will have the - // following effect: - // * there is no class on entry, so schema will fail - // * plugin-base will add object to give a class, but excess - // attrs will cause fail (could this be a weakness?) - // * class is a "may", so this could be empty in the rules, so - // if the accr is empty this would not be a true subset, - // so this would "fail", but any content in the accr would - // have to be validated. - // - // I still think if this is None, we should just fail here ... - // because it shouldn't be possible to match. + // For each entry + let r = entries.iter().fold(true, |acc, e| { + if !acc { + // We have already failed, move on. + false + } else { + // Build the set of requested classes and attrs here. + let create_attrs: BTreeSet<&str> = e.get_ava_names(); + // If this is empty, we make an empty set, which is fine because + // the empty class set despite matching is_subset, will have the + // following effect: + // * there is no class on entry, so schema will fail + // * plugin-base will add object to give a class, but excess + // attrs will cause fail (could this be a weakness?) + // * class is a "may", so this could be empty in the rules, so + // if the accr is empty this would not be a true subset, + // so this would "fail", but any content in the accr would + // have to be validated. + // + // I still think if this is None, we should just fail here ... + // because it shouldn't be possible to match. - let create_classes: BTreeSet<&str> = match e.get_ava_set_str("class") { - Some(s) => s, - None => { - ladmin_error!(audit, "Class set failed to build - corrupted entry?"); - return false; - } - }; + let create_classes: BTreeSet<&str> = match e.get_ava_set_str("class") { + Some(s) => s, + None => { + ladmin_error!(audit, "Class set failed to build - corrupted entry?"); + return false; + } + }; - related_acp.iter().fold(false, |r_acc, accr| { - if r_acc { - // Already allowed, continue. - r_acc - } else { - // Check to see if allowed. - let f_val = accr.acp.targetscope.clone(); - match f_val.resolve(&ce.event, None) { - Ok(f_res) => { - if e.entry_match_no_index(&f_res) { - lsecurity_access!( - audit, - "entry {:?} matches acs {:?}", - e, - accr - ); - // It matches, so now we have to check attrs and classes. - // Remember, we have to match ALL requested attrs - // and classes to pass! - let allowed_attrs: BTreeSet<&str> = - accr.attrs.iter().map(|s| s.as_str()).collect(); - let allowed_classes: BTreeSet<&str> = - accr.classes.iter().map(|s| s.as_str()).collect(); - - if !create_attrs.is_subset(&allowed_attrs) { + related_acp.iter().fold(false, |r_acc, accr| { + if r_acc { + // Already allowed, continue. + r_acc + } else { + // Check to see if allowed. + let f_val = accr.acp.targetscope.clone(); + match f_val.resolve(&ce.event, None) { + Ok(f_res) => { + if e.entry_match_no_index(&f_res) { lsecurity_access!( audit, - "create_attrs is not a subset of allowed" + "entry {:?} matches acs {:?}", + e, + accr ); + // It matches, so now we have to check attrs and classes. + // Remember, we have to match ALL requested attrs + // and classes to pass! 
+ let allowed_attrs: BTreeSet<&str> = + accr.attrs.iter().map(|s| s.as_str()).collect(); + let allowed_classes: BTreeSet<&str> = + accr.classes.iter().map(|s| s.as_str()).collect(); + + if !create_attrs.is_subset(&allowed_attrs) { + lsecurity_access!( + audit, + "create_attrs is not a subset of allowed" + ); + lsecurity_access!( + audit, + "{:?} !⊆ {:?}", + create_attrs, + allowed_attrs + ); + return false; + } + if !create_classes.is_subset(&allowed_classes) { + lsecurity_access!( + audit, + "create_classes is not a subset of allowed" + ); + lsecurity_access!( + audit, + "{:?} !⊆ {:?}", + create_classes, + allowed_classes + ); + return false; + } + lsecurity_access!(audit, "passed"); + + true + } else { lsecurity_access!( audit, - "{:?} !⊆ {:?}", - create_attrs, - allowed_attrs + "entry {:?} DOES NOT match acs {:?}", + e, + accr ); - return false; + // Does not match, fail this rule. + false } - if !create_classes.is_subset(&allowed_classes) { - lsecurity_access!( - audit, - "create_classes is not a subset of allowed" - ); - lsecurity_access!( - audit, - "{:?} !⊆ {:?}", - create_classes, - allowed_classes - ); - return false; - } - lsecurity_access!(audit, "passed"); - - true - } else { - lsecurity_access!( + } + Err(e) => { + ladmin_error!( audit, - "entry {:?} DOES NOT match acs {:?}", - e, - accr + "A internal filter was passed for resolution!?!? {:?}", + e ); - // Does not match, fail this rule. + // Default to failing here. false } - } - Err(e) => { - ladmin_error!( - audit, - "A internal filter was passed for resolution!?!? {:?}", - e - ); - // Default to failing here. - false - } - } // match - } - }) + } // match + } + }) + } + // Find the set of related acps for this entry. + // + // For each "created" entry. + // If the created entry is 100% allowed by this acp + // IE: all attrs to be created AND classes match classes + // allow + // if no acp allows, fail operation. + }); + + if r { + lsecurity_access!(audit, "allowed ✅"); + } else { + lsecurity_access!(audit, "denied ❌"); } - // Find the set of related acps for this entry. - // - // For each "created" entry. - // If the created entry is 100% allowed by this acp - // IE: all attrs to be created AND classes match classes - // allow - // if no acp allows, fail operation. - }); - if r { - lsecurity_access!(audit, "allowed ✅"); - } else { - lsecurity_access!(audit, "denied ❌"); - } - - Ok(r) + Ok(r) + }) } fn delete_allow_operation( @@ -1032,104 +1045,106 @@ pub trait AccessControlsTransaction { de: &DeleteEvent, entries: &[Entry], ) -> Result { - lsecurity_access!(audit, "Access check for event: {:?}", de); + lperf_segment!(audit, "access::delete_allow_operation", || { + lsecurity_access!(audit, "Access check for event: {:?}", de); - let rec_entry: &Entry = match &de.event.origin { - EventOrigin::Internal => { - lsecurity_access!(audit, "Internal operation, bypassing access check"); - // No need to check ACS - return Ok(true); - } - EventOrigin::User(e) => &e, - }; + let rec_entry: &Entry = match &de.event.origin { + EventOrigin::Internal => { + lsecurity_access!(audit, "Internal operation, bypassing access check"); + // No need to check ACS + return Ok(true); + } + EventOrigin::User(e) => &e, + }; - // Some useful references we'll use for the remainder of the operation - let delete_state = self.get_delete(); + // Some useful references we'll use for the remainder of the operation + let delete_state = self.get_delete(); - // Find the acps that relate to the caller. 
- let related_acp: Vec<&AccessControlDelete> = delete_state - .iter() - .filter_map(|(_, acs)| { - let f_val = acs.acp.receiver.clone(); - match f_val.resolve(&de.event, None) { - Ok(f_res) => { - if rec_entry.entry_match_no_index(&f_res) { - Some(acs) - } else { + // Find the acps that relate to the caller. + let related_acp: Vec<&AccessControlDelete> = delete_state + .iter() + .filter_map(|(_, acs)| { + let f_val = acs.acp.receiver.clone(); + match f_val.resolve(&de.event, None) { + Ok(f_res) => { + if rec_entry.entry_match_no_index(&f_res) { + Some(acs) + } else { + None + } + } + Err(e) => { + ladmin_error!( + audit, + "A internal filter was passed for resolution!?!? {:?}", + e + ); None } } - Err(e) => { - ladmin_error!( - audit, - "A internal filter was passed for resolution!?!? {:?}", - e - ); - None - } - } - }) - .collect(); + }) + .collect(); - related_acp.iter().for_each(|racp| { - lsecurity_access!(audit, "Related acs -> {:?}", racp.acp.name); - }); + related_acp.iter().for_each(|racp| { + lsecurity_access!(audit, "Related acs -> {:?}", racp.acp.name); + }); - // For each entry - let r = entries.iter().fold(true, |acc, e| { - if !acc { - // Any false, denies the whole operation. - false - } else { - related_acp.iter().fold(false, |r_acc, acd| { - if r_acc { - // If something allowed us to delete, skip doing silly work. - r_acc - } else { - let f_val = acd.acp.targetscope.clone(); - match f_val.resolve(&de.event, None) { - Ok(f_res) => { - if e.entry_match_no_index(&f_res) { - lsecurity_access!( + // For each entry + let r = entries.iter().fold(true, |acc, e| { + if !acc { + // Any false, denies the whole operation. + false + } else { + related_acp.iter().fold(false, |r_acc, acd| { + if r_acc { + // If something allowed us to delete, skip doing silly work. + r_acc + } else { + let f_val = acd.acp.targetscope.clone(); + match f_val.resolve(&de.event, None) { + Ok(f_res) => { + if e.entry_match_no_index(&f_res) { + lsecurity_access!( + audit, + "entry {:?} matches acs {:?}", + e.get_uuid(), + acd + ); + // It matches, so we can delete this! + lsecurity_access!(audit, "passed"); + true + } else { + lsecurity_access!( + audit, + "entry {:?} DOES NOT match acs {:?}", + e.get_uuid(), + acd + ); + // Does not match, fail. + false + } + } + Err(e) => { + ladmin_error!( audit, - "entry {:?} matches acs {:?}", - e.get_uuid(), - acd + "A internal filter was passed for resolution!?!? {:?}", + e ); - // It matches, so we can delete this! - lsecurity_access!(audit, "passed"); - true - } else { - lsecurity_access!( - audit, - "entry {:?} DOES NOT match acs {:?}", - e.get_uuid(), - acd - ); - // Does not match, fail. + // Default to failing here. false } - } - Err(e) => { - ladmin_error!( - audit, - "A internal filter was passed for resolution!?!? {:?}", - e - ); - // Default to failing here. 
- false - } - } // match - } // else - }) // fold related_acp - } // if/else - }); - if r { - lsecurity_access!(audit, "allowed ✅"); - } else { - lsecurity_access!(audit, "denied ❌"); - } - Ok(r) + } // match + } // else + }) // fold related_acp + } // if/else + }); + if r { + lsecurity_access!(audit, "allowed ✅"); + } else { + lsecurity_access!(audit, "denied ❌"); + } + Ok(r) + }) } } diff --git a/kanidmd/src/lib/actors/v1_read.rs b/kanidmd/src/lib/actors/v1_read.rs index 971dd1fcb..bc1e88b02 100644 --- a/kanidmd/src/lib/actors/v1_read.rs +++ b/kanidmd/src/lib/actors/v1_read.rs @@ -12,6 +12,7 @@ use kanidm_proto::v1::{OperationError, RadiusAuthToken}; use crate::filter::{Filter, FilterInvalid}; use crate::idm::server::IdmServer; +use crate::ldap::{LdapBoundToken, LdapResponseState, LdapServer}; use crate::server::{QueryServer, QueryServerTransaction}; use kanidm_proto::v1::Entry as ProtoEntry; @@ -24,6 +25,9 @@ use actix::prelude::*; use std::time::SystemTime; use uuid::Uuid; +use ldap3_server::simple::*; +use std::convert::TryFrom; + // These are used when the request (IE Get) has no intrising request // type. Additionally, they are used in some requests where we need // to supplement extra server state (IE userauthtokens) to a request. @@ -183,6 +187,7 @@ pub struct QueryServerReadV1 { log: Sender>, qs: QueryServer, idms: Arc, + ldap: Arc, } impl Actor for QueryServerReadV1 { @@ -194,19 +199,35 @@ impl Actor for QueryServerReadV1 { } impl QueryServerReadV1 { - pub fn new(log: Sender>, qs: QueryServer, idms: Arc) -> Self { + pub fn new( + log: Sender>, + qs: QueryServer, + idms: Arc, + ldap: Arc, + ) -> Self { info!("Starting query server v1 worker ..."); - QueryServerReadV1 { log, qs, idms } + QueryServerReadV1 { + log, + qs, + idms, + ldap, + } } pub fn start( log: Sender>, query_server: QueryServer, idms: Arc, + ldap: Arc, threads: usize, ) -> actix::Addr { SyncArbiter::start(threads, move || { - QueryServerReadV1::new(log.clone(), query_server.clone(), idms.clone()) + QueryServerReadV1::new( + log.clone(), + query_server.clone(), + idms.clone(), + ldap.clone(), + ) }) } } @@ -857,3 +878,58 @@ impl Handler for QueryServerReadV1 { res } } + +#[derive(Message)] +#[rtype(result = "Option")] +pub struct LdapRequestMessage { + pub eventid: Uuid, + pub protomsg: LdapMsg, + pub uat: Option, +} + +impl Handler for QueryServerReadV1 { + type Result = Option; + + fn handle(&mut self, msg: LdapRequestMessage, _: &mut Self::Context) -> Self::Result { + let LdapRequestMessage { + eventid, + protomsg, + uat, + } = msg; + let mut audit = AuditScope::new("ldap_request_message", eventid.clone()); + let res = lperf_segment!( + &mut audit, + "actors::v1_read::handle", + || { + let server_op = match ServerOps::try_from(protomsg) { + Ok(v) => v, + Err(_) => { + return LdapResponseState::Disconnect(DisconnectionNotice::gen( + LdapResultCode::ProtocolError, + format!("Invalid Request {:?}", &eventid).as_str(), + )); + } + }; + + self.ldap + .do_op(&mut audit, &self.idms, server_op, uat, &eventid) + .unwrap_or_else(|e| { + ladmin_error!(&mut audit, "do_op failed -> {:?}", e); + LdapResponseState::Disconnect(DisconnectionNotice::gen( + LdapResultCode::Other, + format!("Internal Server Error {:?}", &eventid).as_str(), + )) + }) + } + ); + if self.log.send(Some(audit)).is_err() { + error!("Unable to commit log -> {:?}", &eventid); + Some(LdapResponseState::Disconnect(DisconnectionNotice::gen( + LdapResultCode::Other, + format!("Internal Server Error {:?}", &eventid).as_str(), + ))) + } else { + Some(res) + } + 
} +} diff --git a/kanidmd/src/lib/audit.rs b/kanidmd/src/lib/audit.rs index b87f21628..fdb54164e 100644 --- a/kanidmd/src/lib/audit.rs +++ b/kanidmd/src/lib/audit.rs @@ -87,15 +87,19 @@ macro_rules! ltrace { macro_rules! lfilter { ($au:expr, $($arg:tt)*) => ({ - lqueue!($au, LogTag::Filter, $($arg)*) + if log_enabled!(log::Level::Info) { + lqueue!($au, LogTag::Filter, $($arg)*) + } }) } +/* macro_rules! lfilter_warning { ($au:expr, $($arg:tt)*) => ({ lqueue!($au, LogTag::FilterWarning, $($arg)*) }) } +*/ macro_rules! lfilter_error { ($au:expr, $($arg:tt)*) => ({ @@ -111,13 +115,17 @@ macro_rules! ladmin_error { macro_rules! ladmin_warning { ($au:expr, $($arg:tt)*) => ({ + if log_enabled!(log::Level::Warn) { lqueue!($au, LogTag::AdminWarning, $($arg)*) + } }) } macro_rules! ladmin_info { ($au:expr, $($arg:tt)*) => ({ - lqueue!($au, LogTag::AdminInfo, $($arg)*) + if log_enabled!(log::Level::Info) { + lqueue!($au, LogTag::AdminInfo, $($arg)*) + } }) } @@ -141,28 +149,32 @@ macro_rules! lsecurity_access { macro_rules! lperf_segment { ($au:expr, $id:expr, $fun:expr) => {{ - use std::time::Instant; + if log_enabled!(log::Level::Debug) { + use std::time::Instant; - // start timer. - let start = Instant::now(); + // start timer. + let start = Instant::now(); - // Create a new perf event - this sets - // us as the current active, and the parent - // correctly. - let pe = unsafe { $au.new_perfevent($id) }; + // Create a new perf event - this sets + // us as the current active, and the parent + // correctly. + let pe = unsafe { $au.new_perfevent($id) }; - // fun run time - let r = $fun(); - // end timer, and diff - let end = Instant::now(); - let diff = end.duration_since(start); + // fun run time + let r = $fun(); + // end timer, and diff + let end = Instant::now(); + let diff = end.duration_since(start); - // Now we are done, we put our parent back as - // the active. - unsafe { $au.end_perfevent(pe, diff) }; + // Now we are done, we put our parent back as + // the active. + unsafe { $au.end_perfevent(pe, diff) }; - // Return the result. Hope this works! - r + // Return the result. Hope this works! 
+ r + } else { + $fun() + } }}; } @@ -295,7 +307,7 @@ impl PartialEq for PerfProcessed { impl PerfProcessed { fn int_write_fmt(&self, parents: usize, uuid: &HyphenatedRef) { let mut prefix = String::new(); - prefix.push_str(format!("[- {} perf::trace] ", uuid).as_str()); + prefix.push_str("[- perf::trace] "); let d = &self.duration; let df = d.as_secs() as f64 + d.subsec_nanos() as f64 * 1e-9; if parents > 0 { @@ -355,19 +367,19 @@ impl AuditScope { pub fn write_log(self) { let uuid_ref = self.uuid.to_hyphenated_ref(); + error!("[- event::start] {}", uuid_ref); self.events.iter().for_each(|e| match e.tag { LogTag::AdminError | LogTag::RequestError | LogTag::FilterError => { - error!("[{} {} {}] {}", e.time, uuid_ref, e.tag, e.data) + error!("[{} {}] {}", e.time, e.tag, e.data) } LogTag::AdminWarning | LogTag::Security | LogTag::SecurityAccess - | LogTag::FilterWarning => warn!("[{} {} {}] {}", e.time, uuid_ref, e.tag, e.data), - LogTag::AdminInfo | LogTag::Filter => { - info!("[{} {} {}] {}", e.time, uuid_ref, e.tag, e.data) - } - LogTag::Trace => debug!("[{} {} {}] {}", e.time, uuid_ref, e.tag, e.data), + | LogTag::FilterWarning => warn!("[{} {}] {}", e.time, e.tag, e.data), + LogTag::AdminInfo | LogTag::Filter => info!("[{} {}] {}", e.time, e.tag, e.data), + LogTag::Trace => debug!("[{} {}] {}", e.time, e.tag, e.data), }); + error!("[- event::end] {}", uuid_ref); // First, we pre-process all the perf events to order them let mut proc_perf: Vec<_> = self.perf.iter().map(|pe| pe.process()).collect(); @@ -377,7 +389,8 @@ impl AuditScope { // Now write the perf events proc_perf .iter() - .for_each(|pe| pe.int_write_fmt(0, &uuid_ref)) + .for_each(|pe| pe.int_write_fmt(0, &uuid_ref)); + error!("[- perf::end] {}", uuid_ref); } pub fn log_event(&mut self, tag: LogTag, data: String) { diff --git a/kanidmd/src/lib/be/idl_arc_sqlite.rs b/kanidmd/src/lib/be/idl_arc_sqlite.rs index 3d9e6c599..170cf1923 100644 --- a/kanidmd/src/lib/be/idl_arc_sqlite.rs +++ b/kanidmd/src/lib/be/idl_arc_sqlite.rs @@ -14,8 +14,8 @@ use uuid::Uuid; // use std::borrow::Borrow; -const DEFAULT_CACHE_TARGET: usize = 1024; -const DEFAULT_IDL_CACHE_RATIO: usize = 16; +const DEFAULT_CACHE_TARGET: usize = 10240; +const DEFAULT_IDL_CACHE_RATIO: usize = 32; const DEFAULT_CACHE_RMISS: usize = 8; const DEFAULT_CACHE_WMISS: usize = 8; diff --git a/kanidmd/src/lib/be/mod.rs b/kanidmd/src/lib/be/mod.rs index c2697d688..6411ff4ba 100644 --- a/kanidmd/src/lib/be/mod.rs +++ b/kanidmd/src/lib/be/mod.rs @@ -203,7 +203,7 @@ pub trait BackendTransaction { // and terms. let (f_andnot, f_rem): (Vec<_>, Vec<_>) = l.iter().partition(|f| f.is_andnot()); - // We make this an iter, so everything comes off in order. Using pop means we + // We make this an iter, so everything comes off in order. if we used pop it means we // pull from the tail, which is the WORST item to start with! let mut f_rem_iter = f_rem.iter(); @@ -216,6 +216,11 @@ pub trait BackendTransaction { } }; + // Setup the counter of terms we have left to evaluate. + // This is used so that we shortcut return ONLY when we really do have + // more terms remaining. + let mut f_rem_count = f_rem.len() + f_andnot.len() - 1; + // Setup the query plan tracker let mut plan = Vec::new(); plan.push(fp); @@ -225,20 +230,12 @@ pub trait BackendTransaction { // When below thres, we have to return partials to trigger the entry_no_match_filter check. // But we only do this when there are actually multiple elements in the and, // because an and with 1 element now is FULLY resolved. 
- if idl.len() < thres && f_rem.len() > 0 { - lfilter_warning!( - au, - "NOTICE: Cand set shorter than threshold, early return" - ); + if idl.len() < thres && f_rem_count > 0 { let setplan = FilterPlan::AndPartialThreshold(plan); return Ok((IDL::PartialThreshold(idl.clone()), setplan)); } else if idl.len() == 0 { // Regardless of the input state, if it's empty, this can never // be satisfied, so return we are indexed and complete. - lfilter_warning!( - au, - "NOTICE: empty candidate set, shortcutting return." - ); let setplan = FilterPlan::AndEmptyCand(plan); return Ok((IDL::Indexed(IDLBitRange::new()), setplan)); } @@ -248,26 +245,19 @@ pub trait BackendTransaction { // Now, for all remaining, for f in f_rem_iter { + f_rem_count -= 1; let (inter, fp) = self.filter2idl(au, f, thres)?; plan.push(fp); cand_idl = match (cand_idl, inter) { (IDL::Indexed(ia), IDL::Indexed(ib)) => { let r = ia & ib; - if r.len() < thres { + if r.len() < thres && f_rem_count > 0 { // When below thres, we have to return partials to trigger the entry_no_match_filter check. - lfilter_warning!( - au, - "NOTICE: Cand set shorter than threshold, early return" - ); let setplan = FilterPlan::AndPartialThreshold(plan); return Ok((IDL::PartialThreshold(r), setplan)); } else if r.len() == 0 { // Regardless of the input state, if it's empty, this can never // be satisfied, so return we are indexed and complete. - lfilter_warning!( - au, - "NOTICE: empty candidate set, shortcutting return." - ); let setplan = FilterPlan::AndEmptyCand(plan); return Ok((IDL::Indexed(IDLBitRange::new()), setplan)); } else { @@ -278,12 +268,8 @@ pub trait BackendTransaction { | (IDL::Partial(ia), IDL::Indexed(ib)) | (IDL::Partial(ia), IDL::Partial(ib)) => { let r = ia & ib; - if r.len() < thres { + if r.len() < thres && f_rem_count > 0 { // When below thres, we have to return partials to trigger the entry_no_match_filter check. - lfilter_warning!( - au, - "NOTICE: Cand set shorter than threshold, early return" - ); let setplan = FilterPlan::AndPartialThreshold(plan); return Ok((IDL::PartialThreshold(r), setplan)); } else { @@ -296,12 +282,8 @@ pub trait BackendTransaction { | (IDL::PartialThreshold(ia), IDL::Partial(ib)) | (IDL::Partial(ia), IDL::PartialThreshold(ib)) => { let r = ia & ib; - if r.len() < thres { + if r.len() < thres && f_rem_count > 0 { // When below thres, we have to return partials to trigger the entry_no_match_filter check. - lfilter_warning!( - au, - "NOTICE: Cand set shorter than threshold, early return" - ); let setplan = FilterPlan::AndPartialThreshold(plan); return Ok((IDL::PartialThreshold(r), setplan)); } else { @@ -321,6 +303,7 @@ pub trait BackendTransaction { // debug!("partial cand set ==> {:?}", cand_idl); for f in f_andnot.iter() { + f_rem_count -= 1; let f_in = match f { FilterResolved::AndNot(f_in) => f_in, _ => { @@ -341,7 +324,6 @@ pub trait BackendTransaction { // Don't trigger threshold on and nots if fully indexed. if r.len() < thres { // When below thres, we have to return partials to trigger the entry_no_match_filter check. - lfilter_warning!(au, "NOTICE: Cand set shorter than threshold, early return"); return Ok(IDL::PartialThreshold(r)); } else { IDL::Indexed(r) @@ -355,11 +337,7 @@ pub trait BackendTransaction { let r = ia.andnot(ib); // DO trigger threshold on partials, because we have to apply the filter // test anyway, so we may as well shortcut at this point. 
- if r.len() < thres { - lfilter_warning!( - au, - "NOTICE: Cand set shorter than threshold, early return" - ); + if r.len() < thres && f_rem_count > 0 { let setplan = FilterPlan::AndPartialThreshold(plan); return Ok((IDL::PartialThreshold(r), setplan)); } else { @@ -374,11 +352,7 @@ pub trait BackendTransaction { let r = ia.andnot(ib); // DO trigger threshold on partials, because we have to apply the filter // test anyway, so we may as well shortcut at this point. - if r.len() < thres { - lfilter_warning!( - au, - "NOTICE: Cand set shorter than threshold, early return" - ); + if r.len() < thres && f_rem_count > 0 { let setplan = FilterPlan::AndPartialThreshold(plan); return Ok((IDL::PartialThreshold(r), setplan)); } else { @@ -436,13 +410,12 @@ pub trait BackendTransaction { au: &mut AuditScope, filt: &Filter, ) -> Result>, OperationError> { - // // Unlike DS, even if we don't get the index back, we can just pass // to the in-memory filter test and be done. lperf_segment!(au, "be::search", || { // Do a final optimise of the filter lfilter!(au, "filter unoptimised form --> {:?}", filt); - let filt = filt.optimise(); + let filt = lperf_segment!(au, "be::search", || { filt.optimise() }); lfilter!(au, "filter optimised to --> {:?}", filt); // Using the indexes, resolve the IDL here, or ALLIDS. @@ -466,21 +439,20 @@ pub trait BackendTransaction { "filter (search) was partially or fully unindexed. {:?}", filt ); - entries - .into_iter() - .filter(|e| e.entry_match_no_index(&filt)) - .collect() + lperf_segment!(au, "be::search", || { + entries + .into_iter() + .filter(|e| e.entry_match_no_index(&filt)) + .collect() + }) } IDL::PartialThreshold(_) => { - lfilter_warning!( - au, - "filter (search) was partial unindexed due to test threshold {:?}", - filt - ); - entries - .into_iter() - .filter(|e| e.entry_match_no_index(&filt)) - .collect() + lperf_segment!(au, "be::search", || { + entries + .into_iter() + .filter(|e| e.entry_match_no_index(&filt)) + .collect() + }) } // Since the index fully resolved, we can shortcut the filter test step here! IDL::Indexed(_) => { @@ -537,16 +509,8 @@ pub trait BackendTransaction { // Now, check the idl -- if it's fully resolved, we can skip this because the query // was fully indexed. match &idl { - IDL::Indexed(idl) => { - lfilter!(au, "filter (exists) was fully indexed 👏"); - Ok(idl.len() > 0) - } + IDL::Indexed(idl) => Ok(idl.len() > 0), IDL::PartialThreshold(_) => { - lfilter_warning!( - au, - "filter (exists) was partial unindexed due to test threshold {:?}", - filt - ); let entries = try_audit!(au, self.get_idlayer().get_identry(au, &idl)); // if not 100% resolved query, apply the filter test. @@ -894,10 +858,14 @@ impl<'a> BackendWriteTransaction<'a> { audit: &mut AuditScope, v: i64, ) -> Result<(), OperationError> { - if self.get_db_index_version() < v { + let dbv = self.get_db_index_version(); + ladmin_info!(audit, "upgrade_reindex -> dbv: {} v: {}", dbv, v); + if dbv < v { self.reindex(audit)?; + self.set_db_index_version(v) + } else { + Ok(()) } - self.set_db_index_version(v) } pub fn reindex(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> { @@ -1125,16 +1093,17 @@ impl Backend { } } - pub fn write(&self, idxmeta: BTreeSet<(String, IndexType)>) -> BackendWriteTransaction { + pub fn write(&self, idxmeta: &BTreeSet<(String, IndexType)>) -> BackendWriteTransaction { BackendWriteTransaction { idlayer: self.idlayer.write(), - idxmeta, + // TODO: Performance improvement here by NOT cloning the idxmeta. 
+ idxmeta: (*idxmeta).clone(), } } // Should this actually call the idlayer directly? pub fn reset_db_s_uuid(&self, audit: &mut AuditScope) -> Uuid { - let wr = self.write(BTreeSet::new()); + let wr = self.write(&BTreeSet::new()); let sid = wr.reset_db_s_uuid().unwrap(); wr.commit(audit).unwrap(); sid @@ -1186,7 +1155,7 @@ mod tests { idxmeta.insert(("uuid".to_string(), IndexType::PRESENCE)); idxmeta.insert(("ta".to_string(), IndexType::EQUALITY)); idxmeta.insert(("tb".to_string(), IndexType::EQUALITY)); - let mut be_txn = be.write(idxmeta); + let mut be_txn = be.write(&idxmeta); // Could wrap another future here for the future::ok bit... let r = $test_fn(&mut audit, &mut be_txn); diff --git a/kanidmd/src/lib/config.rs b/kanidmd/src/lib/config.rs index 8540a3c69..3e3af76a1 100644 --- a/kanidmd/src/lib/config.rs +++ b/kanidmd/src/lib/config.rs @@ -17,6 +17,7 @@ pub struct TlsConfiguration { #[derive(Serialize, Deserialize, Debug, Default)] pub struct Configuration { pub address: String, + pub ldapaddress: Option, pub threads: usize, // db type later pub db_path: String, @@ -30,6 +31,10 @@ pub struct Configuration { impl fmt::Display for Configuration { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "address: {}, ", self.address) + .and_then(|_| match &self.ldapaddress { + Some(la) => write!(f, "ldap address: {}, ", la), + None => write!(f, "ldap address: disabled, "), + }) .and_then(|_| write!(f, "thread count: {}, ", self.threads)) .and_then(|_| write!(f, "dbpath: {}, ", self.db_path)) .and_then(|_| write!(f, "max request size: {}b, ", self.maximum_request)) @@ -49,6 +54,7 @@ impl Configuration { pub fn new() -> Self { let mut c = Configuration { address: String::from("127.0.0.1:8080"), + ldapaddress: None, threads: num_cpus::get(), db_path: String::from(""), maximum_request: 262_144, // 256k @@ -82,6 +88,10 @@ impl Configuration { .unwrap_or_else(|| String::from("127.0.0.1:8080")); } + pub fn update_ldapbind(&mut self, l: &Option) { + self.ldapaddress = l.clone(); + } + pub fn update_tls( &mut self, ca: &Option, diff --git a/kanidmd/src/lib/constants/uuids.rs b/kanidmd/src/lib/constants/uuids.rs index da4611f03..9438121f5 100644 --- a/kanidmd/src/lib/constants/uuids.rs +++ b/kanidmd/src/lib/constants/uuids.rs @@ -105,7 +105,7 @@ pub const UUID_SCHEMA_ATTR_PASSWORD_IMPORT: &str = "00000000-0000-0000-0000-ffff // System and domain infos // I'd like to strongly criticise william of the past for fucking up these allocations. pub const STR_UUID_SYSTEM_INFO: &str = "00000000-0000-0000-0000-ffffff000001"; -pub const UUID_DOMAIN_INFO: &str = "00000000-0000-0000-0000-ffffff000025"; +pub const STR_UUID_DOMAIN_INFO: &str = "00000000-0000-0000-0000-ffffff000025"; // DO NOT allocate here, allocate below. // Access controls @@ -152,4 +152,5 @@ lazy_static! 
{ pub static ref UUID_ANONYMOUS: Uuid = Uuid::parse_str(STR_UUID_ANONYMOUS).unwrap(); pub static ref UUID_SYSTEM_CONFIG: Uuid = Uuid::parse_str(STR_UUID_SYSTEM_CONFIG).unwrap(); pub static ref UUID_SYSTEM_INFO: Uuid = Uuid::parse_str(STR_UUID_SYSTEM_INFO).unwrap(); + pub static ref UUID_DOMAIN_INFO: Uuid = Uuid::parse_str(STR_UUID_DOMAIN_INFO).unwrap(); } diff --git a/kanidmd/src/lib/core/ldaps.rs b/kanidmd/src/lib/core/ldaps.rs new file mode 100644 index 000000000..5ea13d862 --- /dev/null +++ b/kanidmd/src/lib/core/ldaps.rs @@ -0,0 +1,236 @@ +use crate::actors::v1_read::{LdapRequestMessage, QueryServerReadV1}; +use crate::ldap::{LdapBoundToken, LdapResponseState}; +use openssl::ssl::{SslAcceptor, SslAcceptorBuilder}; + +use actix::prelude::*; +use futures_util::stream::StreamExt; +use ldap3_server::simple::*; +use ldap3_server::LdapCodec; +// use std::convert::TryFrom; +use std::io; +use std::marker::Unpin; +use std::net; +use std::str::FromStr; +use tokio::io::{AsyncWrite, WriteHalf}; +use tokio::net::{TcpListener, TcpStream}; +use tokio_util::codec::FramedRead; +use uuid::Uuid; + +struct LdapReq(pub LdapMsg); + +impl Message for LdapReq { + type Result = Result<(), ()>; +} + +pub struct LdapServer { + qe_r: Addr, +} + +pub struct LdapSession +where + T: AsyncWrite + Unpin, +{ + qe_r: Addr, + framed: actix::io::FramedWrite, LdapCodec>, + uat: Option, +} + +impl Actor for LdapSession +where + T: 'static + AsyncWrite + Unpin, +{ + type Context = actix::Context; +} + +impl actix::io::WriteHandler for LdapSession where T: 'static + AsyncWrite + Unpin {} + +impl Handler for LdapSession +where + T: 'static + AsyncWrite + Unpin, +{ + type Result = ResponseActFuture>; + + fn handle(&mut self, msg: LdapReq, _ctx: &mut Self::Context) -> Self::Result { + let protomsg = msg.0; + // Transform the LdapMsg to something the query server can work with. + + // Because of the way these futures works, it's up to the qe_r to manage + // a lot of this, so we just palm off the processing to the thead pool. + let eventid = Uuid::new_v4(); + let uat = self.uat.clone(); + let qsf = self.qe_r.send(LdapRequestMessage { + eventid, + protomsg, + uat, + }); + let qsf = actix::fut::wrap_future::<_, Self>(qsf); + + let f = qsf.map(|result, actor, ctx| { + match result { + Ok(Some(LdapResponseState::Unbind)) => ctx.stop(), + Ok(Some(LdapResponseState::Disconnect(r))) => { + actor.framed.write(r); + ctx.stop() + } + Ok(Some(LdapResponseState::Bind(uat, r))) => { + actor.uat = Some(uat); + actor.framed.write(r); + } + Ok(Some(LdapResponseState::Respond(r))) => { + actor.framed.write(r); + } + Ok(Some(LdapResponseState::MultiPartResponse(v))) => { + v.into_iter().for_each(|r| actor.framed.write(r)); + } + Ok(Some(LdapResponseState::BindMultiPartResponse(uat, v))) => { + actor.uat = Some(uat); + v.into_iter().for_each(|r| actor.framed.write(r)); + } + Ok(None) | Err(_) => { + error!("Internal server error"); + ctx.stop(); + } + }; + Ok(()) + }); + + Box::new(f) + } +} + +impl StreamHandler> for LdapSession +where + T: 'static + AsyncWrite + Unpin, +{ + fn handle(&mut self, msg: Result, ctx: &mut Self::Context) { + match msg { + Ok(lm) => match ctx.address().try_send(LdapReq(lm)) { + // It's queued, we are done. 
+ Ok(_) => {} + Err(_) => { + error!("Too many queue msgs for connection"); + ctx.stop() + } + }, + Err(_) => { + error!("Io error"); + ctx.stop() + } + } + } +} + +impl LdapSession +where + T: 'static + AsyncWrite + Unpin, +{ + pub fn new( + framed: actix::io::FramedWrite, LdapCodec>, + qe_r: Addr, + ) -> Self { + LdapSession { + qe_r, + framed, + uat: None, + } + } +} + +impl Actor for LdapServer { + type Context = Context; +} + +#[derive(Message)] +#[rtype(result = "()")] +struct TcpConnect(pub TcpStream, pub net::SocketAddr); + +impl Handler for LdapServer { + type Result = (); + fn handle(&mut self, msg: TcpConnect, _: &mut Context) { + LdapSession::create(move |ctx| { + let (r, w) = tokio::io::split(msg.0); + LdapSession::add_stream(FramedRead::new(r, LdapCodec), ctx); + LdapSession::new( + actix::io::FramedWrite::new(w, LdapCodec, ctx), + self.qe_r.clone(), + ) + }); + } +} + +#[derive(Message)] +#[rtype(result = "Result<(), ()>")] +struct TlsConnect(pub &'static SslAcceptor, pub TcpStream, pub net::SocketAddr); + +impl Handler for LdapServer { + type Result = ResponseActFuture>; + fn handle(&mut self, msg: TlsConnect, _: &mut Context) -> Self::Result { + let qsf = tokio_openssl::accept(msg.0, msg.1); + let qsf = actix::fut::wrap_future::<_, Self>(qsf); + + let f = qsf.map(|result, actor, _ctx| { + result + .map(|tlsstream| { + LdapSession::create(move |ctx| { + let (r, w) = tokio::io::split(tlsstream); + LdapSession::add_stream(FramedRead::new(r, LdapCodec), ctx); + LdapSession::new( + actix::io::FramedWrite::new(w, LdapCodec, ctx), + actor.qe_r.clone(), + ) + }); + () + }) + .map_err(|_| { + error!("invalid tls handshake"); + () + }) + }); + + Box::new(f) + } +} + +pub(crate) async fn create_ldap_server( + address: &str, + opt_tls_params: Option, + qe_r: Addr, +) -> Result<(), ()> { + let addr = net::SocketAddr::from_str(address).map_err(|e| { + error!("Could not parse ldap server address {} -> {:?}", address, e); + () + })?; + + let listener = Box::new(TcpListener::bind(&addr).await.unwrap()); + + match opt_tls_params { + Some(tls_params) => { + info!("Starting LDAPS interface ldaps://{} ...", address); + LdapServer::create(move |ctx| { + let acceptor = Box::new(tls_params.build()); + let lacceptor = Box::leak(acceptor) as &'static _; + + ctx.add_message_stream(Box::leak(listener).incoming().map(move |st| { + let st = st.unwrap(); + let addr = st.peer_addr().unwrap(); + TlsConnect(lacceptor, st, addr) + })); + LdapServer { qe_r } + }); + } + None => { + info!("Starting LDAP interface ldap://{} ...", address); + LdapServer::create(move |ctx| { + ctx.add_message_stream(Box::leak(listener).incoming().map(|st| { + let st = st.unwrap(); + let addr = st.peer_addr().unwrap(); + TcpConnect(st, addr) + })); + LdapServer { qe_r } + }); + } + } + + info!("Created LDAP interface"); + Ok(()) +} diff --git a/kanidmd/src/lib/core/mod.rs b/kanidmd/src/lib/core/mod.rs index 34488001c..c57f1882c 100644 --- a/kanidmd/src/lib/core/mod.rs +++ b/kanidmd/src/lib/core/mod.rs @@ -1,4 +1,5 @@ mod ctx; +mod ldaps; // use actix_files as fs; use actix::prelude::*; use actix_session::{CookieSession, Session}; @@ -36,6 +37,7 @@ use crate::crypto::setup_tls; use crate::filter::{Filter, FilterInvalid}; use crate::idm::server::IdmServer; use crate::interval::IntervalActor; +use crate::ldap::LdapServer; use crate::schema::Schema; use crate::schema::SchemaTransaction; use crate::server::QueryServer; @@ -1319,9 +1321,12 @@ pub fn restore_server_core(config: Configuration, dst_path: &str) { }; // Limit the scope of 
the schema txn. - let idxmeta = { schema.write().get_idxmeta_set() }; - let mut be_wr_txn = be.write(idxmeta); + let mut be_wr_txn = { + let schema_txn = schema.write(); + let idxmeta = schema_txn.get_idxmeta_set(); + be.write(idxmeta) + }; let r = be_wr_txn .restore(&mut audit, dst_path) .and_then(|_| be_wr_txn.commit(&mut audit)); @@ -1384,11 +1389,13 @@ pub fn reindex_server_core(config: Configuration) { }; info!("Start Index Phase 1 ..."); - // Limit the scope of the schema txn. - let idxmeta = { schema.write().get_idxmeta_set() }; - // Reindex only the core schema attributes to bootstrap the process. - let mut be_wr_txn = be.write(idxmeta); + let mut be_wr_txn = { + // Limit the scope of the schema txn. + let schema_txn = schema.write(); + let idxmeta = schema_txn.get_idxmeta_set(); + be.write(idxmeta) + }; let r = be_wr_txn .reindex(&mut audit) .and_then(|_| be_wr_txn.commit(&mut audit)); @@ -1562,7 +1569,7 @@ pub fn recover_account_core(config: Configuration, name: String, password: Strin }; } -pub fn create_server_core(config: Configuration) -> Result { +pub async fn create_server_core(config: Configuration) -> Result { // Until this point, we probably want to write to the log macro fns. if config.integration_test_config.is_some() { @@ -1611,6 +1618,7 @@ pub fn create_server_core(config: Configuration) -> Result { return Err(()); } }; + // Any pre-start tasks here. match &config.integration_test_config { Some(itc) => { @@ -1641,23 +1649,57 @@ pub fn create_server_core(config: Configuration) -> Result { } None => {} } + + let ldap = match LdapServer::new(&mut audit, &idms) { + Ok(l) => l, + Err(e) => { + audit.write_log(); + error!("Unable to start LdapServer -> {:?}", e); + return Err(()); + } + }; + log_tx.send(Some(audit)).unwrap_or_else(|_| { error!("CRITICAL: UNABLE TO COMMIT LOGS"); }); - // Arc the idms. + // Arc the idms and ldap let idms_arc = Arc::new(idms); + let ldap_arc = Arc::new(ldap); // Pass it to the actor for threading. // Start the read query server with the given be path: future config - let server_read_addr = - QueryServerReadV1::start(log_tx.clone(), qs.clone(), idms_arc.clone(), config.threads); + let server_read_addr = QueryServerReadV1::start( + log_tx.clone(), + qs.clone(), + idms_arc.clone(), + ldap_arc.clone(), + config.threads, + ); // Start the write thread let server_write_addr = QueryServerWriteV1::start(log_tx.clone(), qs, idms_arc); // Setup timed events associated to the write thread let _int_addr = IntervalActor::new(server_write_addr.clone()).start(); + // If we have been requested to init LDAP, configure it now. + match &config.ldapaddress { + Some(la) => { + let opt_ldap_tls_params = match setup_tls(&config) { + Ok(t) => t, + Err(e) => { + error!("Failed to configure LDAP TLS parameters -> {:?}", e); + return Err(()); + } + }; + ldaps::create_ldap_server(la.as_str(), opt_ldap_tls_params, server_read_addr.clone()) + .await?; + } + None => { + debug!("LDAP not requested, skipping"); + } + } + // Copy the max size let secure_cookies = config.secure_cookies; // domain will come from the qs now! 
@@ -1695,7 +1737,8 @@ pub fn create_server_core(config: Configuration) -> Result { // .app_data(web::Json::::configure(|cfg| { cfg .app_data( web::JsonConfig::default() - .limit(4096) + // Currently 4MB + .limit(4194304) .error_handler(|err, _req| { let s = format!("{}", err); error::InternalError::from_response(err, HttpResponse::BadRequest().json(s)) @@ -1872,8 +1915,8 @@ pub fn create_server_core(config: Configuration) -> Result { server.bind(config.address) } }; - server.expect("Failed to initialise server!").run(); + info!("ready to rock! 🤘"); Ok(ServerCtx::new(System::current(), log_tx, log_thread)) diff --git a/kanidmd/src/lib/entry.rs b/kanidmd/src/lib/entry.rs index e58625e27..284da5f6a 100644 --- a/kanidmd/src/lib/entry.rs +++ b/kanidmd/src/lib/entry.rs @@ -44,6 +44,7 @@ use kanidm_proto::v1::{OperationError, SchemaError}; use crate::be::dbentry::{DbEntry, DbEntryV1, DbEntryVers}; +use ldap3_server::simple::{LdapPartialAttribute, LdapSearchResultEntry}; use std::collections::btree_map::Iter as BTreeIter; use std::collections::btree_set::Iter as BTreeSetIter; use std::collections::BTreeMap; @@ -1332,6 +1333,47 @@ impl Entry { .collect(); Ok(ProtoEntry { attrs: attrs? }) } + + pub fn to_ldap( + &self, + audit: &mut AuditScope, + qs: &mut QueryServerReadTransaction, + basedn: &str, + ) -> Result { + let (attr, rdn) = self + .get_ava_single("spn") + .map(|v| ("spn", v.to_proto_string_clone())) + .or_else(|| { + self.get_ava_single("name") + .map(|v| ("name", v.to_proto_string_clone())) + }) + .unwrap_or_else(|| ("uuid", self.get_uuid().to_hyphenated_ref().to_string())); + + let dn = format!("{}={},{}", attr, rdn, basedn); + + let attributes: Result, _> = self + .attrs + .iter() + .map(|(k, vs)| { + let pvs: Result, _> = + vs.iter().map(|v| qs.resolve_value(audit, v)).collect(); + let pvs = pvs?; + let pvs = if k == "memberof" || k == "member" { + pvs.into_iter() + .map(|s| format!("spn={},{}", s, basedn)) + .collect() + } else { + pvs + }; + Ok(LdapPartialAttribute { + atype: k.clone(), + vals: pvs, + }) + }) + .collect(); + let attributes = attributes?; + Ok(LdapSearchResultEntry { dn, attributes }) + } } // impl Entry { diff --git a/kanidmd/src/lib/event.rs b/kanidmd/src/lib/event.rs index e6e394970..8084089e4 100644 --- a/kanidmd/src/lib/event.rs +++ b/kanidmd/src/lib/event.rs @@ -105,18 +105,9 @@ impl Event { pub fn from_ro_request( audit: &mut AuditScope, qs: &mut QueryServerReadTransaction, - user_uuid: &str, + user_uuid: &Uuid, ) -> Result { - // Do we need to check or load the entry from the user_uuid? - // In the future, probably yes. - // - // For now, no. 
- let u = try_audit!( - audit, - Uuid::parse_str(user_uuid).map_err(|_| OperationError::InvalidUuid) - ); - - let e = try_audit!(audit, qs.internal_search_uuid(audit, &u)); + let e = try_audit!(audit, qs.internal_search_uuid(audit, &user_uuid)); Ok(Event { origin: EventOrigin::User(e), @@ -438,6 +429,27 @@ impl SearchEvent { } } + pub(crate) fn new_ext_impersonate_uuid( + audit: &mut AuditScope, + qs: &mut QueryServerReadTransaction, + euuid: &Uuid, + filter: Filter, + attrs: Option>, + ) -> Result { + Ok(SearchEvent { + event: Event::from_ro_request(audit, qs, euuid)?, + filter: filter + .clone() + .into_ignore_hidden() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?, + filter_orig: filter + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?, + attrs, + }) + } + #[cfg(test)] pub unsafe fn new_internal_invalid(filter: Filter) -> Self { SearchEvent { diff --git a/kanidmd/src/lib/filter.rs b/kanidmd/src/lib/filter.rs index d7404fffa..8d7d33938 100644 --- a/kanidmd/src/lib/filter.rs +++ b/kanidmd/src/lib/filter.rs @@ -17,6 +17,7 @@ use crate::server::{ use crate::value::{IndexType, PartialValue}; use kanidm_proto::v1::Filter as ProtoFilter; use kanidm_proto::v1::{OperationError, SchemaError}; +use ldap3_server::simple::LdapFilter; use std::cmp::{Ordering, PartialOrd}; use std::collections::BTreeSet; use std::iter; @@ -419,10 +420,12 @@ impl Filter { f: &ProtoFilter, qs: &mut QueryServerReadTransaction, ) -> Result { - Ok(Filter { - state: FilterInvalid { - inner: FilterComp::from_ro(audit, f, qs)?, - }, + lperf_segment!(audit, "filter::from_ro", || { + Ok(Filter { + state: FilterInvalid { + inner: FilterComp::from_ro(audit, f, qs)?, + }, + }) }) } @@ -431,10 +434,26 @@ impl Filter { f: &ProtoFilter, qs: &mut QueryServerWriteTransaction, ) -> Result { - Ok(Filter { - state: FilterInvalid { - inner: FilterComp::from_rw(audit, f, qs)?, - }, + lperf_segment!(audit, "filter::from_rw", || { + Ok(Filter { + state: FilterInvalid { + inner: FilterComp::from_rw(audit, f, qs)?, + }, + }) + }) + } + + pub fn from_ldap_ro( + audit: &mut AuditScope, + f: &LdapFilter, + qs: &mut QueryServerReadTransaction, + ) -> Result { + lperf_segment!(audit, "filter::from_ldap_ro", || { + Ok(Filter { + state: FilterInvalid { + inner: FilterComp::from_ldap_ro(audit, f, qs)?, + }, + }) }) } } @@ -654,6 +673,30 @@ impl FilterComp { ProtoFilter::SelfUUID => FilterComp::SelfUUID, }) } + + fn from_ldap_ro( + audit: &mut AuditScope, + f: &LdapFilter, + qs: &mut QueryServerReadTransaction, + ) -> Result { + Ok(match f { + LdapFilter::And(l) => FilterComp::And( + l.iter() + .map(|f| Self::from_ldap_ro(audit, f, qs)) + .collect::, _>>()?, + ), + LdapFilter::Or(l) => FilterComp::Or( + l.iter() + .map(|f| Self::from_ldap_ro(audit, f, qs)) + .collect::, _>>()?, + ), + LdapFilter::Not(l) => FilterComp::AndNot(Box::new(Self::from_ldap_ro(audit, l, qs)?)), + LdapFilter::Equality(a, v) => { + FilterComp::Eq(a.clone(), qs.clone_partialvalue(audit, a, v)?) 
+ } + LdapFilter::Present(a) => FilterComp::Pres(a.clone()), + }) + } } /* We only configure partial eq if cfg test on the invalid/valid types */ diff --git a/kanidmd/src/lib/idm/account.rs b/kanidmd/src/lib/idm/account.rs index 6f3cd683d..ecbc3a4d3 100644 --- a/kanidmd/src/lib/idm/account.rs +++ b/kanidmd/src/lib/idm/account.rs @@ -130,6 +130,7 @@ impl Account { Some(UserAuthToken { name: self.name.clone(), + spn: self.spn.clone(), displayname: self.name.clone(), uuid: self.uuid.to_hyphenated_ref().to_string(), application: None, diff --git a/kanidmd/src/lib/idm/event.rs b/kanidmd/src/lib/idm/event.rs index 7e35b89e4..c746661bb 100644 --- a/kanidmd/src/lib/idm/event.rs +++ b/kanidmd/src/lib/idm/event.rs @@ -332,3 +332,39 @@ impl VerifyTOTPEvent { } } } + +#[derive(Debug)] +pub struct LdapAuthEvent { + // pub event: Event, + pub target: Uuid, + pub cleartext: String, +} + +impl LdapAuthEvent { + /* + #[cfg(test)] + pub fn new_internal(target: &Uuid, cleartext: &str) -> Self { + LdapAuthEvent { + // event: Event::from_internal(), + target: *target, + cleartext: cleartext.to_string(), + } + } + */ + + pub fn from_parts( + _audit: &mut AuditScope, + // qs: &mut QueryServerReadTransaction, + // uat: Option, + target: Uuid, + cleartext: String, + ) -> Result { + // let e = Event::from_ro_uat(audit, qs, uat)?; + + Ok(LdapAuthEvent { + // event: e, + target, + cleartext, + }) + } +} diff --git a/kanidmd/src/lib/idm/server.rs b/kanidmd/src/lib/idm/server.rs index 9a851d8bc..445b98b1b 100644 --- a/kanidmd/src/lib/idm/server.rs +++ b/kanidmd/src/lib/idm/server.rs @@ -1,17 +1,18 @@ use crate::audit::AuditScope; -use crate::constants::UUID_SYSTEM_CONFIG; use crate::constants::{AUTH_SESSION_TIMEOUT, MFAREG_SESSION_TIMEOUT, PW_MIN_LENGTH}; +use crate::constants::{UUID_ANONYMOUS, UUID_SYSTEM_CONFIG}; use crate::event::{AuthEvent, AuthEventStep, AuthResult}; use crate::idm::account::Account; use crate::idm::authsession::AuthSession; use crate::idm::event::{ - GeneratePasswordEvent, GenerateTOTPEvent, PasswordChangeEvent, RadiusAuthTokenEvent, - RegenerateRadiusSecretEvent, UnixGroupTokenEvent, UnixPasswordChangeEvent, UnixUserAuthEvent, - UnixUserTokenEvent, VerifyTOTPEvent, + GeneratePasswordEvent, GenerateTOTPEvent, LdapAuthEvent, PasswordChangeEvent, + RadiusAuthTokenEvent, RegenerateRadiusSecretEvent, UnixGroupTokenEvent, + UnixPasswordChangeEvent, UnixUserAuthEvent, UnixUserTokenEvent, VerifyTOTPEvent, }; use crate::idm::mfareg::{MfaRegCred, MfaRegNext, MfaRegSession, MfaReqInit, MfaReqStep}; use crate::idm::radius::RadiusAccount; use crate::idm::unix::{UnixGroup, UnixUserAccount}; +use crate::ldap::LdapBoundToken; use crate::server::QueryServerReadTransaction; use crate::server::{QueryServer, QueryServerTransaction, QueryServerWriteTransaction}; use crate::utils::{password_from_random, readable_password_from_random, uuid_from_duration, SID}; @@ -246,6 +247,43 @@ impl<'a> IdmServerWriteTransaction<'a> { account.verify_unix_credential(au, uae.cleartext.as_str()) } + pub fn auth_ldap( + &mut self, + au: &mut AuditScope, + lae: LdapAuthEvent, + _ct: Duration, + ) -> Result, OperationError> { + // TODO #59: Implement soft lock checking for unix creds here! + + let account_entry = try_audit!(au, self.qs_read.internal_search_uuid(au, &lae.target)); + /* !!! This would probably be better if we DIDN'T use the Unix/Account types ... ? */ + + // if anonymous + if lae.target == *UUID_ANONYMOUS { + // TODO: #59 We should have checked if anonymous was locked by now! 
+ let account = Account::try_from_entry_ro(au, account_entry, &mut self.qs_read)?; + Ok(Some(LdapBoundToken { + spn: account.spn.clone(), + uuid: UUID_ANONYMOUS.clone(), + effective_uuid: UUID_ANONYMOUS.clone(), + })) + } else { + let account = UnixUserAccount::try_from_entry_ro(au, account_entry, &mut self.qs_read)?; + if account + .verify_unix_credential(au, lae.cleartext.as_str())? + .is_some() + { + Ok(Some(LdapBoundToken { + spn: account.spn.clone(), + uuid: account.uuid.clone(), + effective_uuid: UUID_ANONYMOUS.clone(), + })) + } else { + Ok(None) + } + } + } + pub fn commit(self, au: &mut AuditScope) -> Result<(), OperationError> { lperf_segment!(au, "idm::server::IdmServerWriteTransaction::commit", || { self.sessions.commit(); diff --git a/kanidmd/src/lib/ldap.rs b/kanidmd/src/lib/ldap.rs new file mode 100644 index 000000000..f1c3bf846 --- /dev/null +++ b/kanidmd/src/lib/ldap.rs @@ -0,0 +1,394 @@ +use crate::audit::AuditScope; +use crate::constants::{STR_UUID_DOMAIN_INFO, UUID_ANONYMOUS, UUID_DOMAIN_INFO}; +use crate::event::SearchEvent; +use crate::filter::Filter; +use crate::idm::event::LdapAuthEvent; +use crate::idm::server::IdmServer; +use crate::server::QueryServerTransaction; +use kanidm_proto::v1::OperationError; +use ldap3_server::simple::*; +use std::collections::BTreeSet; +use std::iter; +use std::time::SystemTime; +use uuid::Uuid; + +use regex::Regex; + +pub enum LdapResponseState { + Unbind, + Disconnect(LdapMsg), + Bind(LdapBoundToken, LdapMsg), + Respond(LdapMsg), + MultiPartResponse(Vec), + BindMultiPartResponse(LdapBoundToken, Vec), +} + +#[derive(Debug, Clone)] +pub struct LdapBoundToken { + pub spn: String, + pub uuid: Uuid, + // For now, always anonymous + pub effective_uuid: Uuid, +} + +pub struct LdapServer { + rootdse: LdapSearchResultEntry, + basedn: String, + dnre: Regex, +} + +impl LdapServer { + pub fn new(au: &mut AuditScope, idms: &IdmServer) -> Result { + let mut idms_prox_read = idms.proxy_read(); + // This is the rootdse path. + // get the domain_info item + let domain_entry = idms_prox_read + .qs_read + .internal_search_uuid(au, &UUID_DOMAIN_INFO)?; + + let domain_name = domain_entry + .get_ava_single_string("domain_name") + .ok_or(OperationError::InvalidEntryState)?; + + let basedn = ldap_domain_to_dc(domain_name.as_str()); + + let dnre = Regex::new(format!("^((?P[^=]+)=(?P[^=]+),)?{}$", basedn).as_str()) + .map_err(|_| OperationError::InvalidEntryState)?; + + let rootdse = LdapSearchResultEntry { + dn: "".to_string(), + attributes: vec![ + LdapPartialAttribute { + atype: "objectClass".to_string(), + vals: vec!["top".to_string()], + }, + LdapPartialAttribute { + atype: "vendorName".to_string(), + vals: vec!["Kanidm Project".to_string()], + }, + LdapPartialAttribute { + atype: "vendorVersion".to_string(), + vals: vec!["kanidm_ldap_1.0.0".to_string()], + }, + LdapPartialAttribute { + atype: "supportedLDAPVersion".to_string(), + vals: vec!["3".to_string()], + }, + LdapPartialAttribute { + atype: "supportedExtension".to_string(), + vals: vec!["1.3.6.1.4.1.4203.1.11.3".to_string()], + }, + LdapPartialAttribute { + atype: "defaultnamingcontext".to_string(), + vals: vec![basedn.clone()], + }, + ], + }; + + Ok(LdapServer { + basedn, + rootdse, + dnre, + }) + } + + fn do_search( + &self, + au: &mut AuditScope, + idms: &IdmServer, + sr: &SearchRequest, + uat: &LdapBoundToken, + // eventid: &Uuid, + ) -> Result, OperationError> { + // If the request is "", Base, Present("objectclass"), [], then we want the rootdse. 
+ if sr.base == "" && sr.scope == LdapSearchScope::Base { + Ok(vec![ + sr.gen_result_entry(self.rootdse.clone()), + sr.gen_success(), + ]) + } else { + // We want something else apparently. Need to do some more work ... + // Parse the operation and make sure it's sane before we start the txn. + + // This scoping returns an extra filter component. + + let (opt_attr, opt_value) = match self.dnre.captures(sr.base.as_str()) { + Some(caps) => ( + caps.name("attr").map(|v| v.as_str().to_string()), + caps.name("val").map(|v| v.as_str().to_string()), + ), + None => { + return Err(OperationError::InvalidRequestState); + } + }; + + let req_dn = match (opt_attr, opt_value) { + (Some(a), Some(v)) => Some((a, v)), + (None, None) => None, + _ => { + return Err(OperationError::InvalidRequestState); + } + }; + + ltrace!(au, "RDN -> {:?}", req_dn); + + // Map the Some(a,v) to ...? + + let ext_filter = match (&sr.scope, req_dn) { + (LdapSearchScope::OneLevel, Some(_r)) => return Ok(vec![sr.gen_success()]), + (LdapSearchScope::OneLevel, None) => { + // exclude domain_info + Some(LdapFilter::Not(Box::new(LdapFilter::Equality( + "uuid".to_string(), + STR_UUID_DOMAIN_INFO.to_string(), + )))) + } + (LdapSearchScope::Base, Some((a, v))) => Some(LdapFilter::Equality(a, v)), + (LdapSearchScope::Base, None) => { + // domain_info + Some(LdapFilter::Equality( + "uuid".to_string(), + STR_UUID_DOMAIN_INFO.to_string(), + )) + } + (LdapSearchScope::Subtree, Some((a, v))) => Some(LdapFilter::Equality(a, v)), + (LdapSearchScope::Subtree, None) => { + // No filter changes needed. + None + } + }; + + // TODO #67: limit the number of attributes here! + let attrs = if sr.attrs.len() == 0 { + // If [], then "all" attrs + None + } else { + let mut all_attrs = false; + let attrs: BTreeSet<_> = sr + .attrs + .iter() + .filter_map(|a| { + if a == "*" { + // if *, then all + all_attrs = true; + None + } else if a == "+" { + // if +, then ignore (kanidm doesn't have operational) this part. + None + } else { + // if list, add to the search + Some(a.clone()) + } + }) + .collect(); + if all_attrs { + None + } else { + Some(attrs) + } + }; + + ltrace!(au, "Attrs -> {:?}", attrs); + + lperf_segment!(au, "ldap::do_search", || { + // Now start the txn - we need it for resolving filter components. + let mut idm_read = idms.proxy_read(); + + // join the filter, with ext_filter + let lfilter = match ext_filter { + Some(ext) => LdapFilter::And(vec![ + sr.filter.clone(), + ext, + LdapFilter::Not(Box::new(LdapFilter::Or(vec![ + LdapFilter::Equality("class".to_string(), "classtype".to_string()), + LdapFilter::Equality("class".to_string(), "attributetype".to_string()), + LdapFilter::Equality( + "class".to_string(), + "access_control_profile".to_string(), + ), + ]))), + ]), + None => LdapFilter::And(vec![ + sr.filter.clone(), + LdapFilter::Not(Box::new(LdapFilter::Or(vec![ + LdapFilter::Equality("class".to_string(), "classtype".to_string()), + LdapFilter::Equality("class".to_string(), "attributetype".to_string()), + LdapFilter::Equality( + "class".to_string(), + "access_control_profile".to_string(), + ), + ]))), + ]), + }; + + ltrace!(au, "ldapfilter -> {:?}", lfilter); + + // Kanidm Filter from LdapFilter + let filter = + Filter::from_ldap_ro(au, &lfilter, &mut idm_read.qs_read).map_err(|e| { + lrequest_error!(au, "invalid ldap filter {:?}", e); + e + })?; + + // Build the event, with the permissions from effective_uuid + // (should always be anonymous at the moment) + // ! Remember, searchEvent wraps to ignore hidden for us. 
+ let se = lperf_segment!(au, "ldap::do_search", || { + SearchEvent::new_ext_impersonate_uuid( + au, + &mut idm_read.qs_read, + &uat.effective_uuid, + filter, + attrs, + ) + })?; + + let res = idm_read.qs_read.search_ext(au, &se).map_err(|e| { + ladmin_error!(au, "search failure {:?}", e); + e + })?; + + // These have already been fully reduced, so we can just slap it into the result. + let lres = lperf_segment!(au, "ldap::do_search", || { + let lres: Result, _> = res + .into_iter() + .map(|e| { + e.to_ldap(au, &mut idm_read.qs_read, self.basedn.as_str()) + // if okay, wrap in a ldap msg. + .map(|r| sr.gen_result_entry(r)) + }) + .chain(iter::once(Ok(sr.gen_success()))) + .collect(); + lres + }); + + let lres = lres.map_err(|e| { + ladmin_error!(au, "entry resolve failure {:?}", e); + e + })?; + + Ok(lres) + }) + } + } + + fn do_bind( + &self, + au: &mut AuditScope, + idms: &IdmServer, + dn: &str, + pw: &str, + ) -> Result, OperationError> { + let mut idm_write = idms.write(); + + let target_uuid: Uuid = if dn == "" && pw == "" { + UUID_ANONYMOUS.clone() + } else { + idm_write.qs_read.name_to_uuid(au, dn).map_err(|e| { + ladmin_info!(au, "Error resolving id to target {:?}", e); + e + })? + }; + + let ct = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("Clock failure!"); + + let lae = LdapAuthEvent::from_parts(au, target_uuid, pw.to_string())?; + idm_write + .auth_ldap(au, lae, ct) + .and_then(|r| idm_write.commit(au).map(|_| r)) + } + + pub fn do_op( + &self, + au: &mut AuditScope, + idms: &IdmServer, + server_op: ServerOps, + uat: Option, + eventid: &Uuid, + ) -> Result { + match server_op { + ServerOps::SimpleBind(sbr) => self + .do_bind(au, idms, sbr.dn.as_str(), sbr.pw.as_str()) + .map(|r| match r { + Some(lbt) => LdapResponseState::Bind(lbt, sbr.gen_success()), + None => LdapResponseState::Respond(sbr.gen_invalid_cred()), + }) + .or_else(|e| { + let (rc, msg) = operationerr_to_ldapresultcode(e); + Ok(LdapResponseState::Respond(sbr.gen_error(rc, msg))) + }), + ServerOps::Search(sr) => match uat { + Some(u) => self + .do_search(au, idms, &sr, &u) + .map(LdapResponseState::MultiPartResponse) + .or_else(|e| { + let (rc, msg) = operationerr_to_ldapresultcode(e); + Ok(LdapResponseState::Respond(sr.gen_error(rc, msg))) + }), + None => { + // Search can occur without a bind, so bind first. + let lbt = match self.do_bind(au, idms, "", "") { + Ok(Some(lbt)) => lbt, + Ok(None) => { + return Ok(LdapResponseState::Respond( + sr.gen_error(LdapResultCode::InvalidCredentials, "".to_string()), + )) + } + Err(e) => { + let (rc, msg) = operationerr_to_ldapresultcode(e); + return Ok(LdapResponseState::Respond(sr.gen_error(rc, msg))); + } + }; + // If okay, do the search. 
+ self.do_search(au, idms, &sr, &lbt) + .map(|r| LdapResponseState::BindMultiPartResponse(lbt, r)) + .or_else(|e| { + let (rc, msg) = operationerr_to_ldapresultcode(e); + Ok(LdapResponseState::Respond(sr.gen_error(rc, msg))) + }) + } + }, + ServerOps::Unbind(_) => { + // No need to notify on unbind (per rfc4511) + Ok(LdapResponseState::Unbind) + } + ServerOps::Whoami(wr) => match uat { + Some(u) => Ok(LdapResponseState::Respond( + wr.gen_success(format!("u: {}", u.spn).as_str()), + )), + None => Ok(LdapResponseState::Respond(wr.gen_operror( + format!("Unbound Connection {:?}", &eventid).as_str(), + ))), + }, + } // end match server op + } +} + +fn ldap_domain_to_dc(input: &str) -> String { + let mut output: String = String::new(); + input.split('.').for_each(|dc| { + output.push_str("dc="); + output.push_str(dc); + output.push_str(","); + }); + // Remove the last ',' + output.pop(); + output +} + +fn operationerr_to_ldapresultcode(e: OperationError) -> (LdapResultCode, String) { + match e { + OperationError::InvalidRequestState => { + (LdapResultCode::ConstraintViolation, "".to_string()) + } + OperationError::InvalidAttributeName(s) | OperationError::InvalidAttribute(s) => { + (LdapResultCode::InvalidAttributeSyntax, s) + } + OperationError::SchemaViolation(se) => { + (LdapResultCode::UnwillingToPerform, format!("{:?}", se)) + } + e => (LdapResultCode::Other, format!("{:?}", e)), + } +} diff --git a/kanidmd/src/lib/lib.rs b/kanidmd/src/lib/lib.rs index aba3d71ec..3404d3b79 100644 --- a/kanidmd/src/lib/lib.rs +++ b/kanidmd/src/lib/lib.rs @@ -25,6 +25,7 @@ mod entry; mod event; mod filter; mod interval; +pub(crate) mod ldap; mod modify; mod value; #[macro_use] diff --git a/kanidmd/src/lib/plugins/domain.rs b/kanidmd/src/lib/plugins/domain.rs index a9c90dc3f..22a710fc2 100644 --- a/kanidmd/src/lib/plugins/domain.rs +++ b/kanidmd/src/lib/plugins/domain.rs @@ -5,7 +5,6 @@ // which is importart for management of the replication topo and trust // relationships. use crate::plugins::Plugin; -use uuid::Uuid; use crate::audit::AuditScope; use crate::constants::UUID_DOMAIN_INFO; @@ -17,9 +16,7 @@ use kanidm_proto::v1::OperationError; lazy_static! 
{ static ref PVCLASS_DOMAIN_INFO: PartialValue = PartialValue::new_class("domain_info"); - static ref PVUUID_DOMAIN_INFO: PartialValue = PartialValue::new_uuid( - Uuid::parse_str(UUID_DOMAIN_INFO).expect("Unable to parse constant UUID_DOMAIN_INFO") - ); + static ref PVUUID_DOMAIN_INFO: PartialValue = PartialValue::new_uuidr(&UUID_DOMAIN_INFO); } pub struct Domain {} @@ -63,16 +60,14 @@ mod tests { use crate::constants::UUID_DOMAIN_INFO; use crate::server::QueryServerTransaction; use crate::value::PartialValue; - use uuid::Uuid; + // use uuid::Uuid; // test we can create and generate the id #[test] fn test_domain_generate_uuid() { run_test!(|server: &QueryServer, au: &mut AuditScope| { let mut server_txn = server.write(duration_from_epoch_now()); - let uuid_domain = Uuid::parse_str(UUID_DOMAIN_INFO) - .expect("Unable to parse constant UUID_DOMAIN_INFO"); let e_dom = server_txn - .internal_search_uuid(au, &uuid_domain) + .internal_search_uuid(au, &UUID_DOMAIN_INFO) .expect("must not fail"); let u_dom = server_txn.get_domain_uuid(); diff --git a/kanidmd/src/lib/plugins/spn.rs b/kanidmd/src/lib/plugins/spn.rs index c4cbe49d7..6984ae9f4 100644 --- a/kanidmd/src/lib/plugins/spn.rs +++ b/kanidmd/src/lib/plugins/spn.rs @@ -12,17 +12,13 @@ use crate::server::{ use crate::value::PartialValue; // use crate::value::{PartialValue, Value}; use kanidm_proto::v1::{ConsistencyError, OperationError}; -use uuid::Uuid; pub struct Spn {} lazy_static! { - static ref UUID_DOMAIN_INFO_T: Uuid = - Uuid::parse_str(UUID_DOMAIN_INFO).expect("Unable to parse constant UUID_DOMAIN_INFO"); static ref CLASS_GROUP: PartialValue = PartialValue::new_class("group"); static ref CLASS_ACCOUNT: PartialValue = PartialValue::new_class("account"); - static ref PV_UUID_DOMAIN_INFO: PartialValue = PartialValue::new_uuids(UUID_DOMAIN_INFO) - .expect("Unable to parse constant UUID_DOMAIN_INFO"); + static ref PV_UUID_DOMAIN_INFO: PartialValue = PartialValue::new_uuidr(&UUID_DOMAIN_INFO); } impl Spn { @@ -30,7 +26,7 @@ impl Spn { au: &mut AuditScope, qs: &mut QueryServerWriteTransaction, ) -> Result { - qs.internal_search_uuid(au, &UUID_DOMAIN_INFO_T) + qs.internal_search_uuid(au, &UUID_DOMAIN_INFO) .and_then(|e| { e.get_ava_single_string("domain_name") .ok_or(OperationError::InvalidEntryState) @@ -45,7 +41,7 @@ impl Spn { au: &mut AuditScope, qs: &mut QueryServerReadTransaction, ) -> Result { - qs.internal_search_uuid(au, &UUID_DOMAIN_INFO_T) + qs.internal_search_uuid(au, &UUID_DOMAIN_INFO) .and_then(|e| { e.get_ava_single_string("domain_name") .ok_or(OperationError::InvalidEntryState) diff --git a/kanidmd/src/lib/schema.rs b/kanidmd/src/lib/schema.rs index aea087f0e..01a5b7357 100644 --- a/kanidmd/src/lib/schema.rs +++ b/kanidmd/src/lib/schema.rs @@ -458,7 +458,7 @@ impl SchemaClass { pub trait SchemaTransaction { fn get_classes(&self) -> BptreeMapReadSnapshot; fn get_attributes(&self) -> BptreeMapReadSnapshot; - fn get_idxmeta(&self) -> BTreeSet<(String, IndexType)>; + fn get_idxmeta(&self) -> &BTreeSet<(String, IndexType)>; fn validate(&self, _audit: &mut AuditScope) -> Vec> { let mut res = Vec::new(); @@ -546,7 +546,7 @@ pub trait SchemaTransaction { .collect() } - fn get_idxmeta_set(&self) -> BTreeSet<(String, IndexType)> { + fn get_idxmeta_set(&self) -> &BTreeSet<(String, IndexType)> { self.get_idxmeta() } } @@ -1370,8 +1370,8 @@ impl<'a> SchemaTransaction for SchemaWriteTransaction<'a> { self.attributes.to_snapshot() } - fn get_idxmeta(&self) -> BTreeSet<(String, IndexType)> { - self.idxmeta.clone() + fn get_idxmeta(&self) -> 
&BTreeSet<(String, IndexType)> { + &self.idxmeta } } @@ -1384,8 +1384,8 @@ impl SchemaTransaction for SchemaReadTransaction { self.attributes.to_snapshot() } - fn get_idxmeta(&self) -> BTreeSet<(String, IndexType)> { - (*self.idxmeta).clone() + fn get_idxmeta(&self) -> &BTreeSet<(String, IndexType)> { + &(*self.idxmeta) } } diff --git a/kanidmd/src/lib/server.rs b/kanidmd/src/lib/server.rs index 1dd52e94f..e0f76b301 100644 --- a/kanidmd/src/lib/server.rs +++ b/kanidmd/src/lib/server.rs @@ -124,7 +124,12 @@ pub trait QueryServerTransaction { let schema = self.get_schema(); let idxmeta = schema.get_idxmeta_set(); // Now resolve all references and indexes. - let vfr = try_audit!(au, se.filter.resolve(&se.event, Some(&idxmeta))); + let vfr = try_audit!( + au, + lperf_segment!(au, "server::search", || { + se.filter.resolve(&se.event, Some(idxmeta)) + }) + ); // NOTE: We currently can't build search plugins due to the inability to hand // the QS wr/ro to the plugin trait. However, there shouldn't be a need for search @@ -155,7 +160,7 @@ pub trait QueryServerTransaction { lperf_segment!(au, "server::exists", || { let schema = self.get_schema(); let idxmeta = schema.get_idxmeta_set(); - let vfr = try_audit!(au, ee.filter.resolve(&ee.event, Some(&idxmeta))); + let vfr = try_audit!(au, ee.filter.resolve(&ee.event, Some(idxmeta))); self.get_be_txn() .exists(au, &vfr) @@ -779,7 +784,7 @@ pub struct QueryServer { impl QueryServer { pub fn new(be: Backend, schema: Schema) -> Self { let (s_uuid, d_uuid) = { - let mut wr = be.write(BTreeSet::new()); + let mut wr = be.write(&BTreeSet::new()); (wr.get_db_s_uuid(), wr.get_db_d_uuid()) }; @@ -2108,7 +2113,7 @@ impl<'a> QueryServerWriteTransaction<'a> { ) -> Result<(), OperationError> { let modl = ModifyList::new_purge_and_set("domain_name", Value::new_iname_s(new_domain_name)); - let udi = PartialValue::new_uuids(UUID_DOMAIN_INFO).ok_or(OperationError::InvalidUuid)?; + let udi = PartialValue::new_uuidr(&UUID_DOMAIN_INFO); let filt = filter_all!(f_eq("uuid", udi)); self.internal_modify(audit, filt, modl) } @@ -3544,10 +3549,7 @@ mod tests { // ++ Mod domain name and name to be the old type. let me_dn = unsafe { ModifyEvent::new_internal_invalid( - filter!(f_eq( - "uuid", - PartialValue::new_uuids(UUID_DOMAIN_INFO).expect("invalid uuid") - )), + filter!(f_eq("uuid", PartialValue::new_uuidr(&UUID_DOMAIN_INFO))), ModifyList::new_list(vec![ Modify::Purged("name".to_string()), Modify::Purged("domain_name".to_string()), @@ -3590,7 +3592,7 @@ mod tests { // Assert that it migrated and worked as expected. let mut server_txn = server.write(duration_from_epoch_now()); let domain = server_txn - .internal_search_uuid(audit, &Uuid::parse_str(UUID_DOMAIN_INFO).unwrap()) + .internal_search_uuid(audit, &UUID_DOMAIN_INFO) .expect("failed"); // ++ assert all names are iname domain diff --git a/kanidmd/src/lib/value.rs b/kanidmd/src/lib/value.rs index bc3c25330..98a21cd9e 100644 --- a/kanidmd/src/lib/value.rs +++ b/kanidmd/src/lib/value.rs @@ -18,7 +18,12 @@ lazy_static! { static ref SPN_RE: Regex = Regex::new("(?P[^@]+)@(?P[^@]+)").expect("Invalid SPN regex found"); static ref INAME_RE: Regex = - Regex::new("^(_.*|.*@.*|\\d+|.*\\s.*)$").expect("Invalid Iname regex found"); + Regex::new("^(_.*|.*(\\s|@|,|=).*|\\d+)$").expect("Invalid Iname regex found"); + // ^ ^ ^ + // | | \- must not be only integers + // | \- must not contain whitespace, @, ',', = + // \- must not start with _ + // Them's be the rules. 
} #[allow(non_camel_case_types)] @@ -1503,12 +1508,15 @@ mod tests { * - contain an @ (confuses SPN) * - can not start with _ (... I forgot but it's important I swear >.>) * - can not have spaces (confuses too many systems :() + * - can not have = or , (confuses ldap) */ let inv1 = Value::new_iname_s("1234"); let inv2 = Value::new_iname_s("bc23f637-4439-4c07-b95d-eaed0d9e4b8b"); let inv3 = Value::new_iname_s("hello@test.com"); let inv4 = Value::new_iname_s("_bad"); let inv5 = Value::new_iname_s("no spaces I'm sorry :("); + let inv6 = Value::new_iname_s("bad=equals"); + let inv7 = Value::new_iname_s("bad,comma"); let val1 = Value::new_iname_s("William"); let val2 = Value::new_iname_s("this_is_okay"); @@ -1520,6 +1528,8 @@ mod tests { assert!(!inv3.validate()); assert!(!inv4.validate()); assert!(!inv5.validate()); + assert!(!inv6.validate()); + assert!(!inv7.validate()); assert!(val1.validate()); assert!(val2.validate()); diff --git a/kanidmd/src/server/main.rs b/kanidmd/src/server/main.rs index 15d9d378d..343438303 100644 --- a/kanidmd/src/server/main.rs +++ b/kanidmd/src/server/main.rs @@ -29,6 +29,8 @@ struct ServerOpt { key_path: Option, #[structopt(short = "b", long = "bindaddr")] bind: Option, + #[structopt(short = "l", long = "ldapbindaddr")] + ldapbind: Option, #[structopt(flatten)] commonopts: CommonOpt, } @@ -112,7 +114,7 @@ async fn main() { if opt.debug() { ::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug"); } else { - ::std::env::set_var("RUST_LOG", "actix_web=info,kanidm=info"); + ::std::env::set_var("RUST_LOG", "actix_web=info,kanidm=warn"); } env_logger::builder() @@ -127,8 +129,9 @@ async fn main() { config.update_db_path(&sopt.commonopts.db_path); config.update_tls(&sopt.ca_path, &sopt.cert_path, &sopt.key_path); config.update_bind(&sopt.bind); + config.update_ldapbind(&sopt.ldapbind); - let sctx = create_server_core(config); + let sctx = create_server_core(config).await; match sctx { Ok(sctx) => { tokio::signal::ctrl_c().await.unwrap();