diff --git a/designs/resource_limits.rst b/designs/resource_limits.rst index 2f37274ad..373f7e71e 100644 --- a/designs/resource_limits.rst +++ b/designs/resource_limits.rst @@ -172,8 +172,10 @@ The session limits would be: * write rate limit (writes over time) * network request size -The system limits that can not be account overridden are: +The entry-specific limits are: * maximum entry size * maximum number of multi value attributes +These are stored as attributes on the entry itself. + diff --git a/kanidm_proto/src/v1.rs b/kanidm_proto/src/v1.rs index ec761dfcd..13bf3f6f8 100644 --- a/kanidm_proto/src/v1.rs +++ b/kanidm_proto/src/v1.rs @@ -101,6 +101,7 @@ pub enum OperationError { PasswordEmpty, PasswordBadListed, CryptographyError, + ResourceLimit, } impl PartialEq for OperationError { @@ -173,6 +174,10 @@ pub struct UserAuthToken { pub groups: Vec, pub claims: Vec, // Should we allow supplemental ava's to be added on request? + pub lim_uidx: bool, + pub lim_rmax: usize, + pub lim_pmax: usize, + pub lim_fmax: usize, } impl fmt::Display for UserAuthToken { diff --git a/kanidmd/src/lib/access.rs b/kanidmd/src/lib/access.rs index 3d52dc691..d99ecd2a4 100644 --- a/kanidmd/src/lib/access.rs +++ b/kanidmd/src/lib/access.rs @@ -31,7 +31,7 @@ use crate::modify::Modify; use crate::server::{QueryServerTransaction, QueryServerWriteTransaction}; use crate::value::PartialValue; -use crate::event::{CreateEvent, DeleteEvent, EventOrigin, ModifyEvent, SearchEvent}; +use crate::event::{CreateEvent, DeleteEvent, Event, EventOrigin, ModifyEvent, SearchEvent}; lazy_static! { static ref CLASS_ACS: PartialValue = PartialValue::new_class("access_control_search"); @@ -320,7 +320,9 @@ impl AccessControlProfile { OperationError::InvalidACPState("Missing acp_targetscope".to_string()) })?; - let receiver_i = Filter::from_rw(audit, &receiver_f, qs).map_err(|e| { + let event = Event::from_internal(); + + let receiver_i = Filter::from_rw(audit, &event, &receiver_f, qs).map_err(|e| { ladmin_error!(audit, "Receiver validation failed {:?}", e); e })?; @@ -329,7 +331,7 @@ impl AccessControlProfile { OperationError::SchemaViolation(e) })?; - let targetscope_i = Filter::from_rw(audit, &targetscope_f, qs).map_err(|e| { + let targetscope_i = Filter::from_rw(audit, &event, &targetscope_f, qs).map_err(|e| { ladmin_error!(audit, "Targetscope validation failed {:?}", e); e })?; diff --git a/kanidmd/src/lib/actors/v1_read.rs b/kanidmd/src/lib/actors/v1_read.rs index 326f9e1d4..346702c2d 100644 --- a/kanidmd/src/lib/actors/v1_read.rs +++ b/kanidmd/src/lib/actors/v1_read.rs @@ -355,13 +355,14 @@ impl Handler for QueryServerReadV1 { // this far.
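The protocol changes above are the public surface of this feature: OperationError gains a ResourceLimit variant so callers can tell "denied by quota" apart from genuine failures, and UserAuthToken gains four per-session limit fields (lim_uidx: whether unindexed searches are allowed, lim_rmax: maximum results, lim_pmax: maximum candidates that may be filter-tested from a partially indexed query, lim_fmax: maximum elements in a submitted filter). Below is a minimal sketch of how a front end might branch on the new variant; the cut-down error enum and the 429 mapping are assumptions for illustration, not part of this patch.

// Sketch only: a simplified stand-in for kanidm_proto::v1::OperationError,
// used to show ResourceLimit being handled as a distinct outcome.
#[derive(Debug, PartialEq)]
enum OperationError {
    NotAuthenticated,
    ResourceLimit,
    Backend,
}

// Hypothetical front-end mapping: a limit rejection becomes a
// "slow down / narrow the query" response rather than a server error.
fn status_for(err: &OperationError) -> u16 {
    match err {
        OperationError::NotAuthenticated => 401,
        OperationError::ResourceLimit => 429,
        OperationError::Backend => 500,
    }
}

fn main() {
    assert_eq!(status_for(&OperationError::ResourceLimit), 429);
}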
let uat = msg.uat.clone().ok_or(OperationError::NotAuthenticated)?; - let srch = match SearchEvent::from_whoami_request(&mut audit, msg.uat, &qs_read) { - Ok(s) => s, - Err(e) => { - ladmin_error!(audit, "Failed to begin whoami: {:?}", e); - return Err(e); - } - }; + let srch = + match SearchEvent::from_whoami_request(&mut audit, msg.uat.as_ref(), &qs_read) { + Ok(s) => s, + Err(e) => { + ladmin_error!(audit, "Failed to begin whoami: {:?}", e); + return Err(e); + } + }; ltrace!(audit, "Begin event {:?}", srch); @@ -500,7 +501,7 @@ impl Handler for QueryServerReadV1 { // Make an event from the request let srch = match SearchEvent::from_target_uuid_request( &mut audit, - msg.uat, + msg.uat.as_ref(), target_uuid, &qs_read, ) { @@ -568,7 +569,7 @@ impl Handler for QueryServerReadV1 { let rate = match RadiusAuthTokenEvent::from_parts( &mut audit, &idm_read.qs_read, - msg.uat, + msg.uat.as_ref(), target_uuid, ) { Ok(s) => s, @@ -624,7 +625,7 @@ impl Handler for QueryServerReadV1 { let rate = match UnixUserTokenEvent::from_parts( &mut audit, &idm_read.qs_read, - msg.uat, + msg.uat.as_ref(), target_uuid, ) { Ok(s) => s, @@ -680,7 +681,7 @@ impl Handler for QueryServerReadV1 { let rate = match UnixGroupTokenEvent::from_parts( &mut audit, &idm_read.qs_read, - msg.uat, + msg.uat.as_ref(), target_uuid, ) { Ok(s) => s, @@ -725,7 +726,7 @@ impl Handler for QueryServerReadV1 { // Make an event from the request let srch = match SearchEvent::from_target_uuid_request( &mut audit, - msg.uat, + msg.uat.as_ref(), target_uuid, &qs_read, ) { @@ -794,7 +795,7 @@ impl Handler for QueryServerReadV1 { // Make an event from the request let srch = match SearchEvent::from_target_uuid_request( &mut audit, - uat, + uat.as_ref(), target_uuid, &qs_read, ) { @@ -866,7 +867,7 @@ impl Handler for QueryServerReadV1 { let uuae = match UnixUserAuthEvent::from_parts( &mut audit, &idm_write.qs_read, - msg.uat, + msg.uat.as_ref(), target_uuid, msg.cred, ) { diff --git a/kanidmd/src/lib/actors/v1_write.rs b/kanidmd/src/lib/actors/v1_write.rs index e8eed0827..ca5dda0f7 100644 --- a/kanidmd/src/lib/actors/v1_write.rs +++ b/kanidmd/src/lib/actors/v1_write.rs @@ -364,14 +364,20 @@ impl QueryServerWriteV1 { e })?; - let mdf = - match ModifyEvent::from_parts(audit, uat, target_uuid, proto_ml, filter, &qs_write) { - Ok(m) => m, - Err(e) => { - ladmin_error!(audit, "Failed to begin modify: {:?}", e); - return Err(e); - } - }; + let mdf = match ModifyEvent::from_parts( + audit, + uat.as_ref(), + target_uuid, + proto_ml, + filter, + &qs_write, + ) { + Ok(m) => m, + Err(e) => { + ladmin_error!(audit, "Failed to begin modify: {:?}", e); + return Err(e); + } + }; ltrace!(audit, "Begin modify event {:?}", mdf); @@ -397,7 +403,7 @@ impl QueryServerWriteV1 { let mdf = match ModifyEvent::from_internal_parts( audit, - uat, + uat.as_ref(), target_uuid, ml, filter, @@ -531,8 +537,12 @@ impl Handler for QueryServerWriteV1 { || { let qs_write = self.qs.write(duration_from_epoch_now()); - let del = match DeleteEvent::from_parts(&mut audit, msg.uat, &msg.filter, &qs_write) - { + let del = match DeleteEvent::from_parts( + &mut audit, + msg.uat.as_ref(), + &msg.filter, + &qs_write, + ) { Ok(d) => d, Err(e) => { ladmin_error!(audit, "Failed to begin delete: {:?}", e); @@ -567,7 +577,10 @@ impl Handler for QueryServerWriteV1 { let qs_write = self.qs.write(duration_from_epoch_now()); let rev = match ReviveRecycledEvent::from_parts( - &mut audit, msg.uat, msg.filter, &qs_write, + &mut audit, + msg.uat.as_ref(), + msg.filter, + &qs_write, ) { Ok(r) => r, Err(e) 
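A pattern repeated through these handler hunks: the event constructors now borrow the token as Option<&UserAuthToken>, and call sites pass msg.uat.as_ref() so the message keeps ownership instead of handing the token away. A self-contained sketch of that borrow, with stand-in types rather than the real message and event structs:

struct UserAuthToken {
    name: String,
}

struct SearchMessage {
    uat: Option<UserAuthToken>,
}

// Stand-in for SearchEvent::from_whoami_request: it only needs to read the
// token, so it takes Option<&UserAuthToken> and rejects None.
fn from_whoami_request(uat: Option<&UserAuthToken>) -> Result<String, &'static str> {
    let uat = uat.ok_or("NotAuthenticated")?;
    Ok(format!("whoami for {}", uat.name))
}

fn main() {
    let msg = SearchMessage {
        uat: Some(UserAuthToken { name: "claire".to_string() }),
    };
    // Option::as_ref turns &Option<UserAuthToken> into Option<&UserAuthToken>,
    // so msg.uat is still usable afterwards.
    let r = from_whoami_request(msg.uat.as_ref());
    assert!(r.is_ok());
    assert!(msg.uat.is_some());
}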
=> { @@ -632,7 +645,7 @@ impl Handler for QueryServerWriteV1 { let pce = PasswordChangeEvent::from_parts( &mut audit, &idms_prox_write.qs_write, - msg.uat, + msg.uat.as_ref(), target_uuid, cleartext, msg.appid, @@ -654,7 +667,7 @@ impl Handler for QueryServerWriteV1 { let gpe = GeneratePasswordEvent::from_parts( &mut audit, &idms_prox_write.qs_write, - msg.uat, + msg.uat.as_ref(), target_uuid, msg.appid, ) @@ -675,7 +688,7 @@ impl Handler for QueryServerWriteV1 { let gte = GenerateTOTPEvent::from_parts( &mut audit, &idms_prox_write.qs_write, - msg.uat, + msg.uat.as_ref(), target_uuid, label, ) @@ -695,7 +708,7 @@ impl Handler for QueryServerWriteV1 { let vte = VerifyTOTPEvent::from_parts( &mut audit, &idms_prox_write.qs_write, - msg.uat, + msg.uat.as_ref(), target_uuid, uuid, chal, @@ -789,7 +802,7 @@ impl Handler for QueryServerWriteV1 { let rrse = RegenerateRadiusSecretEvent::from_parts( &mut audit, &idms_prox_write.qs_write, - msg.uat, + msg.uat.as_ref(), target_uuid, ) .map_err(|e| { @@ -834,7 +847,7 @@ impl Handler for QueryServerWriteV1 { let mdf = match ModifyEvent::from_target_uuid_attr_purge( &mut audit, - msg.uat, + msg.uat.as_ref(), target_uuid, &msg.attr, msg.filter, @@ -885,7 +898,7 @@ impl Handler for QueryServerWriteV1 { let mdf = match ModifyEvent::from_parts( &mut audit, - msg.uat, + msg.uat.as_ref(), target_uuid, &proto_ml, msg.filter, @@ -1181,7 +1194,7 @@ impl Handler for QueryServerWriteV1 { let upce = UnixPasswordChangeEvent::from_parts( &mut audit, &idms_prox_write.qs_write, - msg.uat, + msg.uat.as_ref(), target_uuid, msg.cred, ) diff --git a/kanidmd/src/lib/be/idl_sqlite.rs b/kanidmd/src/lib/be/idl_sqlite.rs index a9ffa5a30..8e606afe1 100644 --- a/kanidmd/src/lib/be/idl_sqlite.rs +++ b/kanidmd/src/lib/be/idl_sqlite.rs @@ -1248,7 +1248,10 @@ impl IdlSqlite { let manager = SqliteConnectionManager::file(path) .with_init(move |c| { c.execute_batch( - format!("PRAGMA page_size={}; VACUUM; PRAGMA journal_mode=WAL;", fstype as u32) + format!( + "PRAGMA page_size={}; VACUUM; PRAGMA journal_mode=WAL;", + fstype as u32 + ) .as_str(), ) }) @@ -1259,8 +1262,8 @@ impl IdlSqlite { // a single DB thread, else we cause consistency issues. builder1.max_size(1) } else { - // Have to add 1 for the write thread. - builder1.max_size(pool_size + 1) + // Have to add 1 for the write thread, and for the interval threads + builder1.max_size(pool_size + 2) }; // Look at max_size and thread_pool here for perf later let pool = builder2.build(manager).map_err(|e| { @@ -1275,7 +1278,7 @@ impl IdlSqlite { #[allow(clippy::expect_used)] let conn = self .pool - .get() + .try_get() .expect("Unable to get connection from pool!!!"); IdlSqliteReadTransaction::new(conn) } @@ -1284,7 +1287,7 @@ impl IdlSqlite { #[allow(clippy::expect_used)] let conn = self .pool - .get() + .try_get() .expect("Unable to get connection from pool!!!"); IdlSqliteWriteTransaction::new(conn) } diff --git a/kanidmd/src/lib/be/mod.rs b/kanidmd/src/lib/be/mod.rs index 89ca463de..76d8ae764 100644 --- a/kanidmd/src/lib/be/mod.rs +++ b/kanidmd/src/lib/be/mod.rs @@ -8,6 +8,7 @@ use std::sync::Arc; use crate::audit::AuditScope; use crate::be::dbentry::DbEntry; use crate::entry::{Entry, EntryCommitted, EntryNew, EntrySealed}; +use crate::event::EventLimits; use crate::filter::{Filter, FilterPlan, FilterResolved, FilterValidResolved}; use crate::value::Value; use concread::cowcell::*; @@ -470,10 +471,10 @@ pub trait BackendTransaction { }) } - // Take filter, and AuditScope ref? 
fn search( &self, au: &mut AuditScope, + erl: &EventLimits, filt: &Filter, ) -> Result>, OperationError> { // Unlike DS, even if we don't get the index back, we can just pass @@ -493,25 +494,53 @@ pub trait BackendTransaction { lfilter_info!(au, "filter executed plan -> {:?}", fplan); + // Based on the IDL we determine if limits are required at this point. + match &idl { + IDL::ALLIDS => { + if !erl.unindexed_allow { + ladmin_error!(au, "filter (search) is fully unindexed, and not allowed by resource limits"); + return Err(OperationError::ResourceLimit); + } + } + IDL::Partial(idl_br) => { + if idl_br.len() > erl.search_max_filter_test { + ladmin_error!(au, "filter (search) is partial indexed and greater than search_max_filter_test allowed by resource limits"); + return Err(OperationError::ResourceLimit); + } + } + IDL::PartialThreshold(_) => { + // Since we opted for this, this is not the fault + // of the user and we should not penalise them by limiting on partial. + } + IDL::Indexed(idl_br) => { + // We know this is resolved here, so we can attempt the limit + // check. This has to fold the whole index, but you know, class=pres is + // indexed ... + if idl_br.len() > erl.search_max_results { + ladmin_error!(au, "filter (search) is indexed and greater than search_max_results allowed by resource limits"); + return Err(OperationError::ResourceLimit); + } + } + }; + let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| { ladmin_error!(au, "get_identry failed {:?}", e); e })?; - // Do other things - // Now, de-serialise the raw_entries back to entries, and populate their ID's - - // if not 100% resolved. let entries_filtered = match idl { - IDL::ALLIDS | IDL::Partial(_) => { - lfilter_error!(au, "filter (search) was partially or fully unindexed.",); - lperf_segment!(au, "be::search", || { - entries - .into_iter() - .filter(|e| e.entry_match_no_index(&filt)) - .collect() - }) - } + IDL::ALLIDS => lperf_segment!(au, "be::search", || { + entries + .into_iter() + .filter(|e| e.entry_match_no_index(&filt)) + .collect() + }), + IDL::Partial(_) => lperf_segment!(au, "be::search", || { + entries + .into_iter() + .filter(|e| e.entry_match_no_index(&filt)) + .collect() + }), IDL::PartialThreshold(_) => { lperf_trace_segment!(au, "be::search", || { entries @@ -527,23 +556,12 @@ pub trait BackendTransaction { } }; - /* - // This is good for testing disagreements between the idl layer and the filter/entries - if cfg!(test) { - let check_raw_entries = try_audit!(au, self.get_idlayer().get_identry(au, &IDL::ALLIDS)); - let check_entries: Result, _> = - check_raw_entries.into_iter().map(|ide| ide.into_entry()).collect(); - let check_entries = try_audit!(au, check_entries); - let f_check_entries: Vec<_> = - check_entries - .into_iter() - .filter(|e| e.entry_match_no_index(&filt)) - .collect(); - debug!("raw -> {:?}", entries_filtered); - debug!("check -> {:?}", f_check_entries); - assert!(f_check_entries == entries_filtered); + // If the idl was not indexed, apply the resource limit now. Avoid the needless match since the + // if statement is quick. 
+ if entries_filtered.len() > erl.search_max_results { + ladmin_error!(au, "filter (search) is resolved and greater than search_max_results allowed by resource limits"); + return Err(OperationError::ResourceLimit); } - */ Ok(entries_filtered) }) @@ -556,6 +574,7 @@ pub trait BackendTransaction { fn exists( &self, au: &mut AuditScope, + erl: &EventLimits, filt: &Filter, ) -> Result { lperf_trace_segment!(au, "be::exists", || { @@ -572,26 +591,32 @@ pub trait BackendTransaction { lfilter_info!(au, "filter executed plan -> {:?}", fplan); + // Apply limits to the IDL. + match &idl { + IDL::ALLIDS => { + if !erl.unindexed_allow { + ladmin_error!(au, "filter (exists) is fully unindexed, and not allowed by resource limits"); + return Err(OperationError::ResourceLimit); + } + } + IDL::Partial(idl_br) => { + if idl_br.len() > erl.search_max_filter_test { + ladmin_error!(au, "filter (exists) is partial indexed and greater than search_max_filter_test allowed by resource limits"); + return Err(OperationError::ResourceLimit); + } + } + IDL::PartialThreshold(_) => { + // Since we opted for this, this is not the fault + // of the user and we should not penalise them. + } + IDL::Indexed(_) => {} + } + // Now, check the idl -- if it's fully resolved, we can skip this because the query // was fully indexed. match &idl { IDL::Indexed(idl) => Ok(!idl.is_empty()), - IDL::PartialThreshold(_) => { - let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| { - ladmin_error!(au, "get_identry failed {:?}", e); - e - })?; - - // if not 100% resolved query, apply the filter test. - let entries_filtered: Vec<_> = entries - .into_iter() - .filter(|e| e.entry_match_no_index(&filt)) - .collect(); - - Ok(!entries_filtered.is_empty()) - } _ => { - lfilter_error!(au, "filter (exists) was partially or fully unindexed",); let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| { ladmin_error!(au, "get_identry failed {:?}", e); e @@ -1372,6 +1397,7 @@ mod tests { use super::{ Backend, BackendTransaction, BackendWriteTransaction, FsType, OperationError, IDL, }; + use crate::event::EventLimits; use crate::value::{IndexType, PartialValue, Value}; macro_rules! run_test { @@ -1438,7 +1464,8 @@ mod tests { .expect("failed to generate filter") .into_valid_resolved() }; - let entries = $be.search($audit, &filt).expect("failed to search"); + let lims = EventLimits::unlimited(); + let entries = $be.search($audit, &lims, &filt).expect("failed to search"); entries.first().is_some() }}; } @@ -1451,7 +1478,8 @@ mod tests { .expect("failed to generate filter") .into_valid_resolved() }; - let entries = $be.search($audit, &filt).expect("failed to search"); + let lims = EventLimits::unlimited(); + let entries = $be.search($audit, &lims, &filt).expect("failed to search"); match entries.first() { Some(ent) => ent.attribute_pres($attr), None => false, @@ -1509,7 +1537,9 @@ mod tests { let filt = unsafe { filter_resolved!(f_eq("userid", PartialValue::new_utf8s("claire"))) }; - let r = be.search(audit, &filt); + let lims = EventLimits::unlimited(); + + let r = be.search(audit, &lims, &filt); assert!(r.expect("Search failed!").len() == 1); // Test empty search @@ -1524,6 +1554,7 @@ mod tests { fn test_be_simple_modify() { run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| { ltrace!(audit, "Simple Modify"); + let lims = EventLimits::unlimited(); // First create some entries (3?) 
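Taken together, the checks inserted into be::search form a small decision table: a fully unindexed (ALLIDS) query requires unindexed_allow, a partially indexed candidate set may not exceed search_max_filter_test before filter testing, a fully indexed result may not exceed search_max_results, and PartialThreshold is deliberately exempt because the server, not the user, chose to stop resolving the index. After filter testing, the surviving entry count is checked against search_max_results once more. A condensed, self-contained model of that gate follows, with stand-ins for the backend's IDL enum and the limits struct:

struct EventLimits {
    unindexed_allow: bool,
    search_max_results: usize,
    search_max_filter_test: usize,
}

enum Idl {
    AllIds,                  // fully unindexed candidate set
    Partial(usize),          // candidate count before filter testing
    PartialThreshold(usize), // server chose to stop resolving; not penalised
    Indexed(usize),          // fully resolved result count
}

#[derive(Debug, PartialEq)]
struct ResourceLimit;

fn check_idl(idl: &Idl, lim: &EventLimits) -> Result<(), ResourceLimit> {
    match idl {
        Idl::AllIds if !lim.unindexed_allow => Err(ResourceLimit),
        Idl::Partial(n) if *n > lim.search_max_filter_test => Err(ResourceLimit),
        Idl::Indexed(n) if *n > lim.search_max_results => Err(ResourceLimit),
        // PartialThreshold, and anything within bounds, passes through.
        _ => Ok(()),
    }
}

fn main() {
    let lim = EventLimits {
        unindexed_allow: false,
        search_max_results: 128,
        search_max_filter_test: 256,
    };
    assert_eq!(check_idl(&Idl::AllIds, &lim), Err(ResourceLimit));
    assert_eq!(check_idl(&Idl::Indexed(10), &lim), Ok(()));
    assert_eq!(check_idl(&Idl::Partial(1_000), &lim), Err(ResourceLimit));
    assert_eq!(check_idl(&Idl::PartialThreshold(1_000_000), &lim), Ok(()));
}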
let mut e1: Entry = Entry::new(); e1.add_ava("userid", Value::from("william")); @@ -1542,7 +1573,7 @@ mod tests { // You need to now retrieve the entries back out to get the entry id's let mut results = be - .search(audit, unsafe { &filter_resolved!(f_pres("userid")) }) + .search(audit, &lims, unsafe { &filter_resolved!(f_pres("userid")) }) .expect("Failed to search"); // Get these out to usable entries. @@ -1597,6 +1628,7 @@ mod tests { fn test_be_simple_delete() { run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| { ltrace!(audit, "Simple Delete"); + let lims = EventLimits::unlimited(); // First create some entries (3?) let mut e1: Entry = Entry::new(); @@ -1622,7 +1654,7 @@ mod tests { // You need to now retrieve the entries back out to get the entry id's let mut results = be - .search(audit, unsafe { &filter_resolved!(f_pres("userid")) }) + .search(audit, &lims, unsafe { &filter_resolved!(f_pres("userid")) }) .expect("Failed to search"); // Get these out to usable entries. @@ -2458,4 +2490,155 @@ mod tests { } }) } + + #[test] + fn test_be_limits_allids() { + run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| { + let mut lim_allow_allids = EventLimits::unlimited(); + lim_allow_allids.unindexed_allow = true; + + let mut lim_deny_allids = EventLimits::unlimited(); + lim_deny_allids.unindexed_allow = false; + + let mut e: Entry = Entry::new(); + e.add_ava("userid", Value::from("william")); + e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1")); + e.add_ava("nonexist", Value::from("x")); + let e = unsafe { e.into_sealed_new() }; + let single_result = be.create(audit, vec![e.clone()]); + + assert!(single_result.is_ok()); + let filt = unsafe { + e.filter_from_attrs(&vec![String::from("nonexist")]) + .expect("failed to generate filter") + .into_valid_resolved() + }; + // check allow on allids + let res = be.search(audit, &lim_allow_allids, &filt); + assert!(res.is_ok()); + let res = be.exists(audit, &lim_allow_allids, &filt); + assert!(res.is_ok()); + + // check deny on allids + let res = be.search(audit, &lim_deny_allids, &filt); + assert!(res == Err(OperationError::ResourceLimit)); + let res = be.exists(audit, &lim_deny_allids, &filt); + assert!(res == Err(OperationError::ResourceLimit)); + }) + } + + #[test] + fn test_be_limits_results_max() { + run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| { + let mut lim_allow = EventLimits::unlimited(); + lim_allow.search_max_results = usize::MAX; + + let mut lim_deny = EventLimits::unlimited(); + lim_deny.search_max_results = 0; + + let mut e: Entry = Entry::new(); + e.add_ava("userid", Value::from("william")); + e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1")); + e.add_ava("nonexist", Value::from("x")); + let e = unsafe { e.into_sealed_new() }; + let single_result = be.create(audit, vec![e.clone()]); + assert!(single_result.is_ok()); + + let filt = unsafe { + e.filter_from_attrs(&vec![String::from("nonexist")]) + .expect("failed to generate filter") + .into_valid_resolved() + }; + + // --> This is the all ids path (unindexed) + // check allow on entry max + let res = be.search(audit, &lim_allow, &filt); + assert!(res.is_ok()); + let res = be.exists(audit, &lim_allow, &filt); + assert!(res.is_ok()); + + // check deny on entry max + let res = be.search(audit, &lim_deny, &filt); + assert!(res == Err(OperationError::ResourceLimit)); + // we don't limit on exists because we never load the entries. 
+ let res = be.exists(audit, &lim_deny, &filt); + assert!(res.is_ok()); + + // --> This will shortcut due to indexing. + assert!(be.reindex(audit).is_ok()); + let res = be.search(audit, &lim_deny, &filt); + assert!(res == Err(OperationError::ResourceLimit)); + // we don't limit on exists because we never load the entries. + let res = be.exists(audit, &lim_deny, &filt); + assert!(res.is_ok()); + }) + } + + #[test] + fn test_be_limits_partial_filter() { + run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| { + // This relies on how we do partials, so it could be a bit sensitive. + // A partial is generated after an allids + indexed in a single and + // as we require both conditions to exist. Allids comes from unindexed + // terms. we need to ensure we don't hit partial threshold too. + // + // This means we need an and query where the first term is allids + // and the second is indexed, but without the filter shortcutting. + // + // To achieve this we need a monstrously evil query. + // + let mut lim_allow = EventLimits::unlimited(); + lim_allow.search_max_filter_test = usize::MAX; + + let mut lim_deny = EventLimits::unlimited(); + lim_deny.search_max_filter_test = 0; + + let mut e: Entry = Entry::new(); + e.add_ava("name", Value::from("william")); + e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1")); + e.add_ava("nonexist", Value::from("x")); + e.add_ava("nonexist", Value::from("y")); + let e = unsafe { e.into_sealed_new() }; + let single_result = be.create(audit, vec![e.clone()]); + assert!(single_result.is_ok()); + + // Reindex so we have things in place for our query + assert!(be.reindex(audit).is_ok()); + + // 🚨 This is evil! + // The and allows us to hit "allids + indexed -> partial". + // the or terms prevent re-arrangement. They can't be folded or dead + // term elimed either. + // + // This means the f_or nonexist will become allids and the second will be indexed + // due to f_eq userid in both with the result of william. + // + // This creates a partial, and because it's the first iteration in the loop, this + // doesn't encounter partial threshold testing. + let filt = unsafe { + filter_resolved!(f_and!([ + f_or!([ + f_eq("nonexist", PartialValue::new_utf8s("x")), + f_eq("nonexist", PartialValue::new_utf8s("y")) + ]), + f_or!([ + f_eq("name", PartialValue::new_utf8s("claire")), + f_eq("name", PartialValue::new_utf8s("william")) + ]), + ])) + }; + + let res = be.search(audit, &lim_allow, &filt); + assert!(res.is_ok()); + let res = be.exists(audit, &lim_allow, &filt); + assert!(res.is_ok()); + + // check deny on entry max + let res = be.search(audit, &lim_deny, &filt); + assert!(res == Err(OperationError::ResourceLimit)); + // we don't limit on exists because we never load the entries. + let res = be.exists(audit, &lim_deny, &filt); + assert!(res == Err(OperationError::ResourceLimit)); + }) + } } diff --git a/kanidmd/src/lib/event.rs b/kanidmd/src/lib/event.rs index d172090e6..fa766ce3f 100644 --- a/kanidmd/src/lib/event.rs +++ b/kanidmd/src/lib/event.rs @@ -24,6 +24,7 @@ use crate::actors::v1_write::{CreateMessage, DeleteMessage, ModifyMessage}; // use crate::schema::SchemaTransaction; use actix::prelude::*; +use ldap3_server::simple::LdapFilter; use std::collections::BTreeSet; use uuid::Uuid; @@ -94,11 +95,46 @@ pub enum EventOrigin { // Replication, } +#[derive(Debug, Clone)] +/// Limits on the resources a single event can consume. 
These are defined per-event +/// as they are derived from the userAuthToken based on that individual session +pub struct EventLimits { + pub unindexed_allow: bool, + pub search_max_results: usize, + pub search_max_filter_test: usize, + pub filter_max_elements: usize, + // pub write_max_entries: usize, + // pub write_max_rate: usize, + // pub network_max_request: usize, +} + +impl EventLimits { + pub fn unlimited() -> Self { + EventLimits { + unindexed_allow: true, + search_max_results: usize::MAX, + search_max_filter_test: usize::MAX, + filter_max_elements: usize::MAX, + } + } + + // From a userauthtoken + pub fn from_uat(uat: &UserAuthToken) -> Self { + EventLimits { + unindexed_allow: uat.lim_uidx, + search_max_results: uat.lim_rmax, + search_max_filter_test: uat.lim_pmax, + filter_max_elements: uat.lim_fmax, + } + } +} + #[derive(Debug, Clone)] pub struct Event { // The event's initiator aka origin source. // This importantly, is used for access control! pub origin: EventOrigin, + pub(crate) limits: EventLimits, } impl std::fmt::Display for Event { @@ -119,25 +155,10 @@ impl std::fmt::Display for Event { } impl Event { - pub fn from_ro_request( - audit: &mut AuditScope, - qs: &QueryServerReadTransaction, - user_uuid: &Uuid, - ) -> Result { - qs.internal_search_uuid(audit, &user_uuid) - .map(|e| Event { - origin: EventOrigin::User(e), - }) - .map_err(|e| { - ladmin_error!(audit, "from_ro_request failed {:?}", e); - e - }) - } - pub fn from_ro_uat( audit: &mut AuditScope, qs: &QueryServerReadTransaction, - uat: Option, + uat: Option<&UserAuthToken>, ) -> Result { ltrace!(audit, "from_ro_uat -> {:?}", uat); let uat = uat.ok_or(OperationError::NotAuthenticated)?; @@ -153,15 +174,17 @@ impl Event { // TODO #64: Now apply claims from the uat into the Entry // to allow filtering. + let limits = EventLimits::from_uat(uat); Ok(Event { origin: EventOrigin::User(e), + limits, }) } pub fn from_rw_uat( audit: &mut AuditScope, qs: &QueryServerWriteTransaction, - uat: Option, + uat: Option<&UserAuthToken>, ) -> Result { ltrace!(audit, "from_rw_uat -> {:?}", uat); let uat = uat.ok_or(OperationError::NotAuthenticated)?; @@ -177,37 +200,17 @@ impl Event { // TODO #64: Now apply claims from the uat into the Entry // to allow filtering. + let limits = EventLimits::from_uat(uat); Ok(Event { origin: EventOrigin::User(e), - }) - } - - pub fn from_rw_request( - audit: &mut AuditScope, - qs: &QueryServerWriteTransaction, - user_uuid: &str, - ) -> Result { - // Do we need to check or load the entry from the user_uuid? - // In the future, probably yes. - // - // For now, no. - let u = Uuid::parse_str(user_uuid).map_err(|_| { - ladmin_error!(audit, "from_ro_request invalid uat uuid"); - OperationError::InvalidUuid - })?; - let e = qs.internal_search_uuid(audit, &u).map_err(|e| { - ladmin_error!(audit, "from_rw_request failed {:?}", e); - e - })?; - - Ok(Event { - origin: EventOrigin::User(e), + limits, }) } pub fn from_internal() -> Self { Event { origin: EventOrigin::Internal, + limits: EventLimits::unlimited(), } } @@ -215,6 +218,7 @@ impl Event { pub fn from_impersonate_entry(e: Entry) -> Self { Event { origin: EventOrigin::User(e), + limits: EventLimits::unlimited(), } } @@ -263,25 +267,26 @@ impl SearchEvent { msg: SearchMessage, qs: &QueryServerReadTransaction, ) -> Result { - match Filter::from_ro(audit, &msg.req.filter, qs) { - Ok(f) => Ok(SearchEvent { - event: Event::from_ro_uat(audit, qs, msg.uat)?, - // We do need to do this twice to account for the ignore_hidden - // changes. 
- filter: f - .clone() - .into_ignore_hidden() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: f - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - // We can't get this from the SearchMessage because it's annoying with the - // current macro design. - attrs: None, - }), - Err(e) => Err(e), - } + let event = Event::from_ro_uat(audit, qs, msg.uat.as_ref())?; + let f = Filter::from_ro(audit, &event, &msg.req.filter, qs)?; + // We do need to do this twice to account for the ignore_hidden + // changes. + let filter = f + .clone() + .into_ignore_hidden() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = f + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + Ok(SearchEvent { + event, + filter, + filter_orig, + // We can't get this from the SearchMessage because it's annoying with the + // current macro design. + attrs: None, + }) } pub fn from_internal_message( @@ -302,23 +307,28 @@ impl SearchEvent { } } + let event = Event::from_ro_uat(audit, qs, msg.uat.as_ref())?; + + let filter = msg + .filter + .clone() + .into_ignore_hidden() + .validate(qs.get_schema()) + .map_err(|e| { + lrequest_error!(audit, "filter schema violation -> {:?}", e); + OperationError::SchemaViolation(e) + })?; + let filter_orig = msg.filter.validate(qs.get_schema()).map_err(|e| { + lrequest_error!(audit, "filter_orig schema violation -> {:?}", e); + OperationError::SchemaViolation(e) + })?; + Ok(SearchEvent { - event: Event::from_ro_uat(audit, qs, msg.uat)?, + event, // We do need to do this twice to account for the ignore_hidden // changes. - filter: msg - .filter - .clone() - .into_ignore_hidden() - .validate(qs.get_schema()) - .map_err(|e| { - lrequest_error!(audit, "filter schema violation -> {:?}", e); - OperationError::SchemaViolation(e) - })?, - filter_orig: msg.filter.validate(qs.get_schema()).map_err(|e| { - lrequest_error!(audit, "filter_orig schema violation -> {:?}", e); - OperationError::SchemaViolation(e) - })?, + filter, + filter_orig, attrs: r_attrs, }) } @@ -340,54 +350,65 @@ impl SearchEvent { } } + let event = Event::from_ro_uat(audit, qs, msg.uat.as_ref())?; + let filter = msg + .filter + .clone() + .into_recycled() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = msg + .filter + .into_recycled() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + Ok(SearchEvent { - event: Event::from_ro_uat(audit, qs, msg.uat)?, - filter: msg - .filter - .clone() - .into_recycled() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: msg - .filter - .into_recycled() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, + event, + filter, + filter_orig, attrs: r_attrs, }) } pub fn from_whoami_request( audit: &mut AuditScope, - uat: Option, + uat: Option<&UserAuthToken>, qs: &QueryServerReadTransaction, ) -> Result { + let event = Event::from_ro_uat(audit, qs, uat)?; + let filter = filter!(f_self()) + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = filter_all!(f_self()) + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + Ok(SearchEvent { - event: Event::from_ro_uat(audit, qs, uat)?, - filter: filter!(f_self()) - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: filter_all!(f_self()) - .validate(qs.get_schema()) - 
.map_err(OperationError::SchemaViolation)?, + event, + filter, + filter_orig, attrs: None, }) } pub fn from_target_uuid_request( audit: &mut AuditScope, - uat: Option, + uat: Option<&UserAuthToken>, target_uuid: Uuid, qs: &QueryServerReadTransaction, ) -> Result { + let event = Event::from_ro_uat(audit, qs, uat)?; + let filter = filter!(f_eq("uuid", PartialValue::new_uuid(target_uuid))) + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid))) + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; Ok(SearchEvent { - event: Event::from_ro_uat(audit, qs, uat)?, - filter: filter!(f_eq("uuid", PartialValue::new_uuid(target_uuid))) - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid))) - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, + event, + filter, + filter_orig, attrs: None, }) } @@ -460,20 +481,25 @@ impl SearchEvent { pub(crate) fn new_ext_impersonate_uuid( audit: &mut AuditScope, qs: &QueryServerReadTransaction, - euuid: &Uuid, - filter: &Filter, + euat: &UserAuthToken, + lf: &LdapFilter, attrs: Option>, ) -> Result { + let event = Event::from_ro_uat(audit, qs, Some(euat))?; + // Kanidm Filter from LdapFilter + let f = Filter::from_ldap_ro(audit, &event, &lf, qs)?; + let filter = f + .clone() + .into_ignore_hidden() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = f + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; Ok(SearchEvent { - event: Event::from_ro_request(audit, qs, euuid)?, - filter: filter - .clone() - .into_ignore_hidden() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: filter - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, + event, + filter, + filter_orig, attrs, }) } @@ -496,6 +522,10 @@ impl SearchEvent { attrs: None, } } + + pub(crate) fn get_limits(&self) -> &EventLimits { + &self.event.limits + } } // Represents the decoded entries from the protocol -> internal entry representation @@ -528,7 +558,7 @@ impl CreateEvent { // From ProtoEntry -> Entry // What is the correct consuming iterator here? Can we // even do that? 
- event: Event::from_rw_uat(audit, qs, msg.uat)?, + event: Event::from_rw_uat(audit, qs, msg.uat.as_ref())?, entries, }), Err(e) => Err(e), @@ -582,6 +612,10 @@ impl ExistsEvent { filter_orig: filter.into_valid(), } } + + pub(crate) fn get_limits(&self) -> &EventLimits { + &self.event.limits + } } #[derive(Debug)] @@ -599,38 +633,42 @@ impl DeleteEvent { msg: DeleteMessage, qs: &QueryServerWriteTransaction, ) -> Result { - match Filter::from_rw(audit, &msg.req.filter, qs) { - Ok(f) => Ok(DeleteEvent { - event: Event::from_rw_uat(audit, qs, msg.uat)?, - filter: f - .clone() - .into_ignore_hidden() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: f - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - }), - Err(e) => Err(e), - } + let event = Event::from_rw_uat(audit, qs, msg.uat.as_ref())?; + let f = Filter::from_rw(audit, &event, &msg.req.filter, qs)?; + let filter = f + .clone() + .into_ignore_hidden() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = f + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + Ok(DeleteEvent { + event, + filter, + filter_orig, + }) } pub fn from_parts( audit: &mut AuditScope, - uat: Option, - filter: &Filter, + uat: Option<&UserAuthToken>, + f: &Filter, qs: &QueryServerWriteTransaction, ) -> Result { + let event = Event::from_rw_uat(audit, qs, uat)?; + let filter = f + .clone() + .into_ignore_hidden() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = f + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; Ok(DeleteEvent { - event: Event::from_rw_uat(audit, qs, uat)?, - filter: filter - .clone() - .into_ignore_hidden() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: filter - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, + event, + filter, + filter_orig, }) } @@ -689,32 +727,31 @@ impl ModifyEvent { msg: ModifyMessage, qs: &QueryServerWriteTransaction, ) -> Result { - match Filter::from_rw(audit, &msg.req.filter, qs) { - Ok(f) => match ModifyList::from(audit, &msg.req.modlist, qs) { - Ok(m) => Ok(ModifyEvent { - event: Event::from_rw_uat(audit, qs, msg.uat)?, - filter: f - .clone() - .into_ignore_hidden() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: f - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - modlist: m - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - }), - Err(e) => Err(e), - }, - - Err(e) => Err(e), - } + let event = Event::from_rw_uat(audit, qs, msg.uat.as_ref())?; + let f = Filter::from_rw(audit, &event, &msg.req.filter, qs)?; + let m = ModifyList::from(audit, &msg.req.modlist, qs)?; + let filter = f + .clone() + .into_ignore_hidden() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = f + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let modlist = m + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + Ok(ModifyEvent { + event, + filter, + filter_orig, + modlist, + }) } pub fn from_parts( audit: &mut AuditScope, - uat: Option, + uat: Option<&UserAuthToken>, target_uuid: Uuid, proto_ml: &ProtoModifyList, filter: Filter, @@ -724,28 +761,31 @@ impl ModifyEvent { // Add any supplemental conditions we have. 
let f = Filter::join_parts_and(f_uuid, filter); - match ModifyList::from(audit, &proto_ml, qs) { - Ok(m) => Ok(ModifyEvent { - event: Event::from_rw_uat(audit, qs, uat)?, - filter: f - .clone() - .into_ignore_hidden() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: f - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - modlist: m - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - }), - Err(e) => Err(e), - } + let m = ModifyList::from(audit, &proto_ml, qs)?; + let event = Event::from_rw_uat(audit, qs, uat)?; + let filter = f + .clone() + .into_ignore_hidden() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = f + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let modlist = m + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + + Ok(ModifyEvent { + event, + filter, + filter_orig, + modlist, + }) } pub fn from_internal_parts( audit: &mut AuditScope, - uat: Option, + uat: Option<&UserAuthToken>, target_uuid: Uuid, ml: &ModifyList, filter: Filter, @@ -755,25 +795,30 @@ impl ModifyEvent { // Add any supplemental conditions we have. let f = Filter::join_parts_and(f_uuid, filter); + let event = Event::from_rw_uat(audit, qs, uat)?; + let filter = f + .clone() + .into_ignore_hidden() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = f + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let modlist = ml + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + Ok(ModifyEvent { - event: Event::from_rw_uat(audit, qs, uat)?, - filter: f - .clone() - .into_ignore_hidden() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: f - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - modlist: ml - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, + event, + filter, + filter_orig, + modlist, }) } pub fn from_target_uuid_attr_purge( audit: &mut AuditScope, - uat: Option, + uat: Option<&UserAuthToken>, target_uuid: Uuid, attr: &str, filter: Filter, @@ -783,19 +828,24 @@ impl ModifyEvent { let f_uuid = filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid))); // Add any supplemental conditions we have. 
let f = Filter::join_parts_and(f_uuid, filter); + + let event = Event::from_rw_uat(audit, qs, uat)?; + let filter = f + .clone() + .into_ignore_hidden() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let filter_orig = f + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let modlist = ml + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; Ok(ModifyEvent { - event: Event::from_rw_uat(audit, qs, uat)?, - filter: f - .clone() - .into_ignore_hidden() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - filter_orig: f - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - modlist: ml - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, + event, + filter, + filter_orig, + modlist, }) } @@ -1085,17 +1135,16 @@ impl Message for ReviveRecycledEvent { impl ReviveRecycledEvent { pub fn from_parts( audit: &mut AuditScope, - uat: Option, + uat: Option<&UserAuthToken>, filter: Filter, qs: &QueryServerWriteTransaction, ) -> Result { - Ok(ReviveRecycledEvent { - event: Event::from_rw_uat(audit, qs, uat)?, - filter: filter - .into_recycled() - .validate(qs.get_schema()) - .map_err(OperationError::SchemaViolation)?, - }) + let event = Event::from_rw_uat(audit, qs, uat)?; + let filter = filter + .into_recycled() + .validate(qs.get_schema()) + .map_err(OperationError::SchemaViolation)?; + Ok(ReviveRecycledEvent { event, filter }) } #[cfg(test)] diff --git a/kanidmd/src/lib/filter.rs b/kanidmd/src/lib/filter.rs index 93da40b22..8cbebed25 100644 --- a/kanidmd/src/lib/filter.rs +++ b/kanidmd/src/lib/filter.rs @@ -27,6 +27,8 @@ use std::iter; use uuid::Uuid; +const FILTER_DEPTH_MAX: usize = 16; + // Default filter is safe, ignores all hidden types! // This is &Value so we can lazy const then clone, but perhaps we can reconsider @@ -434,13 +436,16 @@ impl Filter { // takes "clone_value(t, a, v) instead, but that may have a similar issue. 
pub fn from_ro( audit: &mut AuditScope, + ev: &Event, f: &ProtoFilter, qs: &QueryServerReadTransaction, ) -> Result { lperf_trace_segment!(audit, "filter::from_ro", || { + let depth = FILTER_DEPTH_MAX; + let mut elems = ev.limits.filter_max_elements; Ok(Filter { state: FilterInvalid { - inner: FilterComp::from_ro(audit, f, qs)?, + inner: FilterComp::from_ro(audit, f, qs, depth, &mut elems)?, }, }) }) @@ -448,13 +453,16 @@ impl Filter { pub fn from_rw( audit: &mut AuditScope, + ev: &Event, f: &ProtoFilter, qs: &QueryServerWriteTransaction, ) -> Result { lperf_trace_segment!(audit, "filter::from_rw", || { + let depth = FILTER_DEPTH_MAX; + let mut elems = ev.limits.filter_max_elements; Ok(Filter { state: FilterInvalid { - inner: FilterComp::from_rw(audit, f, qs)?, + inner: FilterComp::from_rw(audit, f, qs, depth, &mut elems)?, }, }) }) @@ -462,13 +470,16 @@ impl Filter { pub fn from_ldap_ro( audit: &mut AuditScope, + ev: &Event, f: &LdapFilter, qs: &QueryServerReadTransaction, ) -> Result { lperf_trace_segment!(audit, "filter::from_ldap_ro", || { + let depth = FILTER_DEPTH_MAX; + let mut elems = ev.limits.filter_max_elements; Ok(Filter { state: FilterInvalid { - inner: FilterComp::from_ldap_ro(audit, f, qs)?, + inner: FilterComp::from_ldap_ro(audit, f, qs, depth, &mut elems)?, }, }) }) @@ -656,7 +667,10 @@ impl FilterComp { audit: &mut AuditScope, f: &ProtoFilter, qs: &QueryServerReadTransaction, + depth: usize, + elems: &mut usize, ) -> Result { + let ndepth = depth.checked_sub(1).ok_or(OperationError::ResourceLimit)?; Ok(match f { ProtoFilter::Eq(a, v) => { let nk = qs.get_schema().normalise_attr_name(a); @@ -672,17 +686,32 @@ impl FilterComp { let nk = qs.get_schema().normalise_attr_name(a); FilterComp::Pres(nk) } - ProtoFilter::Or(l) => FilterComp::Or( - l.iter() - .map(|f| Self::from_ro(audit, f, qs)) - .collect::, _>>()?, - ), - ProtoFilter::And(l) => FilterComp::And( - l.iter() - .map(|f| Self::from_ro(audit, f, qs)) - .collect::, _>>()?, - ), - ProtoFilter::AndNot(l) => FilterComp::AndNot(Box::new(Self::from_ro(audit, l, qs)?)), + ProtoFilter::Or(l) => { + *elems = (*elems) + .checked_sub(l.len()) + .ok_or(OperationError::ResourceLimit)?; + FilterComp::Or( + l.iter() + .map(|f| Self::from_ro(audit, f, qs, ndepth, elems)) + .collect::, _>>()?, + ) + } + ProtoFilter::And(l) => { + *elems = (*elems) + .checked_sub(l.len()) + .ok_or(OperationError::ResourceLimit)?; + FilterComp::And( + l.iter() + .map(|f| Self::from_ro(audit, f, qs, ndepth, elems)) + .collect::, _>>()?, + ) + } + ProtoFilter::AndNot(l) => { + *elems = (*elems) + .checked_sub(1) + .ok_or(OperationError::ResourceLimit)?; + FilterComp::AndNot(Box::new(Self::from_ro(audit, l, qs, ndepth, elems)?)) + } ProtoFilter::SelfUUID => FilterComp::SelfUUID, }) } @@ -691,7 +720,10 @@ impl FilterComp { audit: &mut AuditScope, f: &ProtoFilter, qs: &QueryServerWriteTransaction, + depth: usize, + elems: &mut usize, ) -> Result { + let ndepth = depth.checked_sub(1).ok_or(OperationError::ResourceLimit)?; Ok(match f { ProtoFilter::Eq(a, v) => { let nk = qs.get_schema().normalise_attr_name(a); @@ -707,17 +739,33 @@ impl FilterComp { let nk = qs.get_schema().normalise_attr_name(a); FilterComp::Pres(nk) } - ProtoFilter::Or(l) => FilterComp::Or( - l.iter() - .map(|f| Self::from_rw(audit, f, qs)) - .collect::, _>>()?, - ), - ProtoFilter::And(l) => FilterComp::And( - l.iter() - .map(|f| Self::from_rw(audit, f, qs)) - .collect::, _>>()?, - ), - ProtoFilter::AndNot(l) => FilterComp::AndNot(Box::new(Self::from_rw(audit, l, qs)?)), + ProtoFilter::Or(l) 
=> { + *elems = (*elems) + .checked_sub(l.len()) + .ok_or(OperationError::ResourceLimit)?; + FilterComp::Or( + l.iter() + .map(|f| Self::from_rw(audit, f, qs, ndepth, elems)) + .collect::, _>>()?, + ) + } + ProtoFilter::And(l) => { + *elems = (*elems) + .checked_sub(l.len()) + .ok_or(OperationError::ResourceLimit)?; + FilterComp::And( + l.iter() + .map(|f| Self::from_rw(audit, f, qs, ndepth, elems)) + .collect::, _>>()?, + ) + } + ProtoFilter::AndNot(l) => { + *elems = (*elems) + .checked_sub(1) + .ok_or(OperationError::ResourceLimit)?; + + FilterComp::AndNot(Box::new(Self::from_rw(audit, l, qs, ndepth, elems)?)) + } ProtoFilter::SelfUUID => FilterComp::SelfUUID, }) } @@ -726,19 +774,38 @@ impl FilterComp { audit: &mut AuditScope, f: &LdapFilter, qs: &QueryServerReadTransaction, + depth: usize, + elems: &mut usize, ) -> Result { + let ndepth = depth.checked_sub(1).ok_or(OperationError::ResourceLimit)?; Ok(match f { - LdapFilter::And(l) => FilterComp::And( - l.iter() - .map(|f| Self::from_ldap_ro(audit, f, qs)) - .collect::, _>>()?, - ), - LdapFilter::Or(l) => FilterComp::Or( - l.iter() - .map(|f| Self::from_ldap_ro(audit, f, qs)) - .collect::, _>>()?, - ), - LdapFilter::Not(l) => FilterComp::AndNot(Box::new(Self::from_ldap_ro(audit, l, qs)?)), + LdapFilter::And(l) => { + *elems = (*elems) + .checked_sub(l.len()) + .ok_or(OperationError::ResourceLimit)?; + FilterComp::And( + l.iter() + .map(|f| Self::from_ldap_ro(audit, f, qs, ndepth, elems)) + .collect::, _>>()?, + ) + } + LdapFilter::Or(l) => { + *elems = (*elems) + .checked_sub(l.len()) + .ok_or(OperationError::ResourceLimit)?; + + FilterComp::Or( + l.iter() + .map(|f| Self::from_ldap_ro(audit, f, qs, ndepth, elems)) + .collect::, _>>()?, + ) + } + LdapFilter::Not(l) => { + *elems = (*elems) + .checked_sub(1) + .ok_or(OperationError::ResourceLimit)?; + FilterComp::AndNot(Box::new(Self::from_ldap_ro(audit, l, qs, ndepth, elems)?)) + } LdapFilter::Equality(a, v) => { let a = ldap_attr_filter_map(a); let v = qs.clone_partialvalue(audit, a.as_str(), v)?; @@ -1126,12 +1193,18 @@ impl FilterResolved { #[cfg(test)] mod tests { - use crate::entry::{Entry, EntryNew, EntrySealed}; - use crate::filter::{Filter, FilterInvalid}; - use crate::value::PartialValue; + use crate::entry::{Entry, EntryInit, EntryNew, EntrySealed}; + use crate::event::{CreateEvent, Event}; + use crate::filter::{Filter, FilterInvalid, FILTER_DEPTH_MAX}; + use crate::server::QueryServerTransaction; + use crate::value::{PartialValue, Value}; use std::cmp::{Ordering, PartialOrd}; use std::collections::BTreeSet; + use kanidm_proto::v1::Filter as ProtoFilter; + use kanidm_proto::v1::OperationError; + use ldap3_server::simple::LdapFilter; + #[test] fn test_filter_simple() { // Test construction. 
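The filter changes bound the shape of a submitted filter in two independent ways: FILTER_DEPTH_MAX caps nesting (each recursive call subtracts one from a per-branch depth budget), and filter_max_elements is a shared element budget that And/Or charge per child and AndNot charges once, all via checked_sub so exhaustion surfaces as ResourceLimit. A self-contained sketch of both budgets; the ProtoFilter here is a stand-in, and the real parser also normalises attribute names against the schema:

const FILTER_DEPTH_MAX: usize = 16;

#[derive(Debug, PartialEq)]
struct ResourceLimit;

enum ProtoFilter {
    Pres(String),
    And(Vec<ProtoFilter>),
    Or(Vec<ProtoFilter>),
    AndNot(Box<ProtoFilter>),
}

fn walk(f: &ProtoFilter, depth: usize, elems: &mut usize) -> Result<(), ResourceLimit> {
    // Every level of nesting consumes one unit of depth.
    let ndepth = depth.checked_sub(1).ok_or(ResourceLimit)?;
    match f {
        ProtoFilter::Pres(_) => Ok(()),
        ProtoFilter::And(l) | ProtoFilter::Or(l) => {
            // And/Or charge the shared element budget for each child.
            *elems = elems.checked_sub(l.len()).ok_or(ResourceLimit)?;
            for inner in l {
                walk(inner, ndepth, elems)?;
            }
            Ok(())
        }
        ProtoFilter::AndNot(inner) => {
            *elems = elems.checked_sub(1).ok_or(ResourceLimit)?;
            walk(inner, ndepth, elems)
        }
    }
}

fn main() {
    // Nesting one level past the depth budget is rejected.
    let mut deep = ProtoFilter::Pres("class".to_string());
    for _ in 0..(FILTER_DEPTH_MAX + 1) {
        deep = ProtoFilter::And(vec![deep]);
    }
    let mut elems = usize::MAX;
    assert_eq!(walk(&deep, FILTER_DEPTH_MAX, &mut elems), Err(ResourceLimit));

    // A flat Or wider than the element budget is rejected as well.
    let wide = ProtoFilter::Or(
        (0..8).map(|_| ProtoFilter::Pres("class".to_string())).collect(),
    );
    elems = 4;
    assert_eq!(walk(&wide, FILTER_DEPTH_MAX, &mut elems), Err(ResourceLimit));
}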
@@ -1586,4 +1659,147 @@ mod tests { assert!(f_t2a.get_attr_set() == f_expect); } + + #[test] + fn test_filter_resolve_value() { + run_test!(|server: &QueryServer, audit: &mut AuditScope| { + let server_txn = server.write(duration_from_epoch_now()); + let e1: Entry = Entry::unsafe_from_entry_str( + r#"{ + "attrs": { + "class": ["object", "person", "account"], + "name": ["testperson1"], + "uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"], + "description": ["testperson"], + "displayname": ["testperson1"] + } + }"#, + ); + let e2: Entry = Entry::unsafe_from_entry_str( + r#"{ + "attrs": { + "class": ["object", "person"], + "name": ["testperson2"], + "uuid": ["a67c0c71-0b35-4218-a6b0-22d23d131d27"], + "description": ["testperson"], + "displayname": ["testperson2"] + } + }"#, + ); + let e_ts: Entry = Entry::unsafe_from_entry_str( + r#"{ + "attrs": { + "class": ["tombstone", "object"], + "uuid": ["9557f49c-97a5-4277-a9a5-097d17eb8317"] + } + }"#, + ); + let ce = CreateEvent::new_internal(vec![e1, e2, e_ts]); + let cr = server_txn.create(audit, &ce); + assert!(cr.is_ok()); + + // Resolving most times should yield expected results + let t1 = Value::new_utf8s("teststring"); + let r1 = server_txn.resolve_value(audit, &t1); + assert!(r1 == Ok("teststring".to_string())); + + // Resolve UUID with matching spn + let t_uuid = Value::new_refer_s("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap(); + let r_uuid = server_txn.resolve_value(audit, &t_uuid); + debug!("{:?}", r_uuid); + assert!(r_uuid == Ok("testperson1@example.com".to_string())); + + // Resolve UUID with matching name + let t_uuid = Value::new_refer_s("a67c0c71-0b35-4218-a6b0-22d23d131d27").unwrap(); + let r_uuid = server_txn.resolve_value(audit, &t_uuid); + debug!("{:?}", r_uuid); + assert!(r_uuid == Ok("testperson2".to_string())); + + // Resolve UUID non-exist + let t_uuid_non = Value::new_refer_s("b83e98f0-3d2e-41d2-9796-d8d993289c86").unwrap(); + let r_uuid_non = server_txn.resolve_value(audit, &t_uuid_non); + debug!("{:?}", r_uuid_non); + assert!(r_uuid_non == Ok("b83e98f0-3d2e-41d2-9796-d8d993289c86".to_string())); + + // Resolve UUID to tombstone/recycled (same an non-exst) + let t_uuid_ts = Value::new_refer_s("9557f49c-97a5-4277-a9a5-097d17eb8317").unwrap(); + let r_uuid_ts = server_txn.resolve_value(audit, &t_uuid_ts); + debug!("{:?}", r_uuid_ts); + assert!(r_uuid_ts == Ok("9557f49c-97a5-4277-a9a5-097d17eb8317".to_string())); + }) + } + + #[test] + fn test_filter_depth_limits() { + run_test!(|server: &QueryServer, audit: &mut AuditScope| { + let r_txn = server.read(); + + let mut inv_proto = ProtoFilter::Pres("class".to_string()); + for _i in 0..(FILTER_DEPTH_MAX + 1) { + inv_proto = ProtoFilter::And(vec![inv_proto]); + } + + let mut inv_ldap = LdapFilter::Present("class".to_string()); + for _i in 0..(FILTER_DEPTH_MAX + 1) { + inv_ldap = LdapFilter::And(vec![inv_ldap]); + } + + let ev = Event::from_internal(); + + // Test proto + read + let res = Filter::from_ro(audit, &ev, &inv_proto, &r_txn); + assert!(res == Err(OperationError::ResourceLimit)); + + // ldap + let res = Filter::from_ldap_ro(audit, &ev, &inv_ldap, &r_txn); + assert!(res == Err(OperationError::ResourceLimit)); + + // Can only have one db conn at a time. 
+ std::mem::drop(r_txn); + + // proto + write + let wr_txn = server.write(duration_from_epoch_now()); + let res = Filter::from_rw(audit, &ev, &inv_proto, &wr_txn); + assert!(res == Err(OperationError::ResourceLimit)); + }) + } + + #[test] + fn test_filter_max_element_limits() { + run_test!(|server: &QueryServer, audit: &mut AuditScope| { + const LIMIT: usize = 4; + let r_txn = server.read(); + + let inv_proto = ProtoFilter::And( + (0..(LIMIT * 2)) + .map(|_| ProtoFilter::Pres("class".to_string())) + .collect(), + ); + + let inv_ldap = LdapFilter::And( + (0..(LIMIT * 2)) + .map(|_| LdapFilter::Present("class".to_string())) + .collect(), + ); + + let mut ev = Event::from_internal(); + ev.limits.filter_max_elements = LIMIT; + + // Test proto + read + let res = Filter::from_ro(audit, &ev, &inv_proto, &r_txn); + assert!(res == Err(OperationError::ResourceLimit)); + + // ldap + let res = Filter::from_ldap_ro(audit, &ev, &inv_ldap, &r_txn); + assert!(res == Err(OperationError::ResourceLimit)); + + // Can only have one db conn at a time. + std::mem::drop(r_txn); + + // proto + write + let wr_txn = server.write(duration_from_epoch_now()); + let res = Filter::from_rw(audit, &ev, &inv_proto, &wr_txn); + assert!(res == Err(OperationError::ResourceLimit)); + }) + } } diff --git a/kanidmd/src/lib/idm/account.rs b/kanidmd/src/lib/idm/account.rs index 8245fa482..cb4f3ce0c 100644 --- a/kanidmd/src/lib/idm/account.rs +++ b/kanidmd/src/lib/idm/account.rs @@ -128,7 +128,6 @@ impl Account { // This could consume self? // The cred handler provided is what authenticated this user, so we can use it to // process what the proper claims should be. - // Get the claims from the cred_h Some(UserAuthToken { @@ -139,6 +138,11 @@ impl Account { // application: None, groups: self.groups.iter().map(|g| g.to_proto()).collect(), claims: claims.iter().map(|c| c.to_proto()).collect(), + // What's the best way to get access to these limits with regard to claims/other? 
+ lim_uidx: false, + lim_rmax: 128, + lim_pmax: 256, + lim_fmax: 32, }) } diff --git a/kanidmd/src/lib/idm/event.rs b/kanidmd/src/lib/idm/event.rs index 0dbebf93f..de1e7b67a 100644 --- a/kanidmd/src/lib/idm/event.rs +++ b/kanidmd/src/lib/idm/event.rs @@ -29,7 +29,7 @@ impl PasswordChangeEvent { qs: &QueryServerWriteTransaction, msg: IdmAccountSetPasswordMessage, ) -> Result { - let e = Event::from_rw_uat(audit, qs, msg.uat)?; + let e = Event::from_rw_uat(audit, qs, msg.uat.as_ref())?; let u = *e.get_uuid().ok_or(OperationError::InvalidState)?; Ok(PasswordChangeEvent { @@ -43,7 +43,7 @@ impl PasswordChangeEvent { pub fn from_parts( audit: &mut AuditScope, qs: &QueryServerWriteTransaction, - uat: Option, + uat: Option<&UserAuthToken>, target: Uuid, cleartext: String, appid: Option, @@ -78,7 +78,7 @@ impl UnixPasswordChangeEvent { pub fn from_parts( audit: &mut AuditScope, qs: &QueryServerWriteTransaction, - uat: Option, + uat: Option<&UserAuthToken>, target: Uuid, cleartext: String, ) -> Result { @@ -103,7 +103,7 @@ impl GeneratePasswordEvent { pub fn from_parts( audit: &mut AuditScope, qs: &QueryServerWriteTransaction, - uat: Option, + uat: Option<&UserAuthToken>, target: Uuid, appid: Option, ) -> Result { @@ -127,7 +127,7 @@ impl RegenerateRadiusSecretEvent { pub fn from_parts( audit: &mut AuditScope, qs: &QueryServerWriteTransaction, - uat: Option, + uat: Option<&UserAuthToken>, target: Uuid, ) -> Result { let e = Event::from_rw_uat(audit, qs, uat)?; @@ -153,7 +153,7 @@ impl RadiusAuthTokenEvent { pub fn from_parts( audit: &mut AuditScope, qs: &QueryServerReadTransaction, - uat: Option, + uat: Option<&UserAuthToken>, target: Uuid, ) -> Result { let e = Event::from_ro_uat(audit, qs, uat)?; @@ -179,7 +179,7 @@ impl UnixUserTokenEvent { pub fn from_parts( audit: &mut AuditScope, qs: &QueryServerReadTransaction, - uat: Option, + uat: Option<&UserAuthToken>, target: Uuid, ) -> Result { let e = Event::from_ro_uat(audit, qs, uat)?; @@ -205,7 +205,7 @@ impl UnixGroupTokenEvent { pub fn from_parts( audit: &mut AuditScope, qs: &QueryServerReadTransaction, - uat: Option, + uat: Option<&UserAuthToken>, target: Uuid, ) -> Result { let e = Event::from_ro_uat(audit, qs, uat)?; @@ -249,7 +249,7 @@ impl UnixUserAuthEvent { pub fn from_parts( audit: &mut AuditScope, qs: &QueryServerReadTransaction, - uat: Option, + uat: Option<&UserAuthToken>, target: Uuid, cleartext: String, ) -> Result { @@ -274,7 +274,7 @@ impl GenerateTOTPEvent { pub fn from_parts( audit: &mut AuditScope, qs: &QueryServerWriteTransaction, - uat: Option, + uat: Option<&UserAuthToken>, target: Uuid, label: String, ) -> Result { @@ -311,7 +311,7 @@ impl VerifyTOTPEvent { pub fn from_parts( audit: &mut AuditScope, qs: &QueryServerWriteTransaction, - uat: Option, + uat: Option<&UserAuthToken>, target: Uuid, session: Uuid, chal: u32, diff --git a/kanidmd/src/lib/idm/server.rs b/kanidmd/src/lib/idm/server.rs index b0623fb29..994d677dc 100644 --- a/kanidmd/src/lib/idm/server.rs +++ b/kanidmd/src/lib/idm/server.rs @@ -272,10 +272,17 @@ impl<'a> IdmServerWriteTransaction<'a> { if lae.target == *UUID_ANONYMOUS { // TODO: #59 We should have checked if anonymous was locked by now! let account = Account::try_from_entry_ro(au, &account_entry, &mut self.qs_read)?; + // Account must be anon, so we can gen the uat. 
Ok(Some(LdapBoundToken { - spn: account.spn, uuid: *UUID_ANONYMOUS, - effective_uuid: *UUID_ANONYMOUS, + effective_uat: account + .to_userauthtoken(&[]) + .ok_or(OperationError::InvalidState) + .map_err(|e| { + ladmin_error!(au, "Unable to generate effective_uat -> {:?}", e); + e + })?, + spn: account.spn, })) } else { let account = @@ -284,10 +291,26 @@ impl<'a> IdmServerWriteTransaction<'a> { .verify_unix_credential(au, lae.cleartext.as_str())? .is_some() { + // Get the anon uat + let anon_entry = self + .qs_read + .internal_search_uuid(au, &UUID_ANONYMOUS) + .map_err(|e| { + ladmin_error!(au, "Failed to find effective uat for auth ldap -> {:?}", e); + e + })?; + let anon_account = Account::try_from_entry_ro(au, &anon_entry, &mut self.qs_read)?; + Ok(Some(LdapBoundToken { spn: account.spn, uuid: account.uuid, - effective_uuid: *UUID_ANONYMOUS, + effective_uat: anon_account + .to_userauthtoken(&[]) + .ok_or(OperationError::InvalidState) + .map_err(|e| { + ladmin_error!(au, "Unable to generate effective_uat -> {:?}", e); + e + })?, })) } else { Ok(None) diff --git a/kanidmd/src/lib/ldap.rs b/kanidmd/src/lib/ldap.rs index e00c17bb4..32dde37f5 100644 --- a/kanidmd/src/lib/ldap.rs +++ b/kanidmd/src/lib/ldap.rs @@ -1,11 +1,10 @@ use crate::audit::AuditScope; use crate::constants::{STR_UUID_DOMAIN_INFO, UUID_ANONYMOUS, UUID_DOMAIN_INFO}; use crate::event::SearchEvent; -use crate::filter::Filter; use crate::idm::event::LdapAuthEvent; use crate::idm::server::IdmServer; use crate::server::QueryServerTransaction; -use kanidm_proto::v1::OperationError; +use kanidm_proto::v1::{OperationError, UserAuthToken}; use ldap3_server::simple::*; use std::collections::BTreeSet; use std::iter; @@ -31,7 +30,7 @@ pub struct LdapBoundToken { pub spn: String, pub uuid: Uuid, // For now, always anonymous - pub effective_uuid: Uuid, + pub effective_uat: UserAuthToken, } pub struct LdapServer { @@ -236,13 +235,6 @@ impl LdapServer { ladmin_info!(au, "LDAP Search Filter -> {:?}", lfilter); - // Kanidm Filter from LdapFilter - let filter = - Filter::from_ldap_ro(au, &lfilter, &idm_read.qs_read).map_err(|e| { - lrequest_error!(au, "invalid ldap filter {:?}", e); - e - })?; - // Build the event, with the permissions from effective_uuid // (should always be anonymous at the moment) // ! Remember, searchEvent wraps to ignore hidden for us. @@ -250,10 +242,14 @@ impl LdapServer { SearchEvent::new_ext_impersonate_uuid( au, &idm_read.qs_read, - &uat.effective_uuid, - &filter, + &uat.effective_uat, + &lfilter, attrs, ) + }) + .map_err(|e| { + ladmin_error!(au, "failed to create search event -> {:?}", e); + e })?; let res = idm_read.qs_read.search_ext(au, &se).map_err(|e| { diff --git a/kanidmd/src/lib/lib.rs b/kanidmd/src/lib/lib.rs index 5041a76b0..5668de513 100644 --- a/kanidmd/src/lib/lib.rs +++ b/kanidmd/src/lib/lib.rs @@ -1,4 +1,4 @@ -#![deny(warnings)] +// #![deny(warnings)] #![warn(unused_extern_crates)] #![deny(clippy::unwrap_used)] #![deny(clippy::expect_used)] diff --git a/kanidmd/src/lib/server.rs b/kanidmd/src/lib/server.rs index 72a3ff271..f0221458d 100644 --- a/kanidmd/src/lib/server.rs +++ b/kanidmd/src/lib/server.rs @@ -145,14 +145,20 @@ pub trait QueryServerTransaction { e })?; + let lims = se.get_limits(); + // NOTE: We currently can't build search plugins due to the inability to hand // the QS wr/ro to the plugin trait. However, there shouldn't be a need for search // plugis, because all data transforms should be in the write path. 
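With the backend and filter changes in place, the remaining hunks are plumbing: Account::to_userauthtoken stamps default limits into the token (unindexed searches off, 128 results, 256 filter-test candidates, 32 filter elements), Event::from_ro_uat and from_rw_uat turn those fields into an EventLimits, internal events get EventLimits::unlimited(), the LDAP path now carries a full anonymous effective_uat so its searches are limited like any other session, and the query server hands se.get_limits() to the backend. A miniature, self-contained restatement of that flow; the types and the search body are stand-ins, not the real server path:

// The numbers mirror the defaults set in Account::to_userauthtoken in this patch.
struct UserAuthToken {
    lim_uidx: bool,
    lim_rmax: usize,
    lim_pmax: usize,
    lim_fmax: usize,
}

struct EventLimits {
    unindexed_allow: bool,
    search_max_results: usize,
    search_max_filter_test: usize,
    filter_max_elements: usize,
}

impl EventLimits {
    // Internal server events are never throttled.
    fn unlimited() -> Self {
        EventLimits {
            unindexed_allow: true,
            search_max_results: usize::MAX,
            search_max_filter_test: usize::MAX,
            filter_max_elements: usize::MAX,
        }
    }

    // User events inherit whatever the session token granted.
    fn from_uat(uat: &UserAuthToken) -> Self {
        EventLimits {
            unindexed_allow: uat.lim_uidx,
            search_max_results: uat.lim_rmax,
            search_max_filter_test: uat.lim_pmax,
            filter_max_elements: uat.lim_fmax,
        }
    }
}

// Hypothetical search body: only the final result-count check is modelled.
fn search(candidates: usize, lims: &EventLimits) -> Result<usize, &'static str> {
    if candidates > lims.search_max_results {
        Err("ResourceLimit")
    } else {
        Ok(candidates)
    }
}

fn main() {
    let uat = UserAuthToken { lim_uidx: false, lim_rmax: 128, lim_pmax: 256, lim_fmax: 32 };
    assert!(search(10_000, &EventLimits::from_uat(&uat)).is_err());
    assert!(search(10_000, &EventLimits::unlimited()).is_ok());
}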
- let res = self.get_be_txn().search(au, &vfr).map(|r| r).map_err(|e| { - ladmin_error!(au, "backend failure -> {:?}", e); - OperationError::Backend - })?; + let res = self + .get_be_txn() + .search(au, lims, &vfr) + .map(|r| r) + .map_err(|e| { + ladmin_error!(au, "backend failure -> {:?}", e); + OperationError::Backend + })?; // Apply ACP before we let the plugins "have at it". // WARNING; for external searches this is NOT the only @@ -176,7 +182,9 @@ pub trait QueryServerTransaction { e })?; - self.get_be_txn().exists(au, &vfr).map_err(|e| { + let lims = ee.get_limits(); + + self.get_be_txn().exists(au, &lims, &vfr).map_err(|e| { ladmin_error!(au, "backend failure -> {:?}", e); OperationError::Backend }) @@ -3251,75 +3259,6 @@ mod tests { }) } - #[test] - fn test_qs_resolve_value() { - run_test!(|server: &QueryServer, audit: &mut AuditScope| { - let server_txn = server.write(duration_from_epoch_now()); - let e1: Entry = Entry::unsafe_from_entry_str( - r#"{ - "attrs": { - "class": ["object", "person", "account"], - "name": ["testperson1"], - "uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"], - "description": ["testperson"], - "displayname": ["testperson1"] - } - }"#, - ); - let e2: Entry = Entry::unsafe_from_entry_str( - r#"{ - "attrs": { - "class": ["object", "person"], - "name": ["testperson2"], - "uuid": ["a67c0c71-0b35-4218-a6b0-22d23d131d27"], - "description": ["testperson"], - "displayname": ["testperson2"] - } - }"#, - ); - let e_ts: Entry = Entry::unsafe_from_entry_str( - r#"{ - "attrs": { - "class": ["tombstone", "object"], - "uuid": ["9557f49c-97a5-4277-a9a5-097d17eb8317"] - } - }"#, - ); - let ce = CreateEvent::new_internal(vec![e1, e2, e_ts]); - let cr = server_txn.create(audit, &ce); - assert!(cr.is_ok()); - - // Resolving most times should yield expected results - let t1 = Value::new_utf8s("teststring"); - let r1 = server_txn.resolve_value(audit, &t1); - assert!(r1 == Ok("teststring".to_string())); - - // Resolve UUID with matching spn - let t_uuid = Value::new_refer_s("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap(); - let r_uuid = server_txn.resolve_value(audit, &t_uuid); - debug!("{:?}", r_uuid); - assert!(r_uuid == Ok("testperson1@example.com".to_string())); - - // Resolve UUID with matching name - let t_uuid = Value::new_refer_s("a67c0c71-0b35-4218-a6b0-22d23d131d27").unwrap(); - let r_uuid = server_txn.resolve_value(audit, &t_uuid); - debug!("{:?}", r_uuid); - assert!(r_uuid == Ok("testperson2".to_string())); - - // Resolve UUID non-exist - let t_uuid_non = Value::new_refer_s("b83e98f0-3d2e-41d2-9796-d8d993289c86").unwrap(); - let r_uuid_non = server_txn.resolve_value(audit, &t_uuid_non); - debug!("{:?}", r_uuid_non); - assert!(r_uuid_non == Ok("b83e98f0-3d2e-41d2-9796-d8d993289c86".to_string())); - - // Resolve UUID to tombstone/recycled (same an non-exst) - let t_uuid_ts = Value::new_refer_s("9557f49c-97a5-4277-a9a5-097d17eb8317").unwrap(); - let r_uuid_ts = server_txn.resolve_value(audit, &t_uuid_ts); - debug!("{:?}", r_uuid_ts); - assert!(r_uuid_ts == Ok("9557f49c-97a5-4277-a9a5-097d17eb8317".to_string())); - }) - } - #[test] fn test_qs_dynamic_schema_class() { run_test!(|server: &QueryServer, audit: &mut AuditScope| {