67 resource limits impl (#307)

Fixes #67 and #237 - this adds resource limits to all server accounts by default. These limits bound the size of search result sets, cap how many entries can be tested with filter tests, disable unindexed queries, and limit the size and depth of filters to prevent stack-depth and other resource-exhaustion issues.
Firstyear 2020-08-15 15:22:04 +10:00 committed by GitHub
parent 7d00f76fa9
commit a6269de9ab
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
15 changed files with 881 additions and 445 deletions
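
For orientation before the per-file diffs: the session's UserAuthToken gains four lim_* fields, and each event derives an EventLimits from them (internal events use unlimited()). The sketch below is condensed from the diff that follows; the field names and constructors match the commit, while the stub types and the values in main() are illustrative only and not the full kanidm definitions.

// Condensed from this commit: the lim_* fields and the EventLimits
// constructors mirror the diff below; the stub types are illustrative.

pub struct UserAuthToken {
    pub lim_uidx: bool,  // may this session run unindexed (ALLIDS) queries?
    pub lim_rmax: usize, // maximum entries a search may return
    pub lim_pmax: usize, // maximum candidates that may be filter-tested
    pub lim_fmax: usize, // maximum number of filter elements
}

#[derive(Debug, Clone)]
pub struct EventLimits {
    pub unindexed_allow: bool,
    pub search_max_results: usize,
    pub search_max_filter_test: usize,
    pub filter_max_elements: usize,
}

impl EventLimits {
    // Internal, server-origin events are never limited.
    pub fn unlimited() -> Self {
        EventLimits {
            unindexed_allow: true,
            search_max_results: usize::MAX,
            search_max_filter_test: usize::MAX,
            filter_max_elements: usize::MAX,
        }
    }

    // User events take their limits from the authenticated session's token.
    pub fn from_uat(uat: &UserAuthToken) -> Self {
        EventLimits {
            unindexed_allow: uat.lim_uidx,
            search_max_results: uat.lim_rmax,
            search_max_filter_test: uat.lim_pmax,
            filter_max_elements: uat.lim_fmax,
        }
    }
}

fn main() {
    // Example values only; the real values are set per-account by the server.
    let uat = UserAuthToken { lim_uidx: false, lim_rmax: 1024, lim_pmax: 2048, lim_fmax: 32 };
    let lim = EventLimits::from_uat(&uat);
    assert!(!lim.unindexed_allow);
    assert_eq!(lim.search_max_results, 1024);
}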

View file

@ -172,8 +172,10 @@ The session limits would be:
* write rate limit (writes over time)
* network request size
- The system limits that can not be account overridden are:
+ The entry specific limits are:
* maximum entry size
* maximum number of multi value attributes
+ These are stored as attributes on the entry itself
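
As a companion to the list above, here is a hedged sketch of where the search-time limits bite in the backend, condensed from the be::search and be::exists changes later in this diff. The IDL variants and the checks mirror the commit; the standalone functions and simplified types are illustrative, not the real kanidm signatures.

#[derive(Debug, PartialEq)]
pub enum OperationError {
    ResourceLimit, // the new error variant added by this commit
}

// filter_max_elements is omitted here; it is enforced earlier, while parsing
// the filter (see the FilterComp::from_* changes and FILTER_DEPTH_MAX below).
pub struct EventLimits {
    pub unindexed_allow: bool,
    pub search_max_results: usize,
    pub search_max_filter_test: usize,
}

// Simplified stand-in for the backend's index resolution result.
pub enum Idl {
    AllIds,                  // fully unindexed
    Partial(usize),          // candidate count from a partially indexed filter
    PartialThreshold(usize), // server chose to stop resolving; the user is not penalised
    Indexed(usize),          // fully resolved candidate count
}

// Pre-fetch check: decide whether the candidate set may be loaded at all.
fn check_idl(idl: &Idl, erl: &EventLimits) -> Result<(), OperationError> {
    match idl {
        Idl::AllIds if !erl.unindexed_allow => Err(OperationError::ResourceLimit),
        Idl::Partial(n) if *n > erl.search_max_filter_test => Err(OperationError::ResourceLimit),
        Idl::Indexed(n) if *n > erl.search_max_results => Err(OperationError::ResourceLimit),
        _ => Ok(()),
    }
}

// Post-filter check: results that survive the filter test are still capped.
fn check_results(entries: usize, erl: &EventLimits) -> Result<(), OperationError> {
    if entries > erl.search_max_results {
        Err(OperationError::ResourceLimit)
    } else {
        Ok(())
    }
}

fn main() {
    let deny = EventLimits { unindexed_allow: false, search_max_results: 128, search_max_filter_test: 256 };
    assert_eq!(check_idl(&Idl::AllIds, &deny), Err(OperationError::ResourceLimit));
    assert!(check_idl(&Idl::Partial(10), &deny).is_ok());
    assert!(check_idl(&Idl::PartialThreshold(10_000), &deny).is_ok());
    assert_eq!(check_idl(&Idl::Indexed(1_000), &deny), Err(OperationError::ResourceLimit));
    assert!(check_results(100, &deny).is_ok());
}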

View file

@ -101,6 +101,7 @@ pub enum OperationError {
PasswordEmpty, PasswordEmpty,
PasswordBadListed, PasswordBadListed,
CryptographyError, CryptographyError,
ResourceLimit,
} }
impl PartialEq for OperationError { impl PartialEq for OperationError {
@ -173,6 +174,10 @@ pub struct UserAuthToken {
pub groups: Vec<Group>, pub groups: Vec<Group>,
pub claims: Vec<Claim>, pub claims: Vec<Claim>,
// Should we allow supplemental ava's to be added on request? // Should we allow supplemental ava's to be added on request?
pub lim_uidx: bool,
pub lim_rmax: usize,
pub lim_pmax: usize,
pub lim_fmax: usize,
} }
impl fmt::Display for UserAuthToken { impl fmt::Display for UserAuthToken {

View file

@ -31,7 +31,7 @@ use crate::modify::Modify;
use crate::server::{QueryServerTransaction, QueryServerWriteTransaction}; use crate::server::{QueryServerTransaction, QueryServerWriteTransaction};
use crate::value::PartialValue; use crate::value::PartialValue;
use crate::event::{CreateEvent, DeleteEvent, EventOrigin, ModifyEvent, SearchEvent}; use crate::event::{CreateEvent, DeleteEvent, Event, EventOrigin, ModifyEvent, SearchEvent};
lazy_static! { lazy_static! {
static ref CLASS_ACS: PartialValue = PartialValue::new_class("access_control_search"); static ref CLASS_ACS: PartialValue = PartialValue::new_class("access_control_search");
@ -320,7 +320,9 @@ impl AccessControlProfile {
OperationError::InvalidACPState("Missing acp_targetscope".to_string()) OperationError::InvalidACPState("Missing acp_targetscope".to_string())
})?; })?;
let receiver_i = Filter::from_rw(audit, &receiver_f, qs).map_err(|e| { let event = Event::from_internal();
let receiver_i = Filter::from_rw(audit, &event, &receiver_f, qs).map_err(|e| {
ladmin_error!(audit, "Receiver validation failed {:?}", e); ladmin_error!(audit, "Receiver validation failed {:?}", e);
e e
})?; })?;
@ -329,7 +331,7 @@ impl AccessControlProfile {
OperationError::SchemaViolation(e) OperationError::SchemaViolation(e)
})?; })?;
let targetscope_i = Filter::from_rw(audit, &targetscope_f, qs).map_err(|e| { let targetscope_i = Filter::from_rw(audit, &event, &targetscope_f, qs).map_err(|e| {
ladmin_error!(audit, "Targetscope validation failed {:?}", e); ladmin_error!(audit, "Targetscope validation failed {:?}", e);
e e
})?; })?;

View file

@ -355,13 +355,14 @@ impl Handler<WhoamiMessage> for QueryServerReadV1 {
// this far. // this far.
let uat = msg.uat.clone().ok_or(OperationError::NotAuthenticated)?; let uat = msg.uat.clone().ok_or(OperationError::NotAuthenticated)?;
let srch = match SearchEvent::from_whoami_request(&mut audit, msg.uat, &qs_read) { let srch =
Ok(s) => s, match SearchEvent::from_whoami_request(&mut audit, msg.uat.as_ref(), &qs_read) {
Err(e) => { Ok(s) => s,
ladmin_error!(audit, "Failed to begin whoami: {:?}", e); Err(e) => {
return Err(e); ladmin_error!(audit, "Failed to begin whoami: {:?}", e);
} return Err(e);
}; }
};
ltrace!(audit, "Begin event {:?}", srch); ltrace!(audit, "Begin event {:?}", srch);
@ -500,7 +501,7 @@ impl Handler<InternalRadiusReadMessage> for QueryServerReadV1 {
// Make an event from the request // Make an event from the request
let srch = match SearchEvent::from_target_uuid_request( let srch = match SearchEvent::from_target_uuid_request(
&mut audit, &mut audit,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
&qs_read, &qs_read,
) { ) {
@ -568,7 +569,7 @@ impl Handler<InternalRadiusTokenReadMessage> for QueryServerReadV1 {
let rate = match RadiusAuthTokenEvent::from_parts( let rate = match RadiusAuthTokenEvent::from_parts(
&mut audit, &mut audit,
&idm_read.qs_read, &idm_read.qs_read,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
) { ) {
Ok(s) => s, Ok(s) => s,
@ -624,7 +625,7 @@ impl Handler<InternalUnixUserTokenReadMessage> for QueryServerReadV1 {
let rate = match UnixUserTokenEvent::from_parts( let rate = match UnixUserTokenEvent::from_parts(
&mut audit, &mut audit,
&idm_read.qs_read, &idm_read.qs_read,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
) { ) {
Ok(s) => s, Ok(s) => s,
@ -680,7 +681,7 @@ impl Handler<InternalUnixGroupTokenReadMessage> for QueryServerReadV1 {
let rate = match UnixGroupTokenEvent::from_parts( let rate = match UnixGroupTokenEvent::from_parts(
&mut audit, &mut audit,
&idm_read.qs_read, &idm_read.qs_read,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
) { ) {
Ok(s) => s, Ok(s) => s,
@ -725,7 +726,7 @@ impl Handler<InternalSshKeyReadMessage> for QueryServerReadV1 {
// Make an event from the request // Make an event from the request
let srch = match SearchEvent::from_target_uuid_request( let srch = match SearchEvent::from_target_uuid_request(
&mut audit, &mut audit,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
&qs_read, &qs_read,
) { ) {
@ -794,7 +795,7 @@ impl Handler<InternalSshKeyTagReadMessage> for QueryServerReadV1 {
// Make an event from the request // Make an event from the request
let srch = match SearchEvent::from_target_uuid_request( let srch = match SearchEvent::from_target_uuid_request(
&mut audit, &mut audit,
uat, uat.as_ref(),
target_uuid, target_uuid,
&qs_read, &qs_read,
) { ) {
@ -866,7 +867,7 @@ impl Handler<IdmAccountUnixAuthMessage> for QueryServerReadV1 {
let uuae = match UnixUserAuthEvent::from_parts( let uuae = match UnixUserAuthEvent::from_parts(
&mut audit, &mut audit,
&idm_write.qs_read, &idm_write.qs_read,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
msg.cred, msg.cred,
) { ) {

View file

@ -364,14 +364,20 @@ impl QueryServerWriteV1 {
e e
})?; })?;
let mdf = let mdf = match ModifyEvent::from_parts(
match ModifyEvent::from_parts(audit, uat, target_uuid, proto_ml, filter, &qs_write) { audit,
Ok(m) => m, uat.as_ref(),
Err(e) => { target_uuid,
ladmin_error!(audit, "Failed to begin modify: {:?}", e); proto_ml,
return Err(e); filter,
} &qs_write,
}; ) {
Ok(m) => m,
Err(e) => {
ladmin_error!(audit, "Failed to begin modify: {:?}", e);
return Err(e);
}
};
ltrace!(audit, "Begin modify event {:?}", mdf); ltrace!(audit, "Begin modify event {:?}", mdf);
@ -397,7 +403,7 @@ impl QueryServerWriteV1 {
let mdf = match ModifyEvent::from_internal_parts( let mdf = match ModifyEvent::from_internal_parts(
audit, audit,
uat, uat.as_ref(),
target_uuid, target_uuid,
ml, ml,
filter, filter,
@ -531,8 +537,12 @@ impl Handler<InternalDeleteMessage> for QueryServerWriteV1 {
|| { || {
let qs_write = self.qs.write(duration_from_epoch_now()); let qs_write = self.qs.write(duration_from_epoch_now());
let del = match DeleteEvent::from_parts(&mut audit, msg.uat, &msg.filter, &qs_write) let del = match DeleteEvent::from_parts(
{ &mut audit,
msg.uat.as_ref(),
&msg.filter,
&qs_write,
) {
Ok(d) => d, Ok(d) => d,
Err(e) => { Err(e) => {
ladmin_error!(audit, "Failed to begin delete: {:?}", e); ladmin_error!(audit, "Failed to begin delete: {:?}", e);
@ -567,7 +577,10 @@ impl Handler<ReviveRecycledMessage> for QueryServerWriteV1 {
let qs_write = self.qs.write(duration_from_epoch_now()); let qs_write = self.qs.write(duration_from_epoch_now());
let rev = match ReviveRecycledEvent::from_parts( let rev = match ReviveRecycledEvent::from_parts(
&mut audit, msg.uat, msg.filter, &qs_write, &mut audit,
msg.uat.as_ref(),
msg.filter,
&qs_write,
) { ) {
Ok(r) => r, Ok(r) => r,
Err(e) => { Err(e) => {
@ -632,7 +645,7 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
let pce = PasswordChangeEvent::from_parts( let pce = PasswordChangeEvent::from_parts(
&mut audit, &mut audit,
&idms_prox_write.qs_write, &idms_prox_write.qs_write,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
cleartext, cleartext,
msg.appid, msg.appid,
@ -654,7 +667,7 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
let gpe = GeneratePasswordEvent::from_parts( let gpe = GeneratePasswordEvent::from_parts(
&mut audit, &mut audit,
&idms_prox_write.qs_write, &idms_prox_write.qs_write,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
msg.appid, msg.appid,
) )
@ -675,7 +688,7 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
let gte = GenerateTOTPEvent::from_parts( let gte = GenerateTOTPEvent::from_parts(
&mut audit, &mut audit,
&idms_prox_write.qs_write, &idms_prox_write.qs_write,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
label, label,
) )
@ -695,7 +708,7 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
let vte = VerifyTOTPEvent::from_parts( let vte = VerifyTOTPEvent::from_parts(
&mut audit, &mut audit,
&idms_prox_write.qs_write, &idms_prox_write.qs_write,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
uuid, uuid,
chal, chal,
@ -789,7 +802,7 @@ impl Handler<InternalRegenerateRadiusMessage> for QueryServerWriteV1 {
let rrse = RegenerateRadiusSecretEvent::from_parts( let rrse = RegenerateRadiusSecretEvent::from_parts(
&mut audit, &mut audit,
&idms_prox_write.qs_write, &idms_prox_write.qs_write,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
) )
.map_err(|e| { .map_err(|e| {
@ -834,7 +847,7 @@ impl Handler<PurgeAttributeMessage> for QueryServerWriteV1 {
let mdf = match ModifyEvent::from_target_uuid_attr_purge( let mdf = match ModifyEvent::from_target_uuid_attr_purge(
&mut audit, &mut audit,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
&msg.attr, &msg.attr,
msg.filter, msg.filter,
@ -885,7 +898,7 @@ impl Handler<RemoveAttributeValueMessage> for QueryServerWriteV1 {
let mdf = match ModifyEvent::from_parts( let mdf = match ModifyEvent::from_parts(
&mut audit, &mut audit,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
&proto_ml, &proto_ml,
msg.filter, msg.filter,
@ -1181,7 +1194,7 @@ impl Handler<IdmAccountUnixSetCredMessage> for QueryServerWriteV1 {
let upce = UnixPasswordChangeEvent::from_parts( let upce = UnixPasswordChangeEvent::from_parts(
&mut audit, &mut audit,
&idms_prox_write.qs_write, &idms_prox_write.qs_write,
msg.uat, msg.uat.as_ref(),
target_uuid, target_uuid,
msg.cred, msg.cred,
) )

View file

@ -1248,7 +1248,10 @@ impl IdlSqlite {
let manager = SqliteConnectionManager::file(path) let manager = SqliteConnectionManager::file(path)
.with_init(move |c| { .with_init(move |c| {
c.execute_batch( c.execute_batch(
format!("PRAGMA page_size={}; VACUUM; PRAGMA journal_mode=WAL;", fstype as u32) format!(
"PRAGMA page_size={}; VACUUM; PRAGMA journal_mode=WAL;",
fstype as u32
)
.as_str(), .as_str(),
) )
}) })
@ -1259,8 +1262,8 @@ impl IdlSqlite {
// a single DB thread, else we cause consistency issues. // a single DB thread, else we cause consistency issues.
builder1.max_size(1) builder1.max_size(1)
} else { } else {
// Have to add 1 for the write thread. // Have to add 1 for the write thread, and for the interval threads
builder1.max_size(pool_size + 1) builder1.max_size(pool_size + 2)
}; };
// Look at max_size and thread_pool here for perf later // Look at max_size and thread_pool here for perf later
let pool = builder2.build(manager).map_err(|e| { let pool = builder2.build(manager).map_err(|e| {
@ -1275,7 +1278,7 @@ impl IdlSqlite {
#[allow(clippy::expect_used)] #[allow(clippy::expect_used)]
let conn = self let conn = self
.pool .pool
.get() .try_get()
.expect("Unable to get connection from pool!!!"); .expect("Unable to get connection from pool!!!");
IdlSqliteReadTransaction::new(conn) IdlSqliteReadTransaction::new(conn)
} }
@ -1284,7 +1287,7 @@ impl IdlSqlite {
#[allow(clippy::expect_used)] #[allow(clippy::expect_used)]
let conn = self let conn = self
.pool .pool
.get() .try_get()
.expect("Unable to get connection from pool!!!"); .expect("Unable to get connection from pool!!!");
IdlSqliteWriteTransaction::new(conn) IdlSqliteWriteTransaction::new(conn)
} }

View file

@ -8,6 +8,7 @@ use std::sync::Arc;
use crate::audit::AuditScope; use crate::audit::AuditScope;
use crate::be::dbentry::DbEntry; use crate::be::dbentry::DbEntry;
use crate::entry::{Entry, EntryCommitted, EntryNew, EntrySealed}; use crate::entry::{Entry, EntryCommitted, EntryNew, EntrySealed};
use crate::event::EventLimits;
use crate::filter::{Filter, FilterPlan, FilterResolved, FilterValidResolved}; use crate::filter::{Filter, FilterPlan, FilterResolved, FilterValidResolved};
use crate::value::Value; use crate::value::Value;
use concread::cowcell::*; use concread::cowcell::*;
@ -470,10 +471,10 @@ pub trait BackendTransaction {
}) })
} }
// Take filter, and AuditScope ref?
fn search( fn search(
&self, &self,
au: &mut AuditScope, au: &mut AuditScope,
erl: &EventLimits,
filt: &Filter<FilterValidResolved>, filt: &Filter<FilterValidResolved>,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> { ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
// Unlike DS, even if we don't get the index back, we can just pass // Unlike DS, even if we don't get the index back, we can just pass
@ -493,25 +494,53 @@ pub trait BackendTransaction {
lfilter_info!(au, "filter executed plan -> {:?}", fplan); lfilter_info!(au, "filter executed plan -> {:?}", fplan);
// Based on the IDL we determine if limits are required at this point.
match &idl {
IDL::ALLIDS => {
if !erl.unindexed_allow {
ladmin_error!(au, "filter (search) is fully unindexed, and not allowed by resource limits");
return Err(OperationError::ResourceLimit);
}
}
IDL::Partial(idl_br) => {
if idl_br.len() > erl.search_max_filter_test {
ladmin_error!(au, "filter (search) is partial indexed and greater than search_max_filter_test allowed by resource limits");
return Err(OperationError::ResourceLimit);
}
}
IDL::PartialThreshold(_) => {
// Since we opted for this, this is not the fault
// of the user and we should not penalise them by limiting on partial.
}
IDL::Indexed(idl_br) => {
// We know this is resolved here, so we can attempt the limit
// check. This has to fold the whole index, but you know, class=pres is
// indexed ...
if idl_br.len() > erl.search_max_results {
ladmin_error!(au, "filter (search) is indexed and greater than search_max_results allowed by resource limits");
return Err(OperationError::ResourceLimit);
}
}
};
let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| { let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| {
ladmin_error!(au, "get_identry failed {:?}", e); ladmin_error!(au, "get_identry failed {:?}", e);
e e
})?; })?;
// Do other things
// Now, de-serialise the raw_entries back to entries, and populate their ID's
// if not 100% resolved.
let entries_filtered = match idl { let entries_filtered = match idl {
IDL::ALLIDS | IDL::Partial(_) => { IDL::ALLIDS => lperf_segment!(au, "be::search<entry::ftest::allids>", || {
lfilter_error!(au, "filter (search) was partially or fully unindexed.",); entries
lperf_segment!(au, "be::search<entry::ftest::allids>", || { .into_iter()
entries .filter(|e| e.entry_match_no_index(&filt))
.into_iter() .collect()
.filter(|e| e.entry_match_no_index(&filt)) }),
.collect() IDL::Partial(_) => lperf_segment!(au, "be::search<entry::ftest::partial>", || {
}) entries
} .into_iter()
.filter(|e| e.entry_match_no_index(&filt))
.collect()
}),
IDL::PartialThreshold(_) => { IDL::PartialThreshold(_) => {
lperf_trace_segment!(au, "be::search<entry::ftest::thresh>", || { lperf_trace_segment!(au, "be::search<entry::ftest::thresh>", || {
entries entries
@ -527,23 +556,12 @@ pub trait BackendTransaction {
} }
}; };
/* // If the idl was not indexed, apply the resource limit now. Avoid the needless match since the
// This is good for testing disagreements between the idl layer and the filter/entries // if statement is quick.
if cfg!(test) { if entries_filtered.len() > erl.search_max_results {
let check_raw_entries = try_audit!(au, self.get_idlayer().get_identry(au, &IDL::ALLIDS)); ladmin_error!(au, "filter (search) is resolved and greater than search_max_results allowed by resource limits");
let check_entries: Result<Vec<_>, _> = return Err(OperationError::ResourceLimit);
check_raw_entries.into_iter().map(|ide| ide.into_entry()).collect();
let check_entries = try_audit!(au, check_entries);
let f_check_entries: Vec<_> =
check_entries
.into_iter()
.filter(|e| e.entry_match_no_index(&filt))
.collect();
debug!("raw -> {:?}", entries_filtered);
debug!("check -> {:?}", f_check_entries);
assert!(f_check_entries == entries_filtered);
} }
*/
Ok(entries_filtered) Ok(entries_filtered)
}) })
@ -556,6 +574,7 @@ pub trait BackendTransaction {
fn exists( fn exists(
&self, &self,
au: &mut AuditScope, au: &mut AuditScope,
erl: &EventLimits,
filt: &Filter<FilterValidResolved>, filt: &Filter<FilterValidResolved>,
) -> Result<bool, OperationError> { ) -> Result<bool, OperationError> {
lperf_trace_segment!(au, "be::exists", || { lperf_trace_segment!(au, "be::exists", || {
@ -572,26 +591,32 @@ pub trait BackendTransaction {
lfilter_info!(au, "filter executed plan -> {:?}", fplan); lfilter_info!(au, "filter executed plan -> {:?}", fplan);
// Apply limits to the IDL.
match &idl {
IDL::ALLIDS => {
if !erl.unindexed_allow {
ladmin_error!(au, "filter (exists) is fully unindexed, and not allowed by resource limits");
return Err(OperationError::ResourceLimit);
}
}
IDL::Partial(idl_br) => {
if idl_br.len() > erl.search_max_filter_test {
ladmin_error!(au, "filter (exists) is partial indexed and greater than search_max_filter_test allowed by resource limits");
return Err(OperationError::ResourceLimit);
}
}
IDL::PartialThreshold(_) => {
// Since we opted for this, this is not the fault
// of the user and we should not penalise them.
}
IDL::Indexed(_) => {}
}
// Now, check the idl -- if it's fully resolved, we can skip this because the query // Now, check the idl -- if it's fully resolved, we can skip this because the query
// was fully indexed. // was fully indexed.
match &idl { match &idl {
IDL::Indexed(idl) => Ok(!idl.is_empty()), IDL::Indexed(idl) => Ok(!idl.is_empty()),
IDL::PartialThreshold(_) => {
let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| {
ladmin_error!(au, "get_identry failed {:?}", e);
e
})?;
// if not 100% resolved query, apply the filter test.
let entries_filtered: Vec<_> = entries
.into_iter()
.filter(|e| e.entry_match_no_index(&filt))
.collect();
Ok(!entries_filtered.is_empty())
}
_ => { _ => {
lfilter_error!(au, "filter (exists) was partially or fully unindexed",);
let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| { let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| {
ladmin_error!(au, "get_identry failed {:?}", e); ladmin_error!(au, "get_identry failed {:?}", e);
e e
@ -1372,6 +1397,7 @@ mod tests {
use super::{ use super::{
Backend, BackendTransaction, BackendWriteTransaction, FsType, OperationError, IDL, Backend, BackendTransaction, BackendWriteTransaction, FsType, OperationError, IDL,
}; };
use crate::event::EventLimits;
use crate::value::{IndexType, PartialValue, Value}; use crate::value::{IndexType, PartialValue, Value};
macro_rules! run_test { macro_rules! run_test {
@ -1438,7 +1464,8 @@ mod tests {
.expect("failed to generate filter") .expect("failed to generate filter")
.into_valid_resolved() .into_valid_resolved()
}; };
let entries = $be.search($audit, &filt).expect("failed to search"); let lims = EventLimits::unlimited();
let entries = $be.search($audit, &lims, &filt).expect("failed to search");
entries.first().is_some() entries.first().is_some()
}}; }};
} }
@ -1451,7 +1478,8 @@ mod tests {
.expect("failed to generate filter") .expect("failed to generate filter")
.into_valid_resolved() .into_valid_resolved()
}; };
let entries = $be.search($audit, &filt).expect("failed to search"); let lims = EventLimits::unlimited();
let entries = $be.search($audit, &lims, &filt).expect("failed to search");
match entries.first() { match entries.first() {
Some(ent) => ent.attribute_pres($attr), Some(ent) => ent.attribute_pres($attr),
None => false, None => false,
@ -1509,7 +1537,9 @@ mod tests {
let filt = let filt =
unsafe { filter_resolved!(f_eq("userid", PartialValue::new_utf8s("claire"))) }; unsafe { filter_resolved!(f_eq("userid", PartialValue::new_utf8s("claire"))) };
let r = be.search(audit, &filt); let lims = EventLimits::unlimited();
let r = be.search(audit, &lims, &filt);
assert!(r.expect("Search failed!").len() == 1); assert!(r.expect("Search failed!").len() == 1);
// Test empty search // Test empty search
@ -1524,6 +1554,7 @@ mod tests {
fn test_be_simple_modify() { fn test_be_simple_modify() {
run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| { run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| {
ltrace!(audit, "Simple Modify"); ltrace!(audit, "Simple Modify");
let lims = EventLimits::unlimited();
// First create some entries (3?) // First create some entries (3?)
let mut e1: Entry<EntryInit, EntryNew> = Entry::new(); let mut e1: Entry<EntryInit, EntryNew> = Entry::new();
e1.add_ava("userid", Value::from("william")); e1.add_ava("userid", Value::from("william"));
@ -1542,7 +1573,7 @@ mod tests {
// You need to now retrieve the entries back out to get the entry id's // You need to now retrieve the entries back out to get the entry id's
let mut results = be let mut results = be
.search(audit, unsafe { &filter_resolved!(f_pres("userid")) }) .search(audit, &lims, unsafe { &filter_resolved!(f_pres("userid")) })
.expect("Failed to search"); .expect("Failed to search");
// Get these out to usable entries. // Get these out to usable entries.
@ -1597,6 +1628,7 @@ mod tests {
fn test_be_simple_delete() { fn test_be_simple_delete() {
run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| { run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| {
ltrace!(audit, "Simple Delete"); ltrace!(audit, "Simple Delete");
let lims = EventLimits::unlimited();
// First create some entries (3?) // First create some entries (3?)
let mut e1: Entry<EntryInit, EntryNew> = Entry::new(); let mut e1: Entry<EntryInit, EntryNew> = Entry::new();
@ -1622,7 +1654,7 @@ mod tests {
// You need to now retrieve the entries back out to get the entry id's // You need to now retrieve the entries back out to get the entry id's
let mut results = be let mut results = be
.search(audit, unsafe { &filter_resolved!(f_pres("userid")) }) .search(audit, &lims, unsafe { &filter_resolved!(f_pres("userid")) })
.expect("Failed to search"); .expect("Failed to search");
// Get these out to usable entries. // Get these out to usable entries.
@ -2458,4 +2490,155 @@ mod tests {
} }
}) })
} }
#[test]
fn test_be_limits_allids() {
run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| {
let mut lim_allow_allids = EventLimits::unlimited();
lim_allow_allids.unindexed_allow = true;
let mut lim_deny_allids = EventLimits::unlimited();
lim_deny_allids.unindexed_allow = false;
let mut e: Entry<EntryInit, EntryNew> = Entry::new();
e.add_ava("userid", Value::from("william"));
e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1"));
e.add_ava("nonexist", Value::from("x"));
let e = unsafe { e.into_sealed_new() };
let single_result = be.create(audit, vec![e.clone()]);
assert!(single_result.is_ok());
let filt = unsafe {
e.filter_from_attrs(&vec![String::from("nonexist")])
.expect("failed to generate filter")
.into_valid_resolved()
};
// check allow on allids
let res = be.search(audit, &lim_allow_allids, &filt);
assert!(res.is_ok());
let res = be.exists(audit, &lim_allow_allids, &filt);
assert!(res.is_ok());
// check deny on allids
let res = be.search(audit, &lim_deny_allids, &filt);
assert!(res == Err(OperationError::ResourceLimit));
let res = be.exists(audit, &lim_deny_allids, &filt);
assert!(res == Err(OperationError::ResourceLimit));
})
}
#[test]
fn test_be_limits_results_max() {
run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| {
let mut lim_allow = EventLimits::unlimited();
lim_allow.search_max_results = usize::MAX;
let mut lim_deny = EventLimits::unlimited();
lim_deny.search_max_results = 0;
let mut e: Entry<EntryInit, EntryNew> = Entry::new();
e.add_ava("userid", Value::from("william"));
e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1"));
e.add_ava("nonexist", Value::from("x"));
let e = unsafe { e.into_sealed_new() };
let single_result = be.create(audit, vec![e.clone()]);
assert!(single_result.is_ok());
let filt = unsafe {
e.filter_from_attrs(&vec![String::from("nonexist")])
.expect("failed to generate filter")
.into_valid_resolved()
};
// --> This is the all ids path (unindexed)
// check allow on entry max
let res = be.search(audit, &lim_allow, &filt);
assert!(res.is_ok());
let res = be.exists(audit, &lim_allow, &filt);
assert!(res.is_ok());
// check deny on entry max
let res = be.search(audit, &lim_deny, &filt);
assert!(res == Err(OperationError::ResourceLimit));
// we don't limit on exists because we never load the entries.
let res = be.exists(audit, &lim_deny, &filt);
assert!(res.is_ok());
// --> This will shortcut due to indexing.
assert!(be.reindex(audit).is_ok());
let res = be.search(audit, &lim_deny, &filt);
assert!(res == Err(OperationError::ResourceLimit));
// we don't limit on exists because we never load the entries.
let res = be.exists(audit, &lim_deny, &filt);
assert!(res.is_ok());
})
}
#[test]
fn test_be_limits_partial_filter() {
run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| {
// This relies on how we do partials, so it could be a bit sensitive.
// A partial is generated after an allids + indexed in a single and
// as we require both conditions to exist. Allids comes from unindexed
// terms. we need to ensure we don't hit partial threshold too.
//
// This means we need an and query where the first term is allids
// and the second is indexed, but without the filter shortcutting.
//
// To achieve this we need a monstrously evil query.
//
let mut lim_allow = EventLimits::unlimited();
lim_allow.search_max_filter_test = usize::MAX;
let mut lim_deny = EventLimits::unlimited();
lim_deny.search_max_filter_test = 0;
let mut e: Entry<EntryInit, EntryNew> = Entry::new();
e.add_ava("name", Value::from("william"));
e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1"));
e.add_ava("nonexist", Value::from("x"));
e.add_ava("nonexist", Value::from("y"));
let e = unsafe { e.into_sealed_new() };
let single_result = be.create(audit, vec![e.clone()]);
assert!(single_result.is_ok());
// Reindex so we have things in place for our query
assert!(be.reindex(audit).is_ok());
// 🚨 This is evil!
// The and allows us to hit "allids + indexed -> partial".
// the or terms prevent re-arrangement. They can't be folded or dead
// term elimed either.
//
// This means the f_or nonexist will become allids and the second will be indexed
// due to f_eq userid in both with the result of william.
//
// This creates a partial, and because it's the first iteration in the loop, this
// doesn't encounter partial threshold testing.
let filt = unsafe {
filter_resolved!(f_and!([
f_or!([
f_eq("nonexist", PartialValue::new_utf8s("x")),
f_eq("nonexist", PartialValue::new_utf8s("y"))
]),
f_or!([
f_eq("name", PartialValue::new_utf8s("claire")),
f_eq("name", PartialValue::new_utf8s("william"))
]),
]))
};
let res = be.search(audit, &lim_allow, &filt);
assert!(res.is_ok());
let res = be.exists(audit, &lim_allow, &filt);
assert!(res.is_ok());
// check deny on entry max
let res = be.search(audit, &lim_deny, &filt);
assert!(res == Err(OperationError::ResourceLimit));
// we don't limit on exists because we never load the entries.
let res = be.exists(audit, &lim_deny, &filt);
assert!(res == Err(OperationError::ResourceLimit));
})
}
} }

View file

@ -24,6 +24,7 @@ use crate::actors::v1_write::{CreateMessage, DeleteMessage, ModifyMessage};
// use crate::schema::SchemaTransaction; // use crate::schema::SchemaTransaction;
use actix::prelude::*; use actix::prelude::*;
use ldap3_server::simple::LdapFilter;
use std::collections::BTreeSet; use std::collections::BTreeSet;
use uuid::Uuid; use uuid::Uuid;
@ -94,11 +95,46 @@ pub enum EventOrigin {
// Replication, // Replication,
} }
#[derive(Debug, Clone)]
/// Limits on the resources a single event can consume. These are defined per-event
/// as they are derived from the userAuthToken based on that individual session
pub struct EventLimits {
pub unindexed_allow: bool,
pub search_max_results: usize,
pub search_max_filter_test: usize,
pub filter_max_elements: usize,
// pub write_max_entries: usize,
// pub write_max_rate: usize,
// pub network_max_request: usize,
}
impl EventLimits {
pub fn unlimited() -> Self {
EventLimits {
unindexed_allow: true,
search_max_results: usize::MAX,
search_max_filter_test: usize::MAX,
filter_max_elements: usize::MAX,
}
}
// From a userauthtoken
pub fn from_uat(uat: &UserAuthToken) -> Self {
EventLimits {
unindexed_allow: uat.lim_uidx,
search_max_results: uat.lim_rmax,
search_max_filter_test: uat.lim_pmax,
filter_max_elements: uat.lim_fmax,
}
}
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct Event { pub struct Event {
// The event's initiator aka origin source. // The event's initiator aka origin source.
// This importantly, is used for access control! // This importantly, is used for access control!
pub origin: EventOrigin, pub origin: EventOrigin,
pub(crate) limits: EventLimits,
} }
impl std::fmt::Display for Event { impl std::fmt::Display for Event {
@ -119,25 +155,10 @@ impl std::fmt::Display for Event {
} }
impl Event { impl Event {
pub fn from_ro_request(
audit: &mut AuditScope,
qs: &QueryServerReadTransaction,
user_uuid: &Uuid,
) -> Result<Self, OperationError> {
qs.internal_search_uuid(audit, &user_uuid)
.map(|e| Event {
origin: EventOrigin::User(e),
})
.map_err(|e| {
ladmin_error!(audit, "from_ro_request failed {:?}", e);
e
})
}
pub fn from_ro_uat( pub fn from_ro_uat(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
ltrace!(audit, "from_ro_uat -> {:?}", uat); ltrace!(audit, "from_ro_uat -> {:?}", uat);
let uat = uat.ok_or(OperationError::NotAuthenticated)?; let uat = uat.ok_or(OperationError::NotAuthenticated)?;
@ -153,15 +174,17 @@ impl Event {
// TODO #64: Now apply claims from the uat into the Entry // TODO #64: Now apply claims from the uat into the Entry
// to allow filtering. // to allow filtering.
let limits = EventLimits::from_uat(uat);
Ok(Event { Ok(Event {
origin: EventOrigin::User(e), origin: EventOrigin::User(e),
limits,
}) })
} }
pub fn from_rw_uat( pub fn from_rw_uat(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
ltrace!(audit, "from_rw_uat -> {:?}", uat); ltrace!(audit, "from_rw_uat -> {:?}", uat);
let uat = uat.ok_or(OperationError::NotAuthenticated)?; let uat = uat.ok_or(OperationError::NotAuthenticated)?;
@ -177,37 +200,17 @@ impl Event {
// TODO #64: Now apply claims from the uat into the Entry // TODO #64: Now apply claims from the uat into the Entry
// to allow filtering. // to allow filtering.
let limits = EventLimits::from_uat(uat);
Ok(Event { Ok(Event {
origin: EventOrigin::User(e), origin: EventOrigin::User(e),
}) limits,
}
pub fn from_rw_request(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
user_uuid: &str,
) -> Result<Self, OperationError> {
// Do we need to check or load the entry from the user_uuid?
// In the future, probably yes.
//
// For now, no.
let u = Uuid::parse_str(user_uuid).map_err(|_| {
ladmin_error!(audit, "from_ro_request invalid uat uuid");
OperationError::InvalidUuid
})?;
let e = qs.internal_search_uuid(audit, &u).map_err(|e| {
ladmin_error!(audit, "from_rw_request failed {:?}", e);
e
})?;
Ok(Event {
origin: EventOrigin::User(e),
}) })
} }
pub fn from_internal() -> Self { pub fn from_internal() -> Self {
Event { Event {
origin: EventOrigin::Internal, origin: EventOrigin::Internal,
limits: EventLimits::unlimited(),
} }
} }
@ -215,6 +218,7 @@ impl Event {
pub fn from_impersonate_entry(e: Entry<EntrySealed, EntryCommitted>) -> Self { pub fn from_impersonate_entry(e: Entry<EntrySealed, EntryCommitted>) -> Self {
Event { Event {
origin: EventOrigin::User(e), origin: EventOrigin::User(e),
limits: EventLimits::unlimited(),
} }
} }
@ -263,25 +267,26 @@ impl SearchEvent {
msg: SearchMessage, msg: SearchMessage,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
match Filter::from_ro(audit, &msg.req.filter, qs) { let event = Event::from_ro_uat(audit, qs, msg.uat.as_ref())?;
Ok(f) => Ok(SearchEvent { let f = Filter::from_ro(audit, &event, &msg.req.filter, qs)?;
event: Event::from_ro_uat(audit, qs, msg.uat)?, // We do need to do this twice to account for the ignore_hidden
// We do need to do this twice to account for the ignore_hidden // changes.
// changes. let filter = f
filter: f .clone()
.clone() .into_ignore_hidden()
.into_ignore_hidden() .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?, let filter_orig = f
filter_orig: f .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?, Ok(SearchEvent {
// We can't get this from the SearchMessage because it's annoying with the event,
// current macro design. filter,
attrs: None, filter_orig,
}), // We can't get this from the SearchMessage because it's annoying with the
Err(e) => Err(e), // current macro design.
} attrs: None,
})
} }
pub fn from_internal_message( pub fn from_internal_message(
@ -302,23 +307,28 @@ impl SearchEvent {
} }
} }
let event = Event::from_ro_uat(audit, qs, msg.uat.as_ref())?;
let filter = msg
.filter
.clone()
.into_ignore_hidden()
.validate(qs.get_schema())
.map_err(|e| {
lrequest_error!(audit, "filter schema violation -> {:?}", e);
OperationError::SchemaViolation(e)
})?;
let filter_orig = msg.filter.validate(qs.get_schema()).map_err(|e| {
lrequest_error!(audit, "filter_orig schema violation -> {:?}", e);
OperationError::SchemaViolation(e)
})?;
Ok(SearchEvent { Ok(SearchEvent {
event: Event::from_ro_uat(audit, qs, msg.uat)?, event,
// We do need to do this twice to account for the ignore_hidden // We do need to do this twice to account for the ignore_hidden
// changes. // changes.
filter: msg filter,
.filter filter_orig,
.clone()
.into_ignore_hidden()
.validate(qs.get_schema())
.map_err(|e| {
lrequest_error!(audit, "filter schema violation -> {:?}", e);
OperationError::SchemaViolation(e)
})?,
filter_orig: msg.filter.validate(qs.get_schema()).map_err(|e| {
lrequest_error!(audit, "filter_orig schema violation -> {:?}", e);
OperationError::SchemaViolation(e)
})?,
attrs: r_attrs, attrs: r_attrs,
}) })
} }
@ -340,54 +350,65 @@ impl SearchEvent {
} }
} }
let event = Event::from_ro_uat(audit, qs, msg.uat.as_ref())?;
let filter = msg
.filter
.clone()
.into_recycled()
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
let filter_orig = msg
.filter
.into_recycled()
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
Ok(SearchEvent { Ok(SearchEvent {
event: Event::from_ro_uat(audit, qs, msg.uat)?, event,
filter: msg filter,
.filter filter_orig,
.clone()
.into_recycled()
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
filter_orig: msg
.filter
.into_recycled()
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
attrs: r_attrs, attrs: r_attrs,
}) })
} }
pub fn from_whoami_request( pub fn from_whoami_request(
audit: &mut AuditScope, audit: &mut AuditScope,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let event = Event::from_ro_uat(audit, qs, uat)?;
let filter = filter!(f_self())
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
let filter_orig = filter_all!(f_self())
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
Ok(SearchEvent { Ok(SearchEvent {
event: Event::from_ro_uat(audit, qs, uat)?, event,
filter: filter!(f_self()) filter,
.validate(qs.get_schema()) filter_orig,
.map_err(OperationError::SchemaViolation)?,
filter_orig: filter_all!(f_self())
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
attrs: None, attrs: None,
}) })
} }
pub fn from_target_uuid_request( pub fn from_target_uuid_request(
audit: &mut AuditScope, audit: &mut AuditScope,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target_uuid: Uuid, target_uuid: Uuid,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let event = Event::from_ro_uat(audit, qs, uat)?;
let filter = filter!(f_eq("uuid", PartialValue::new_uuid(target_uuid)))
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
let filter_orig = filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid)))
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
Ok(SearchEvent { Ok(SearchEvent {
event: Event::from_ro_uat(audit, qs, uat)?, event,
filter: filter!(f_eq("uuid", PartialValue::new_uuid(target_uuid))) filter,
.validate(qs.get_schema()) filter_orig,
.map_err(OperationError::SchemaViolation)?,
filter_orig: filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid)))
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
attrs: None, attrs: None,
}) })
} }
@ -460,20 +481,25 @@ impl SearchEvent {
pub(crate) fn new_ext_impersonate_uuid( pub(crate) fn new_ext_impersonate_uuid(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
euuid: &Uuid, euat: &UserAuthToken,
filter: &Filter<FilterInvalid>, lf: &LdapFilter,
attrs: Option<BTreeSet<String>>, attrs: Option<BTreeSet<String>>,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let event = Event::from_ro_uat(audit, qs, Some(euat))?;
// Kanidm Filter from LdapFilter
let f = Filter::from_ldap_ro(audit, &event, &lf, qs)?;
let filter = f
.clone()
.into_ignore_hidden()
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
let filter_orig = f
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
Ok(SearchEvent { Ok(SearchEvent {
event: Event::from_ro_request(audit, qs, euuid)?, event,
filter: filter filter,
.clone() filter_orig,
.into_ignore_hidden()
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
filter_orig: filter
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
attrs, attrs,
}) })
} }
@ -496,6 +522,10 @@ impl SearchEvent {
attrs: None, attrs: None,
} }
} }
pub(crate) fn get_limits(&self) -> &EventLimits {
&self.event.limits
}
} }
// Represents the decoded entries from the protocol -> internal entry representation // Represents the decoded entries from the protocol -> internal entry representation
@ -528,7 +558,7 @@ impl CreateEvent {
// From ProtoEntry -> Entry // From ProtoEntry -> Entry
// What is the correct consuming iterator here? Can we // What is the correct consuming iterator here? Can we
// even do that? // even do that?
event: Event::from_rw_uat(audit, qs, msg.uat)?, event: Event::from_rw_uat(audit, qs, msg.uat.as_ref())?,
entries, entries,
}), }),
Err(e) => Err(e), Err(e) => Err(e),
@ -582,6 +612,10 @@ impl ExistsEvent {
filter_orig: filter.into_valid(), filter_orig: filter.into_valid(),
} }
} }
pub(crate) fn get_limits(&self) -> &EventLimits {
&self.event.limits
}
} }
#[derive(Debug)] #[derive(Debug)]
@ -599,38 +633,42 @@ impl DeleteEvent {
msg: DeleteMessage, msg: DeleteMessage,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
match Filter::from_rw(audit, &msg.req.filter, qs) { let event = Event::from_rw_uat(audit, qs, msg.uat.as_ref())?;
Ok(f) => Ok(DeleteEvent { let f = Filter::from_rw(audit, &event, &msg.req.filter, qs)?;
event: Event::from_rw_uat(audit, qs, msg.uat)?, let filter = f
filter: f .clone()
.clone() .into_ignore_hidden()
.into_ignore_hidden() .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?, let filter_orig = f
filter_orig: f .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?, Ok(DeleteEvent {
}), event,
Err(e) => Err(e), filter,
} filter_orig,
})
} }
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
filter: &Filter<FilterInvalid>, f: &Filter<FilterInvalid>,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let event = Event::from_rw_uat(audit, qs, uat)?;
let filter = f
.clone()
.into_ignore_hidden()
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
let filter_orig = f
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
Ok(DeleteEvent { Ok(DeleteEvent {
event: Event::from_rw_uat(audit, qs, uat)?, event,
filter: filter filter,
.clone() filter_orig,
.into_ignore_hidden()
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
filter_orig: filter
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
}) })
} }
@ -689,32 +727,31 @@ impl ModifyEvent {
msg: ModifyMessage, msg: ModifyMessage,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
match Filter::from_rw(audit, &msg.req.filter, qs) { let event = Event::from_rw_uat(audit, qs, msg.uat.as_ref())?;
Ok(f) => match ModifyList::from(audit, &msg.req.modlist, qs) { let f = Filter::from_rw(audit, &event, &msg.req.filter, qs)?;
Ok(m) => Ok(ModifyEvent { let m = ModifyList::from(audit, &msg.req.modlist, qs)?;
event: Event::from_rw_uat(audit, qs, msg.uat)?, let filter = f
filter: f .clone()
.clone() .into_ignore_hidden()
.into_ignore_hidden() .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?, let filter_orig = f
filter_orig: f .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?, let modlist = m
modlist: m .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?, Ok(ModifyEvent {
}), event,
Err(e) => Err(e), filter,
}, filter_orig,
modlist,
Err(e) => Err(e), })
}
} }
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target_uuid: Uuid, target_uuid: Uuid,
proto_ml: &ProtoModifyList, proto_ml: &ProtoModifyList,
filter: Filter<FilterInvalid>, filter: Filter<FilterInvalid>,
@ -724,28 +761,31 @@ impl ModifyEvent {
// Add any supplemental conditions we have. // Add any supplemental conditions we have.
let f = Filter::join_parts_and(f_uuid, filter); let f = Filter::join_parts_and(f_uuid, filter);
match ModifyList::from(audit, &proto_ml, qs) { let m = ModifyList::from(audit, &proto_ml, qs)?;
Ok(m) => Ok(ModifyEvent { let event = Event::from_rw_uat(audit, qs, uat)?;
event: Event::from_rw_uat(audit, qs, uat)?, let filter = f
filter: f .clone()
.clone() .into_ignore_hidden()
.into_ignore_hidden() .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?, let filter_orig = f
filter_orig: f .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?, let modlist = m
modlist: m .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?,
}), Ok(ModifyEvent {
Err(e) => Err(e), event,
} filter,
filter_orig,
modlist,
})
} }
pub fn from_internal_parts( pub fn from_internal_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target_uuid: Uuid, target_uuid: Uuid,
ml: &ModifyList<ModifyInvalid>, ml: &ModifyList<ModifyInvalid>,
filter: Filter<FilterInvalid>, filter: Filter<FilterInvalid>,
@ -755,25 +795,30 @@ impl ModifyEvent {
// Add any supplemental conditions we have. // Add any supplemental conditions we have.
let f = Filter::join_parts_and(f_uuid, filter); let f = Filter::join_parts_and(f_uuid, filter);
let event = Event::from_rw_uat(audit, qs, uat)?;
let filter = f
.clone()
.into_ignore_hidden()
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
let filter_orig = f
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
let modlist = ml
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
Ok(ModifyEvent { Ok(ModifyEvent {
event: Event::from_rw_uat(audit, qs, uat)?, event,
filter: f filter,
.clone() filter_orig,
.into_ignore_hidden() modlist,
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
filter_orig: f
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
modlist: ml
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
}) })
} }
pub fn from_target_uuid_attr_purge( pub fn from_target_uuid_attr_purge(
audit: &mut AuditScope, audit: &mut AuditScope,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target_uuid: Uuid, target_uuid: Uuid,
attr: &str, attr: &str,
filter: Filter<FilterInvalid>, filter: Filter<FilterInvalid>,
@ -783,19 +828,24 @@ impl ModifyEvent {
let f_uuid = filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid))); let f_uuid = filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid)));
// Add any supplemental conditions we have. // Add any supplemental conditions we have.
let f = Filter::join_parts_and(f_uuid, filter); let f = Filter::join_parts_and(f_uuid, filter);
let event = Event::from_rw_uat(audit, qs, uat)?;
let filter = f
.clone()
.into_ignore_hidden()
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
let filter_orig = f
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
let modlist = ml
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?;
Ok(ModifyEvent { Ok(ModifyEvent {
event: Event::from_rw_uat(audit, qs, uat)?, event,
filter: f filter,
.clone() filter_orig,
.into_ignore_hidden() modlist,
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
filter_orig: f
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
modlist: ml
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)?,
}) })
} }
@ -1085,17 +1135,16 @@ impl Message for ReviveRecycledEvent {
impl ReviveRecycledEvent { impl ReviveRecycledEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
filter: Filter<FilterInvalid>, filter: Filter<FilterInvalid>,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
Ok(ReviveRecycledEvent { let event = Event::from_rw_uat(audit, qs, uat)?;
event: Event::from_rw_uat(audit, qs, uat)?, let filter = filter
filter: filter .into_recycled()
.into_recycled() .validate(qs.get_schema())
.validate(qs.get_schema()) .map_err(OperationError::SchemaViolation)?;
.map_err(OperationError::SchemaViolation)?, Ok(ReviveRecycledEvent { event, filter })
})
} }
#[cfg(test)] #[cfg(test)]

View file

@ -27,6 +27,8 @@ use std::iter;
use uuid::Uuid; use uuid::Uuid;
const FILTER_DEPTH_MAX: usize = 16;
// Default filter is safe, ignores all hidden types! // Default filter is safe, ignores all hidden types!
// This is &Value so we can lazy const then clone, but perhaps we can reconsider // This is &Value so we can lazy const then clone, but perhaps we can reconsider
@ -434,13 +436,16 @@ impl Filter<FilterInvalid> {
// takes "clone_value(t, a, v) instead, but that may have a similar issue. // takes "clone_value(t, a, v) instead, but that may have a similar issue.
pub fn from_ro( pub fn from_ro(
audit: &mut AuditScope, audit: &mut AuditScope,
ev: &Event,
f: &ProtoFilter, f: &ProtoFilter,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
lperf_trace_segment!(audit, "filter::from_ro", || { lperf_trace_segment!(audit, "filter::from_ro", || {
let depth = FILTER_DEPTH_MAX;
let mut elems = ev.limits.filter_max_elements;
Ok(Filter { Ok(Filter {
state: FilterInvalid { state: FilterInvalid {
inner: FilterComp::from_ro(audit, f, qs)?, inner: FilterComp::from_ro(audit, f, qs, depth, &mut elems)?,
}, },
}) })
}) })
@ -448,13 +453,16 @@ impl Filter<FilterInvalid> {
pub fn from_rw( pub fn from_rw(
audit: &mut AuditScope, audit: &mut AuditScope,
ev: &Event,
f: &ProtoFilter, f: &ProtoFilter,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
lperf_trace_segment!(audit, "filter::from_rw", || { lperf_trace_segment!(audit, "filter::from_rw", || {
let depth = FILTER_DEPTH_MAX;
let mut elems = ev.limits.filter_max_elements;
Ok(Filter { Ok(Filter {
state: FilterInvalid { state: FilterInvalid {
inner: FilterComp::from_rw(audit, f, qs)?, inner: FilterComp::from_rw(audit, f, qs, depth, &mut elems)?,
}, },
}) })
}) })
@ -462,13 +470,16 @@ impl Filter<FilterInvalid> {
pub fn from_ldap_ro( pub fn from_ldap_ro(
audit: &mut AuditScope, audit: &mut AuditScope,
ev: &Event,
f: &LdapFilter, f: &LdapFilter,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
lperf_trace_segment!(audit, "filter::from_ldap_ro", || { lperf_trace_segment!(audit, "filter::from_ldap_ro", || {
let depth = FILTER_DEPTH_MAX;
let mut elems = ev.limits.filter_max_elements;
Ok(Filter { Ok(Filter {
state: FilterInvalid { state: FilterInvalid {
inner: FilterComp::from_ldap_ro(audit, f, qs)?, inner: FilterComp::from_ldap_ro(audit, f, qs, depth, &mut elems)?,
}, },
}) })
}) })
@ -656,7 +667,10 @@ impl FilterComp {
audit: &mut AuditScope, audit: &mut AuditScope,
f: &ProtoFilter, f: &ProtoFilter,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
depth: usize,
elems: &mut usize,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let ndepth = depth.checked_sub(1).ok_or(OperationError::ResourceLimit)?;
Ok(match f { Ok(match f {
ProtoFilter::Eq(a, v) => { ProtoFilter::Eq(a, v) => {
let nk = qs.get_schema().normalise_attr_name(a); let nk = qs.get_schema().normalise_attr_name(a);
@ -672,17 +686,32 @@ impl FilterComp {
let nk = qs.get_schema().normalise_attr_name(a); let nk = qs.get_schema().normalise_attr_name(a);
FilterComp::Pres(nk) FilterComp::Pres(nk)
} }
ProtoFilter::Or(l) => FilterComp::Or( ProtoFilter::Or(l) => {
l.iter() *elems = (*elems)
.map(|f| Self::from_ro(audit, f, qs)) .checked_sub(l.len())
.collect::<Result<Vec<_>, _>>()?, .ok_or(OperationError::ResourceLimit)?;
), FilterComp::Or(
ProtoFilter::And(l) => FilterComp::And( l.iter()
l.iter() .map(|f| Self::from_ro(audit, f, qs, ndepth, elems))
.map(|f| Self::from_ro(audit, f, qs)) .collect::<Result<Vec<_>, _>>()?,
.collect::<Result<Vec<_>, _>>()?, )
), }
ProtoFilter::AndNot(l) => FilterComp::AndNot(Box::new(Self::from_ro(audit, l, qs)?)), ProtoFilter::And(l) => {
*elems = (*elems)
.checked_sub(l.len())
.ok_or(OperationError::ResourceLimit)?;
FilterComp::And(
l.iter()
.map(|f| Self::from_ro(audit, f, qs, ndepth, elems))
.collect::<Result<Vec<_>, _>>()?,
)
}
ProtoFilter::AndNot(l) => {
*elems = (*elems)
.checked_sub(1)
.ok_or(OperationError::ResourceLimit)?;
FilterComp::AndNot(Box::new(Self::from_ro(audit, l, qs, ndepth, elems)?))
}
ProtoFilter::SelfUUID => FilterComp::SelfUUID, ProtoFilter::SelfUUID => FilterComp::SelfUUID,
}) })
} }
@ -691,7 +720,10 @@ impl FilterComp {
audit: &mut AuditScope, audit: &mut AuditScope,
f: &ProtoFilter, f: &ProtoFilter,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
depth: usize,
elems: &mut usize,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let ndepth = depth.checked_sub(1).ok_or(OperationError::ResourceLimit)?;
Ok(match f { Ok(match f {
ProtoFilter::Eq(a, v) => { ProtoFilter::Eq(a, v) => {
let nk = qs.get_schema().normalise_attr_name(a); let nk = qs.get_schema().normalise_attr_name(a);
@ -707,17 +739,33 @@ impl FilterComp {
let nk = qs.get_schema().normalise_attr_name(a); let nk = qs.get_schema().normalise_attr_name(a);
FilterComp::Pres(nk) FilterComp::Pres(nk)
} }
ProtoFilter::Or(l) => FilterComp::Or( ProtoFilter::Or(l) => {
l.iter() *elems = (*elems)
.map(|f| Self::from_rw(audit, f, qs)) .checked_sub(l.len())
.collect::<Result<Vec<_>, _>>()?, .ok_or(OperationError::ResourceLimit)?;
), FilterComp::Or(
ProtoFilter::And(l) => FilterComp::And( l.iter()
l.iter() .map(|f| Self::from_rw(audit, f, qs, ndepth, elems))
.map(|f| Self::from_rw(audit, f, qs)) .collect::<Result<Vec<_>, _>>()?,
.collect::<Result<Vec<_>, _>>()?, )
), }
ProtoFilter::AndNot(l) => FilterComp::AndNot(Box::new(Self::from_rw(audit, l, qs)?)), ProtoFilter::And(l) => {
*elems = (*elems)
.checked_sub(l.len())
.ok_or(OperationError::ResourceLimit)?;
FilterComp::And(
l.iter()
.map(|f| Self::from_rw(audit, f, qs, ndepth, elems))
.collect::<Result<Vec<_>, _>>()?,
)
}
ProtoFilter::AndNot(l) => {
*elems = (*elems)
.checked_sub(1)
.ok_or(OperationError::ResourceLimit)?;
FilterComp::AndNot(Box::new(Self::from_rw(audit, l, qs, ndepth, elems)?))
}
ProtoFilter::SelfUUID => FilterComp::SelfUUID, ProtoFilter::SelfUUID => FilterComp::SelfUUID,
}) })
} }
@ -726,19 +774,38 @@ impl FilterComp {
audit: &mut AuditScope, audit: &mut AuditScope,
f: &LdapFilter, f: &LdapFilter,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
depth: usize,
elems: &mut usize,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let ndepth = depth.checked_sub(1).ok_or(OperationError::ResourceLimit)?;
Ok(match f { Ok(match f {
LdapFilter::And(l) => FilterComp::And( LdapFilter::And(l) => {
l.iter() *elems = (*elems)
.map(|f| Self::from_ldap_ro(audit, f, qs)) .checked_sub(l.len())
.collect::<Result<Vec<_>, _>>()?, .ok_or(OperationError::ResourceLimit)?;
), FilterComp::And(
LdapFilter::Or(l) => FilterComp::Or( l.iter()
l.iter() .map(|f| Self::from_ldap_ro(audit, f, qs, ndepth, elems))
.map(|f| Self::from_ldap_ro(audit, f, qs)) .collect::<Result<Vec<_>, _>>()?,
.collect::<Result<Vec<_>, _>>()?, )
), }
LdapFilter::Not(l) => FilterComp::AndNot(Box::new(Self::from_ldap_ro(audit, l, qs)?)), LdapFilter::Or(l) => {
*elems = (*elems)
.checked_sub(l.len())
.ok_or(OperationError::ResourceLimit)?;
FilterComp::Or(
l.iter()
.map(|f| Self::from_ldap_ro(audit, f, qs, ndepth, elems))
.collect::<Result<Vec<_>, _>>()?,
)
}
LdapFilter::Not(l) => {
*elems = (*elems)
.checked_sub(1)
.ok_or(OperationError::ResourceLimit)?;
FilterComp::AndNot(Box::new(Self::from_ldap_ro(audit, l, qs, ndepth, elems)?))
}
LdapFilter::Equality(a, v) => { LdapFilter::Equality(a, v) => {
let a = ldap_attr_filter_map(a); let a = ldap_attr_filter_map(a);
let v = qs.clone_partialvalue(audit, a.as_str(), v)?; let v = qs.clone_partialvalue(audit, a.as_str(), v)?;
@ -1126,12 +1193,18 @@ impl FilterResolved {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::entry::{Entry, EntryNew, EntrySealed};
use crate::filter::{Filter, FilterInvalid};
use crate::value::PartialValue;
use crate::entry::{Entry, EntryInit, EntryNew, EntrySealed};
use crate::event::{CreateEvent, Event};
use crate::filter::{Filter, FilterInvalid, FILTER_DEPTH_MAX};
use crate::server::QueryServerTransaction;
use crate::value::{PartialValue, Value};
use std::cmp::{Ordering, PartialOrd}; use std::cmp::{Ordering, PartialOrd};
use std::collections::BTreeSet; use std::collections::BTreeSet;
use kanidm_proto::v1::Filter as ProtoFilter;
use kanidm_proto::v1::OperationError;
use ldap3_server::simple::LdapFilter;
#[test] #[test]
fn test_filter_simple() { fn test_filter_simple() {
// Test construction. // Test construction.
@ -1586,4 +1659,147 @@ mod tests {
assert!(f_t2a.get_attr_set() == f_expect); assert!(f_t2a.get_attr_set() == f_expect);
} }
#[test]
fn test_filter_resolve_value() {
run_test!(|server: &QueryServer, audit: &mut AuditScope| {
let server_txn = server.write(duration_from_epoch_now());
let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"attrs": {
"class": ["object", "person", "account"],
"name": ["testperson1"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"description": ["testperson"],
"displayname": ["testperson1"]
}
}"#,
);
let e2: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"attrs": {
"class": ["object", "person"],
"name": ["testperson2"],
"uuid": ["a67c0c71-0b35-4218-a6b0-22d23d131d27"],
"description": ["testperson"],
"displayname": ["testperson2"]
}
}"#,
);
let e_ts: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"attrs": {
"class": ["tombstone", "object"],
"uuid": ["9557f49c-97a5-4277-a9a5-097d17eb8317"]
}
}"#,
);
let ce = CreateEvent::new_internal(vec![e1, e2, e_ts]);
let cr = server_txn.create(audit, &ce);
assert!(cr.is_ok());
// Resolving most times should yield expected results
let t1 = Value::new_utf8s("teststring");
let r1 = server_txn.resolve_value(audit, &t1);
assert!(r1 == Ok("teststring".to_string()));
// Resolve UUID with matching spn
let t_uuid = Value::new_refer_s("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap();
let r_uuid = server_txn.resolve_value(audit, &t_uuid);
debug!("{:?}", r_uuid);
assert!(r_uuid == Ok("testperson1@example.com".to_string()));
// Resolve UUID with matching name
let t_uuid = Value::new_refer_s("a67c0c71-0b35-4218-a6b0-22d23d131d27").unwrap();
let r_uuid = server_txn.resolve_value(audit, &t_uuid);
debug!("{:?}", r_uuid);
assert!(r_uuid == Ok("testperson2".to_string()));
// Resolve UUID non-exist
let t_uuid_non = Value::new_refer_s("b83e98f0-3d2e-41d2-9796-d8d993289c86").unwrap();
let r_uuid_non = server_txn.resolve_value(audit, &t_uuid_non);
debug!("{:?}", r_uuid_non);
assert!(r_uuid_non == Ok("b83e98f0-3d2e-41d2-9796-d8d993289c86".to_string()));
// Resolve UUID to tombstone/recycled (same as non-exist)
let t_uuid_ts = Value::new_refer_s("9557f49c-97a5-4277-a9a5-097d17eb8317").unwrap();
let r_uuid_ts = server_txn.resolve_value(audit, &t_uuid_ts);
debug!("{:?}", r_uuid_ts);
assert!(r_uuid_ts == Ok("9557f49c-97a5-4277-a9a5-097d17eb8317".to_string()));
})
}
#[test]
fn test_filter_depth_limits() {
run_test!(|server: &QueryServer, audit: &mut AuditScope| {
let r_txn = server.read();
let mut inv_proto = ProtoFilter::Pres("class".to_string());
for _i in 0..(FILTER_DEPTH_MAX + 1) {
inv_proto = ProtoFilter::And(vec![inv_proto]);
}
let mut inv_ldap = LdapFilter::Present("class".to_string());
for _i in 0..(FILTER_DEPTH_MAX + 1) {
inv_ldap = LdapFilter::And(vec![inv_ldap]);
}
let ev = Event::from_internal();
// Test proto + read
let res = Filter::from_ro(audit, &ev, &inv_proto, &r_txn);
assert!(res == Err(OperationError::ResourceLimit));
// ldap
let res = Filter::from_ldap_ro(audit, &ev, &inv_ldap, &r_txn);
assert!(res == Err(OperationError::ResourceLimit));
// Can only have one db conn at a time.
std::mem::drop(r_txn);
// proto + write
let wr_txn = server.write(duration_from_epoch_now());
let res = Filter::from_rw(audit, &ev, &inv_proto, &wr_txn);
assert!(res == Err(OperationError::ResourceLimit));
})
}
#[test]
fn test_filter_max_element_limits() {
run_test!(|server: &QueryServer, audit: &mut AuditScope| {
const LIMIT: usize = 4;
let r_txn = server.read();
let inv_proto = ProtoFilter::And(
(0..(LIMIT * 2))
.map(|_| ProtoFilter::Pres("class".to_string()))
.collect(),
);
let inv_ldap = LdapFilter::And(
(0..(LIMIT * 2))
.map(|_| LdapFilter::Present("class".to_string()))
.collect(),
);
let mut ev = Event::from_internal();
ev.limits.filter_max_elements = LIMIT;
// Test proto + read
let res = Filter::from_ro(audit, &ev, &inv_proto, &r_txn);
assert!(res == Err(OperationError::ResourceLimit));
// ldap
let res = Filter::from_ldap_ro(audit, &ev, &inv_ldap, &r_txn);
assert!(res == Err(OperationError::ResourceLimit));
// Can only have one db conn at a time.
std::mem::drop(r_txn);
// proto + write
let wr_txn = server.write(duration_from_epoch_now());
let res = Filter::from_rw(audit, &ev, &inv_proto, &wr_txn);
assert!(res == Err(OperationError::ResourceLimit));
})
}
} }

View file

@ -128,7 +128,6 @@ impl Account {
// This could consume self? // This could consume self?
// The cred handler provided is what authenticated this user, so we can use it to // The cred handler provided is what authenticated this user, so we can use it to
// process what the proper claims should be. // process what the proper claims should be.
// Get the claims from the cred_h // Get the claims from the cred_h
Some(UserAuthToken { Some(UserAuthToken {
@ -139,6 +138,11 @@ impl Account {
// application: None, // application: None,
groups: self.groups.iter().map(|g| g.to_proto()).collect(), groups: self.groups.iter().map(|g| g.to_proto()).collect(),
claims: claims.iter().map(|c| c.to_proto()).collect(), claims: claims.iter().map(|c| c.to_proto()).collect(),
// What's the best way to get access to these limits with regard to claims/other?
lim_uidx: false,
lim_rmax: 128,
lim_pmax: 256,
lim_fmax: 32,
}) })
} }
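The hardcoded lim_* defaults above give every issued token a conservative budget, which appear to correspond to the limits this commit describes: unindexed queries disallowed (lim_uidx), a cap on returned results (lim_rmax), a cap on entries that may be filter-tested (lim_pmax), and a cap on filter size (lim_fmax). A rough, illustrative sketch of how such token fields might be mapped into a per-session limits structure; `TokenLimits`, `SessionLimits`, and their field names are assumptions for the sketch, not the crate's real types:

```rust
// Illustrative mapping only; the real limit plumbing lives in the server's
// event/limits types, which this sketch does not reproduce.
struct TokenLimits {
    lim_uidx: bool,
    lim_rmax: usize,
    lim_pmax: usize,
    lim_fmax: usize,
}

struct SessionLimits {
    unindexed_allow: bool,
    search_max_results: usize,
    search_max_filter_test: usize,
    filter_max_elements: usize,
}

impl From<&TokenLimits> for SessionLimits {
    fn from(t: &TokenLimits) -> Self {
        SessionLimits {
            unindexed_allow: t.lim_uidx,
            search_max_results: t.lim_rmax,
            search_max_filter_test: t.lim_pmax,
            filter_max_elements: t.lim_fmax,
        }
    }
}

fn main() {
    // The default budget applied when the token is built above.
    let t = TokenLimits { lim_uidx: false, lim_rmax: 128, lim_pmax: 256, lim_fmax: 32 };
    let s = SessionLimits::from(&t);
    assert!(!s.unindexed_allow);
    assert!(s.search_max_results == 128 && s.search_max_filter_test == 256);
    assert!(s.filter_max_elements == 32);
}
```

Whether these values should instead be derived from claims or group membership is the open question the inline comment in the hunk raises.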

View file

@ -29,7 +29,7 @@ impl PasswordChangeEvent {
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
msg: IdmAccountSetPasswordMessage, msg: IdmAccountSetPasswordMessage,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let e = Event::from_rw_uat(audit, qs, msg.uat)?; let e = Event::from_rw_uat(audit, qs, msg.uat.as_ref())?;
let u = *e.get_uuid().ok_or(OperationError::InvalidState)?; let u = *e.get_uuid().ok_or(OperationError::InvalidState)?;
Ok(PasswordChangeEvent { Ok(PasswordChangeEvent {
@ -43,7 +43,7 @@ impl PasswordChangeEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target: Uuid, target: Uuid,
cleartext: String, cleartext: String,
appid: Option<String>, appid: Option<String>,
@ -78,7 +78,7 @@ impl UnixPasswordChangeEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target: Uuid, target: Uuid,
cleartext: String, cleartext: String,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
@ -103,7 +103,7 @@ impl GeneratePasswordEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target: Uuid, target: Uuid,
appid: Option<String>, appid: Option<String>,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
@ -127,7 +127,7 @@ impl RegenerateRadiusSecretEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target: Uuid, target: Uuid,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let e = Event::from_rw_uat(audit, qs, uat)?; let e = Event::from_rw_uat(audit, qs, uat)?;
@ -153,7 +153,7 @@ impl RadiusAuthTokenEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target: Uuid, target: Uuid,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let e = Event::from_ro_uat(audit, qs, uat)?; let e = Event::from_ro_uat(audit, qs, uat)?;
@ -179,7 +179,7 @@ impl UnixUserTokenEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target: Uuid, target: Uuid,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let e = Event::from_ro_uat(audit, qs, uat)?; let e = Event::from_ro_uat(audit, qs, uat)?;
@ -205,7 +205,7 @@ impl UnixGroupTokenEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target: Uuid, target: Uuid,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
let e = Event::from_ro_uat(audit, qs, uat)?; let e = Event::from_ro_uat(audit, qs, uat)?;
@ -249,7 +249,7 @@ impl UnixUserAuthEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerReadTransaction, qs: &QueryServerReadTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target: Uuid, target: Uuid,
cleartext: String, cleartext: String,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
@ -274,7 +274,7 @@ impl GenerateTOTPEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target: Uuid, target: Uuid,
label: String, label: String,
) -> Result<Self, OperationError> { ) -> Result<Self, OperationError> {
@ -311,7 +311,7 @@ impl VerifyTOTPEvent {
pub fn from_parts( pub fn from_parts(
audit: &mut AuditScope, audit: &mut AuditScope,
qs: &QueryServerWriteTransaction, qs: &QueryServerWriteTransaction,
uat: Option<UserAuthToken>, uat: Option<&UserAuthToken>,
target: Uuid, target: Uuid,
session: Uuid, session: Uuid,
chal: u32, chal: u32,

View file

@ -272,10 +272,17 @@ impl<'a> IdmServerWriteTransaction<'a> {
if lae.target == *UUID_ANONYMOUS { if lae.target == *UUID_ANONYMOUS {
// TODO: #59 We should have checked if anonymous was locked by now! // TODO: #59 We should have checked if anonymous was locked by now!
let account = Account::try_from_entry_ro(au, &account_entry, &mut self.qs_read)?; let account = Account::try_from_entry_ro(au, &account_entry, &mut self.qs_read)?;
// Account must be anon, so we can gen the uat.
Ok(Some(LdapBoundToken { Ok(Some(LdapBoundToken {
spn: account.spn,
uuid: *UUID_ANONYMOUS, uuid: *UUID_ANONYMOUS,
effective_uuid: *UUID_ANONYMOUS, effective_uat: account
.to_userauthtoken(&[])
.ok_or(OperationError::InvalidState)
.map_err(|e| {
ladmin_error!(au, "Unable to generate effective_uat -> {:?}", e);
e
})?,
spn: account.spn,
})) }))
} else { } else {
let account = let account =
@ -284,10 +291,26 @@ impl<'a> IdmServerWriteTransaction<'a> {
.verify_unix_credential(au, lae.cleartext.as_str())? .verify_unix_credential(au, lae.cleartext.as_str())?
.is_some() .is_some()
{ {
// Get the anon uat
let anon_entry = self
.qs_read
.internal_search_uuid(au, &UUID_ANONYMOUS)
.map_err(|e| {
ladmin_error!(au, "Failed to find effective uat for auth ldap -> {:?}", e);
e
})?;
let anon_account = Account::try_from_entry_ro(au, &anon_entry, &mut self.qs_read)?;
Ok(Some(LdapBoundToken { Ok(Some(LdapBoundToken {
spn: account.spn, spn: account.spn,
uuid: account.uuid, uuid: account.uuid,
effective_uuid: *UUID_ANONYMOUS, effective_uat: anon_account
.to_userauthtoken(&[])
.ok_or(OperationError::InvalidState)
.map_err(|e| {
ladmin_error!(au, "Unable to generate effective_uat -> {:?}", e);
e
})?,
})) }))
} else { } else {
Ok(None) Ok(None)

View file

@ -1,11 +1,10 @@
use crate::audit::AuditScope; use crate::audit::AuditScope;
use crate::constants::{STR_UUID_DOMAIN_INFO, UUID_ANONYMOUS, UUID_DOMAIN_INFO}; use crate::constants::{STR_UUID_DOMAIN_INFO, UUID_ANONYMOUS, UUID_DOMAIN_INFO};
use crate::event::SearchEvent; use crate::event::SearchEvent;
use crate::filter::Filter;
use crate::idm::event::LdapAuthEvent; use crate::idm::event::LdapAuthEvent;
use crate::idm::server::IdmServer; use crate::idm::server::IdmServer;
use crate::server::QueryServerTransaction; use crate::server::QueryServerTransaction;
use kanidm_proto::v1::OperationError; use kanidm_proto::v1::{OperationError, UserAuthToken};
use ldap3_server::simple::*; use ldap3_server::simple::*;
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::iter; use std::iter;
@ -31,7 +30,7 @@ pub struct LdapBoundToken {
pub spn: String, pub spn: String,
pub uuid: Uuid, pub uuid: Uuid,
// For now, always anonymous // For now, always anonymous
pub effective_uuid: Uuid, pub effective_uat: UserAuthToken,
} }
pub struct LdapServer { pub struct LdapServer {
@ -236,13 +235,6 @@ impl LdapServer {
ladmin_info!(au, "LDAP Search Filter -> {:?}", lfilter); ladmin_info!(au, "LDAP Search Filter -> {:?}", lfilter);
// Kanidm Filter from LdapFilter
let filter =
Filter::from_ldap_ro(au, &lfilter, &idm_read.qs_read).map_err(|e| {
lrequest_error!(au, "invalid ldap filter {:?}", e);
e
})?;
// Build the event, with the permissions from effective_uuid // Build the event, with the permissions from effective_uuid
// (should always be anonymous at the moment) // (should always be anonymous at the moment)
// ! Remember, searchEvent wraps to ignore hidden for us. // ! Remember, searchEvent wraps to ignore hidden for us.
@ -250,10 +242,14 @@ impl LdapServer {
SearchEvent::new_ext_impersonate_uuid( SearchEvent::new_ext_impersonate_uuid(
au, au,
&idm_read.qs_read, &idm_read.qs_read,
&uat.effective_uuid, &uat.effective_uat,
&filter, &lfilter,
attrs, attrs,
) )
})
.map_err(|e| {
ladmin_error!(au, "failed to create search event -> {:?}", e);
e
})?; })?;
let res = idm_read.qs_read.search_ext(au, &se).map_err(|e| { let res = idm_read.qs_read.search_ext(au, &se).map_err(|e| {

View file

@ -1,4 +1,4 @@
#![deny(warnings)] // #![deny(warnings)]
#![warn(unused_extern_crates)] #![warn(unused_extern_crates)]
#![deny(clippy::unwrap_used)] #![deny(clippy::unwrap_used)]
#![deny(clippy::expect_used)] #![deny(clippy::expect_used)]

View file

@ -145,14 +145,20 @@ pub trait QueryServerTransaction {
e e
})?; })?;
let lims = se.get_limits();
// NOTE: We currently can't build search plugins due to the inability to hand // NOTE: We currently can't build search plugins due to the inability to hand
// the QS wr/ro to the plugin trait. However, there shouldn't be a need for search // the QS wr/ro to the plugin trait. However, there shouldn't be a need for search
// plugins, because all data transforms should be in the write path. // plugins, because all data transforms should be in the write path.
let res = self.get_be_txn().search(au, &vfr).map(|r| r).map_err(|e| {
    ladmin_error!(au, "backend failure -> {:?}", e);
    OperationError::Backend
})?;
let res = self
    .get_be_txn()
    .search(au, lims, &vfr)
    .map(|r| r)
    .map_err(|e| {
        ladmin_error!(au, "backend failure -> {:?}", e);
        OperationError::Backend
    })?;
// Apply ACP before we let the plugins "have at it". // Apply ACP before we let the plugins "have at it".
// WARNING; for external searches this is NOT the only // WARNING; for external searches this is NOT the only
@ -176,7 +182,9 @@ pub trait QueryServerTransaction {
e e
})?; })?;
self.get_be_txn().exists(au, &vfr).map_err(|e| {
let lims = ee.get_limits();
self.get_be_txn().exists(au, &lims, &vfr).map_err(|e| {
ladmin_error!(au, "backend failure -> {:?}", e); ladmin_error!(au, "backend failure -> {:?}", e);
OperationError::Backend OperationError::Backend
}) })
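Now that the event's limits are passed through to the backend `search` and `exists` calls, enforcement can sit where the candidate set is actually materialised. The following is a rough sketch of the kind of checks a backend could apply; the function shape, the `Limits` fields, and the thresholds are illustrative assumptions rather than the real backend API:

```rust
// Sketch only: not the real backend. It shows where session limits passed
// alongside the filter could reject unindexed or oversized work.
#[derive(Debug, PartialEq)]
enum OpError {
    ResourceLimit,
}

struct Limits {
    unindexed_allow: bool,
    search_max_results: usize,
    search_max_filter_test: usize,
}

fn backend_search(
    fully_indexed: bool,
    candidate_ids: Vec<u64>,
    lims: &Limits,
) -> Result<Vec<u64>, OpError> {
    // Refuse full scans if the session forbids unindexed queries.
    if !fully_indexed && !lims.unindexed_allow {
        return Err(OpError::ResourceLimit);
    }
    // Bound how many entries may be loaded and filter-tested.
    if candidate_ids.len() > lims.search_max_filter_test {
        return Err(OpError::ResourceLimit);
    }
    // Bound the size of the result set returned to the caller.
    if candidate_ids.len() > lims.search_max_results {
        return Err(OpError::ResourceLimit);
    }
    Ok(candidate_ids)
}

fn main() {
    let lims = Limits {
        unindexed_allow: false,
        search_max_results: 2,
        search_max_filter_test: 4,
    };
    // Three candidates exceed a two-result budget.
    let res = backend_search(true, vec![1, 2, 3], &lims);
    assert_eq!(res, Err(OpError::ResourceLimit));
}
```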
@ -3251,75 +3259,6 @@ mod tests {
}) })
} }
#[test]
fn test_qs_resolve_value() {
run_test!(|server: &QueryServer, audit: &mut AuditScope| {
let server_txn = server.write(duration_from_epoch_now());
let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"attrs": {
"class": ["object", "person", "account"],
"name": ["testperson1"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"description": ["testperson"],
"displayname": ["testperson1"]
}
}"#,
);
let e2: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"attrs": {
"class": ["object", "person"],
"name": ["testperson2"],
"uuid": ["a67c0c71-0b35-4218-a6b0-22d23d131d27"],
"description": ["testperson"],
"displayname": ["testperson2"]
}
}"#,
);
let e_ts: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{
"attrs": {
"class": ["tombstone", "object"],
"uuid": ["9557f49c-97a5-4277-a9a5-097d17eb8317"]
}
}"#,
);
let ce = CreateEvent::new_internal(vec![e1, e2, e_ts]);
let cr = server_txn.create(audit, &ce);
assert!(cr.is_ok());
// Resolving most times should yield expected results
let t1 = Value::new_utf8s("teststring");
let r1 = server_txn.resolve_value(audit, &t1);
assert!(r1 == Ok("teststring".to_string()));
// Resolve UUID with matching spn
let t_uuid = Value::new_refer_s("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap();
let r_uuid = server_txn.resolve_value(audit, &t_uuid);
debug!("{:?}", r_uuid);
assert!(r_uuid == Ok("testperson1@example.com".to_string()));
// Resolve UUID with matching name
let t_uuid = Value::new_refer_s("a67c0c71-0b35-4218-a6b0-22d23d131d27").unwrap();
let r_uuid = server_txn.resolve_value(audit, &t_uuid);
debug!("{:?}", r_uuid);
assert!(r_uuid == Ok("testperson2".to_string()));
// Resolve UUID non-exist
let t_uuid_non = Value::new_refer_s("b83e98f0-3d2e-41d2-9796-d8d993289c86").unwrap();
let r_uuid_non = server_txn.resolve_value(audit, &t_uuid_non);
debug!("{:?}", r_uuid_non);
assert!(r_uuid_non == Ok("b83e98f0-3d2e-41d2-9796-d8d993289c86".to_string()));
// Resolve UUID to tombstone/recycled (same as non-exist)
let t_uuid_ts = Value::new_refer_s("9557f49c-97a5-4277-a9a5-097d17eb8317").unwrap();
let r_uuid_ts = server_txn.resolve_value(audit, &t_uuid_ts);
debug!("{:?}", r_uuid_ts);
assert!(r_uuid_ts == Ok("9557f49c-97a5-4277-a9a5-097d17eb8317".to_string()));
})
}
#[test] #[test]
fn test_qs_dynamic_schema_class() { fn test_qs_dynamic_schema_class() {
run_test!(|server: &QueryServer, audit: &mut AuditScope| { run_test!(|server: &QueryServer, audit: &mut AuditScope| {