Entry Arc Tracking to reduce memory footprint (#579)
parent d2bb9cead4
commit 2fbc92668c
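This commit replaces the boxed entries held in the backend entry cache (and the entry sets passed between the backend, access controls, query server and plugins) with Arc<EntrySealedCommitted>. A cache hit or search result can then hand out a reference-counted pointer instead of deep-cloning every attribute of the entry, so concurrent readers share a single allocation per entry. Below is a minimal sketch of the idea only; it uses a plain HashMap as a stand-in for the real ARCache and a toy Entry type, neither of which is the actual kanidm API.

    use std::collections::HashMap;
    use std::sync::Arc;

    // Stand-in for the sealed/committed entry type.
    #[derive(Clone, Debug)]
    struct Entry {
        attrs: Vec<(String, String)>,
    }

    // Before: the cache owned boxed entries, so every hit deep-cloned the entry.
    struct BoxCache {
        inner: HashMap<u64, Box<Entry>>,
    }

    impl BoxCache {
        fn get(&self, id: u64) -> Option<Entry> {
            // Full copy of all attributes on every cache hit.
            self.inner.get(&id).map(|e| e.as_ref().clone())
        }
    }

    // After: the cache holds Arc'd entries, so a hit is a refcount increment.
    struct ArcCache {
        inner: HashMap<u64, Arc<Entry>>,
    }

    impl ArcCache {
        fn get(&self, id: u64) -> Option<Arc<Entry>> {
            // Cheap: clones the pointer, not the entry.
            self.inner.get(&id).cloned()
        }
    }

    fn main() {
        let e = Arc::new(Entry {
            attrs: vec![("name".into(), "william".into())],
        });
        let mut cache = ArcCache { inner: HashMap::new() };
        cache.inner.insert(1, e);

        // Two "result sets" now share the same allocation.
        let a = cache.get(1).unwrap();
        let b = cache.get(1).unwrap();
        assert!(Arc::ptr_eq(&a, &b));
        println!("strong refs: {}", Arc::strong_count(&a)); // 3: cache + a + b
    }

The same pattern runs through the diff below: cache hits and search results hand out Arc clones, and call sites that previously took an Entry by value now accept Arc<EntrySealedCommitted> or borrow the inner entry via as_ref().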
@@ -23,6 +23,7 @@ use std::collections::BTreeSet;
 // use hashbrown::HashSet;
 use std::cell::Cell;
 use std::ops::DerefMut;
+use std::sync::Arc;
 use uuid::Uuid;

 use crate::entry::{Entry, EntryCommitted, EntryInit, EntryNew, EntryReduced, EntrySealed};
@@ -498,8 +499,8 @@ pub trait AccessControlsTransaction<'a> {
 &self,
 audit: &mut AuditScope,
 se: &SearchEvent,
-entries: Vec<Entry<EntrySealed, EntryCommitted>>,
-) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+entries: Vec<Arc<EntrySealedCommitted>>,
+) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
 // If this is an internal search, return our working set.
 let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &se.ident.origin {
 IdentType::Internal => {
@@ -556,7 +557,7 @@ pub trait AccessControlsTransaction<'a> {
 let requested_attrs: BTreeSet<&str> = se.filter_orig.get_attr_set();

 // For each entry
-let allowed_entries: Vec<Entry<EntrySealed, EntryCommitted>> = spanned!(
+let allowed_entries: Vec<Arc<EntrySealedCommitted>> = spanned!(
 "access::search_filter_entries<allowed_entries>",
 {
 lperf_segment!(
@@ -640,7 +641,7 @@ pub trait AccessControlsTransaction<'a> {
 &self,
 audit: &mut AuditScope,
 se: &SearchEvent,
-entries: Vec<Entry<EntrySealed, EntryCommitted>>,
+entries: Vec<Arc<EntrySealedCommitted>>,
 ) -> Result<Vec<Entry<EntryReduced, EntryCommitted>>, OperationError> {
 // If this is an internal search, do nothing. This can occur in some test cases ONLY
 let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &se.ident.origin {
@@ -651,7 +652,7 @@ pub trait AccessControlsTransaction<'a> {
 // In tests we just push everything back.
 return Ok(entries
 .into_iter()
-.map(|e| unsafe { e.into_reduced() })
+.map(|e| unsafe { e.as_ref().clone().into_reduced() })
 .collect());
 } else {
 // In production we can't risk leaking data here, so we return
@@ -838,7 +839,7 @@ pub trait AccessControlsTransaction<'a> {
 &self,
 audit: &mut AuditScope,
 me: &ModifyEvent,
-entries: &[Entry<EntrySealed, EntryCommitted>],
+entries: &[Arc<EntrySealedCommitted>],
 ) -> Result<bool, OperationError> {
 let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &me.ident.origin {
 IdentType::Internal => {
@@ -1164,7 +1165,7 @@ pub trait AccessControlsTransaction<'a> {
 &self,
 audit: &mut AuditScope,
 de: &DeleteEvent,
-entries: &[Entry<EntrySealed, EntryCommitted>],
+entries: &[Arc<EntrySealedCommitted>],
 ) -> Result<bool, OperationError> {
 let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &de.ident.origin {
 IdentType::Internal => {
@@ -1478,6 +1479,7 @@ mod tests {
 };
 use crate::event::{CreateEvent, DeleteEvent, ModifyEvent, SearchEvent};
 use crate::prelude::*;
+use std::sync::Arc;

 macro_rules! acp_from_entry_err {
 (
@@ -1945,8 +1947,8 @@ mod tests {
 );
 let ev1 = unsafe { e1.into_sealed_committed() };

-let expect = vec![ev1.clone()];
-let entries = vec![ev1];
+let expect = vec![Arc::new(ev1.clone())];
+let entries = vec![Arc::new(ev1)];

 // This acp basically is "allow access to stuff, but not this".
 test_acp_search!(
@@ -1974,12 +1976,12 @@ mod tests {
 let e2: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_TESTPERSON2);
 let ev2 = unsafe { e2.into_sealed_committed() };

-let r_set = vec![ev1.clone(), ev2.clone()];
+let r_set = vec![Arc::new(ev1.clone()), Arc::new(ev2.clone())];

 let se_admin = unsafe {
 SearchEvent::new_impersonate_entry_ser(JSON_ADMIN_V1, filter_all!(f_pres("name")))
 };
-let ex_admin = vec![ev1.clone()];
+let ex_admin = vec![Arc::new(ev1.clone())];

 let se_anon = unsafe {
 SearchEvent::new_impersonate_entry_ser(JSON_ANONYMOUS_V1, filter_all!(f_pres("name")))
@@ -2057,7 +2059,7 @@ mod tests {
 // class and uuid being present.
 let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_TESTPERSON1);
 let ev1 = unsafe { e1.into_sealed_committed() };
-let r_set = vec![ev1.clone()];
+let r_set = vec![Arc::new(ev1.clone())];

 let ex1: Entry<EntryInit, EntryNew> =
 Entry::unsafe_from_entry_str(JSON_TESTPERSON1_REDUCED);
@@ -2096,7 +2098,7 @@ mod tests {
 // class and uuid being present.
 let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_TESTPERSON1);
 let ev1 = unsafe { e1.into_sealed_committed() };
-let r_set = vec![ev1.clone()];
+let r_set = vec![Arc::new(ev1.clone())];

 let ex1: Entry<EntryInit, EntryNew> =
 Entry::unsafe_from_entry_str(JSON_TESTPERSON1_REDUCED);
@@ -2159,7 +2161,7 @@ mod tests {
 fn test_access_enforce_modify() {
 let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_TESTPERSON1);
 let ev1 = unsafe { e1.into_sealed_committed() };
-let r_set = vec![ev1.clone()];
+let r_set = vec![Arc::new(ev1.clone())];

 // Name present
 let me_pres = unsafe {
@@ -2443,7 +2445,7 @@ mod tests {
 fn test_access_enforce_delete() {
 let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_TESTPERSON1);
 let ev1 = unsafe { e1.into_sealed_committed() };
-let r_set = vec![ev1.clone()];
+let r_set = vec![Arc::new(ev1.clone())];

 let de_admin = unsafe {
 DeleteEvent::new_impersonate_entry_ser(

@@ -18,6 +18,7 @@ use hashbrown::HashMap;
 use std::collections::BTreeSet;
 use std::convert::TryInto;
 use std::ops::DerefMut;
+use std::sync::Arc;
 use std::time::Duration;
 use uuid::Uuid;

@@ -49,7 +50,7 @@ enum NameCacheValue {

 pub struct IdlArcSqlite {
 db: IdlSqlite,
-entry_cache: ARCache<u64, Box<Entry<EntrySealed, EntryCommitted>>>,
+entry_cache: ARCache<u64, Arc<EntrySealedCommitted>>,
 idl_cache: ARCache<IdlCacheKey, Box<IDLBitRange>>,
 name_cache: ARCache<NameCacheKey, NameCacheValue>,
 op_ts_max: CowCell<Option<Duration>>,
@@ -59,7 +60,7 @@ pub struct IdlArcSqlite {

 pub struct IdlArcSqliteReadTransaction<'a> {
 db: IdlSqliteReadTransaction,
-entry_cache: ARCacheReadTxn<'a, u64, Box<Entry<EntrySealed, EntryCommitted>>>,
+entry_cache: ARCacheReadTxn<'a, u64, Arc<EntrySealedCommitted>>,
 idl_cache: ARCacheReadTxn<'a, IdlCacheKey, Box<IDLBitRange>>,
 name_cache: ARCacheReadTxn<'a, NameCacheKey, NameCacheValue>,
 allids: CowCellReadTxn<IDLBitRange>,
@@ -67,7 +68,7 @@ pub struct IdlArcSqliteReadTransaction<'a> {

 pub struct IdlArcSqliteWriteTransaction<'a> {
 db: IdlSqliteWriteTransaction,
-entry_cache: ARCacheWriteTxn<'a, u64, Box<Entry<EntrySealed, EntryCommitted>>>,
+entry_cache: ARCacheWriteTxn<'a, u64, Arc<EntrySealedCommitted>>,
 idl_cache: ARCacheWriteTxn<'a, IdlCacheKey, Box<IDLBitRange>>,
 name_cache: ARCacheWriteTxn<'a, NameCacheKey, NameCacheValue>,
 op_ts_max: CowCellWriteTxn<'a, Option<Duration>>,
@@ -83,7 +84,7 @@ macro_rules! get_identry {
 $is_read_op:expr
 ) => {{
 spanned!("be::idl_arc_sqlite::get_identry", {
-let mut result: Vec<Entry<_, _>> = Vec::new();
+let mut result: Vec<Arc<EntrySealedCommitted>> = Vec::new();
 match $idl {
 IdList::Partial(idli) | IdList::PartialThreshold(idli) | IdList::Indexed(idli) => {
 let mut nidl = IDLBitRange::new();
@@ -92,7 +93,7 @@ macro_rules! get_identry {
 // For all the id's in idl.
 // is it in the cache?
 match $self.entry_cache.get(&i) {
-Some(eref) => result.push(eref.as_ref().clone()),
+Some(eref) => result.push(eref.clone()),
 None => unsafe { nidl.push_id(i) },
 }
 });
@@ -103,7 +104,7 @@ macro_rules! get_identry {
 // Clone everything from db_result into the cache.
 if $is_read_op {
 db_result.iter().for_each(|e| {
-$self.entry_cache.insert(e.get_id(), Box::new(e.clone()));
+$self.entry_cache.insert(e.get_id(), e.clone());
 });
 }
 // Merge the two vecs
@@ -119,7 +120,7 @@ macro_rules! get_identry {
 (&idli)
 .into_iter()
 .for_each(|i| match $self.entry_cache.get(&i) {
-Some(eref) => result.push(eref.as_ref().clone()),
+Some(eref) => result.push(eref.clone()),
 None => unsafe { nidl.push_id(i) },
 });

@@ -324,7 +325,7 @@ pub trait IdlArcSqliteTransaction {
 fn get_identry(
 &mut self,
 idl: &IdList,
-) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError>;
+) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError>;

 // ! TRACING INTEGRATED
 fn get_identry_raw(&self, idl: &IdList) -> Result<Vec<IdRawEntry>, OperationError>;
@@ -379,7 +380,7 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> {
 fn get_identry(
 &mut self,
 idl: &IdList,
-) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
 get_identry!(self, idl, true)
 }

@@ -470,7 +471,7 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> {
 fn get_identry(
 &mut self,
 idl: &IdList,
-) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
 get_identry!(self, idl, false)
 }

@@ -665,7 +666,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
 } else {
 (*self.allids).insert_id(e.get_id());
 self.entry_cache
-.insert_dirty(e.get_id(), Box::new(e.clone()));
+.insert_dirty(e.get_id(), Arc::new(e.clone()));
 Ok(())
 }
 })

@@ -11,6 +11,7 @@ use rusqlite::Connection;
 use rusqlite::OpenFlags;
 use rusqlite::OptionalExtension;
 use std::convert::{TryFrom, TryInto};
+use std::sync::Arc;
 use std::time::Duration;
 use tracing::trace;
 use uuid::Uuid;
@@ -113,14 +114,11 @@ pub trait IdlSqliteTransaction {
 fn get_conn(&self) -> &r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;

 // ! TRACING INTEGRATED
-fn get_identry(
-&self,
-idl: &IdList,
-) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+fn get_identry(&self, idl: &IdList) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
 spanned!("be::idl_sqlite::get_identry", {
 self.get_identry_raw(idl)?
 .into_iter()
-.map(|ide| ide.into_entry())
+.map(|ide| ide.into_entry().map(|e| Arc::new(e)))
 .collect()
 })
 }

@@ -542,7 +542,7 @@ pub trait BackendTransaction {
 au: &mut AuditScope,
 erl: &Limits,
 filt: &Filter<FilterValidResolved>,
-) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
 let _entered = trace_span!("be::search").entered();
 // Unlike DS, even if we don't get the index back, we can just pass
 // to the in-memory filter test and be done.
@@ -1014,7 +1014,7 @@ impl<'a> BackendWriteTransaction<'a> {
 pub fn modify(
 &self,
 au: &mut AuditScope,
-pre_entries: &[Entry<EntrySealed, EntryCommitted>],
+pre_entries: &[Arc<EntrySealedCommitted>],
 post_entries: &[Entry<EntrySealed, EntryCommitted>],
 ) -> Result<(), OperationError> {
 lperf_trace_segment!(au, "be::modify", || {
@@ -1067,14 +1067,14 @@ impl<'a> BackendWriteTransaction<'a> {
 pre_entries
 .iter()
 .zip(post_entries.iter())
-.try_for_each(|(pre, post)| self.entry_index(au, Some(pre), Some(post)))
+.try_for_each(|(pre, post)| self.entry_index(au, Some(pre.as_ref()), Some(post)))
 })
 }

 pub fn delete(
 &self,
 au: &mut AuditScope,
-entries: &[Entry<EntrySealed, EntryCommitted>],
+entries: &[Arc<EntrySealedCommitted>],
 ) -> Result<(), OperationError> {
 lperf_trace_segment!(au, "be::delete", || {
 if entries.is_empty() {
@@ -1688,6 +1688,7 @@ mod tests {
 use idlset::v2::IDLBitRange;
 use std::fs;
 use std::iter::FromIterator;
+use std::sync::Arc;
 use uuid::Uuid;

 use super::super::audit::AuditScope;
@@ -1874,20 +1875,22 @@ mod tests {
 let r1 = results.remove(0);
 let r2 = results.remove(0);

-let mut r1 = unsafe { r1.into_invalid() };
-let mut r2 = unsafe { r2.into_invalid() };
+let mut r1 = unsafe { r1.as_ref().clone().into_invalid() };
+let mut r2 = unsafe { r2.as_ref().clone().into_invalid() };

 // Modify no id (err)
 // This is now impossible due to the state machine design.
 // However, with some unsafe ....
 let ue1 = unsafe { e1.clone().into_sealed_committed() };
-assert!(be.modify(audit, &vec![ue1.clone()], &vec![ue1]).is_err());
+assert!(be
+.modify(audit, &vec![Arc::new(ue1.clone())], &vec![ue1])
+.is_err());
 // Modify none
 assert!(be.modify(audit, &vec![], &vec![]).is_err());

 // Make some changes to r1, r2.
-let pre1 = unsafe { r1.clone().into_sealed_committed() };
-let pre2 = unsafe { r2.clone().into_sealed_committed() };
+let pre1 = unsafe { Arc::new(r1.clone().into_sealed_committed()) };
+let pre2 = unsafe { Arc::new(r2.clone().into_sealed_committed()) };
 r1.add_ava("desc", Value::from("modified"));
 r2.add_ava("desc", Value::from("modified"));

@@ -1908,7 +1911,7 @@ mod tests {
 assert!(be
 .modify(
 audit,
-&vec![vr1.clone(), pre2.clone()],
+&vec![Arc::new(vr1.clone()), pre2.clone()],
 &vec![vr1.clone(), vr2.clone()]
 )
 .is_ok());
@@ -1958,7 +1961,7 @@ mod tests {

 // Delete one
 assert!(be.delete(audit, &vec![r1.clone()]).is_ok());
-assert!(!entry_exists!(audit, be, r1));
+assert!(!entry_exists!(audit, be, r1.as_ref()));

 // delete none (no match filter)
 assert!(be.delete(audit, &vec![]).is_err());
@@ -1970,18 +1973,18 @@ mod tests {
 e4.add_ava("userid", Value::from("amy"));
 e4.add_ava("uuid", Value::from("21d816b5-1f6a-4696-b7c1-6ed06d22ed81"));

-let ve4 = unsafe { e4.clone().into_sealed_committed() };
+let ve4 = unsafe { Arc::new(e4.clone().into_sealed_committed()) };

 assert!(be.delete(audit, &vec![ve4]).is_err());

-assert!(entry_exists!(audit, be, r2));
-assert!(entry_exists!(audit, be, r3));
+assert!(entry_exists!(audit, be, r2.as_ref()));
+assert!(entry_exists!(audit, be, r3.as_ref()));

 // delete batch
 assert!(be.delete(audit, &vec![r2.clone(), r3.clone()]).is_ok());

-assert!(!entry_exists!(audit, be, r2));
-assert!(!entry_exists!(audit, be, r3));
+assert!(!entry_exists!(audit, be, r2.as_ref()));
+assert!(!entry_exists!(audit, be, r3.as_ref()));

 // delete none (no entries left)
 // see fn delete for why this is ok, not err
@@ -2233,6 +2236,7 @@ mod tests {
 let e1 = unsafe { e1.into_sealed_new() };

 let rset = be.create(audit, vec![e1.clone()]).unwrap();
+let rset: Vec<_> = rset.into_iter().map(Arc::new).collect();

 idl_state!(be, "name", IndexType::Equality, "william", Some(vec![1]));

@@ -2303,6 +2307,7 @@ mod tests {
 .create(audit, vec![e1.clone(), e2.clone(), e3.clone()])
 .unwrap();
 rset.remove(1);
+let rset: Vec<_> = rset.into_iter().map(Arc::new).collect();

 // Now remove e1, e3.
 be.delete(audit, &rset).unwrap();
@@ -2353,8 +2358,9 @@ mod tests {
 let e1 = unsafe { e1.into_sealed_new() };

 let rset = be.create(audit, vec![e1.clone()]).unwrap();
+let rset: Vec<_> = rset.into_iter().map(Arc::new).collect();
 // Now, alter the new entry.
-let mut ce1 = unsafe { rset[0].clone().into_invalid() };
+let mut ce1 = unsafe { rset[0].as_ref().clone().into_invalid() };
 // add something.
 ce1.add_ava("tb", Value::from("test"));
 // remove something.
@@ -2398,8 +2404,9 @@ mod tests {
 let e1 = unsafe { e1.into_sealed_new() };

 let rset = be.create(audit, vec![e1.clone()]).unwrap();
+let rset: Vec<_> = rset.into_iter().map(Arc::new).collect();
 // Now, alter the new entry.
-let mut ce1 = unsafe { rset[0].clone().into_invalid() };
+let mut ce1 = unsafe { rset[0].as_ref().clone().into_invalid() };
 ce1.purge_ava("name");
 ce1.purge_ava("uuid");
 ce1.add_ava("name", Value::from("claire"));

@@ -49,6 +49,7 @@ use std::collections::BTreeSet;
 // use hashbrown::HashMap as Map;
 use hashbrown::HashMap;
 use smartstring::alias::String as AttrString;
+use std::sync::Arc;
 use time::OffsetDateTime;
 use uuid::Uuid;

@@ -90,7 +91,7 @@ lazy_static! {

 pub type EntrySealedCommitted = Entry<EntrySealed, EntryCommitted>;
 pub type EntryInvalidCommitted = Entry<EntryInvalid, EntryCommitted>;
-pub type EntryTuple = (EntrySealedCommitted, EntryInvalidCommitted);
+pub type EntryTuple = (Arc<EntrySealedCommitted>, EntryInvalidCommitted);

 // Entry should have a lifecycle of types. This is Raw (modifiable) and Entry (verified).
 // This way, we can move between them, but only certain actions are possible on either
@@ -1323,31 +1324,30 @@ impl Entry<EntrySealed, EntryCommitted> {
 /// Given a set of attributes that are allowed to be seen on this entry, process and remove
 /// all other values that are NOT allowed in this query.
 pub fn reduce_attributes(
-self,
+&self,
 allowed_attrs: &BTreeSet<&str>,
 ) -> Entry<EntryReduced, EntryCommitted> {
 // Remove all attrs from our tree that are NOT in the allowed set.
-
-let Entry {
-valid: s_valid,
-state: s_state,
-attrs: s_attrs,
-} = self;
-
-let f_attrs: Map<_, _> = s_attrs
-.into_iter()
+let f_attrs: Map<_, _> = self
+.attrs
+.iter()
 .filter_map(|(k, v)| {
 if allowed_attrs.contains(k.as_str()) {
-Some((k, v))
+Some((k.clone(), v.clone()))
 } else {
 None
 }
 })
 .collect();

+let valid = EntryReduced {
+uuid: self.valid.uuid,
+};
+let state = self.state.clone();
+
 Entry {
-valid: EntryReduced { uuid: s_valid.uuid },
-state: s_state,
+valid,
+state,
 attrs: f_attrs,
 }
 }

@@ -36,6 +36,9 @@ use std::collections::BTreeSet;
 use std::time::Duration;
 use uuid::Uuid;

+#[cfg(test)]
+use std::sync::Arc;
+
 #[derive(Debug)]
 pub struct SearchResult {
 entries: Vec<ProtoEntry>,
@@ -224,7 +227,7 @@ impl SearchEvent {

 #[cfg(test)]
 pub unsafe fn new_impersonate_entry(
-e: Entry<EntrySealed, EntryCommitted>,
+e: Arc<Entry<EntrySealed, EntryCommitted>>,
 filter: Filter<FilterInvalid>,
 ) -> Self {
 SearchEvent {
@@ -251,7 +254,7 @@ impl SearchEvent {
 #[cfg(test)]
 /* Impersonate a request for recycled objects */
 pub unsafe fn new_rec_impersonate_entry(
-e: Entry<EntrySealed, EntryCommitted>,
+e: Arc<Entry<EntrySealed, EntryCommitted>>,
 filter: Filter<FilterInvalid>,
 ) -> Self {
 let filter_orig = filter.into_valid();
@@ -267,7 +270,7 @@ impl SearchEvent {
 #[cfg(test)]
 /* Impersonate an external request AKA filter ts + recycle */
 pub unsafe fn new_ext_impersonate_entry(
-e: Entry<EntrySealed, EntryCommitted>,
+e: Arc<Entry<EntrySealed, EntryCommitted>>,
 filter: Filter<FilterInvalid>,
 ) -> Self {
 SearchEvent {
@@ -457,7 +460,7 @@ impl DeleteEvent {

 #[cfg(test)]
 pub unsafe fn new_impersonate_entry(
-e: Entry<EntrySealed, EntryCommitted>,
+e: Arc<Entry<EntrySealed, EntryCommitted>>,
 filter: Filter<FilterInvalid>,
 ) -> Self {
 DeleteEvent {
@@ -646,7 +649,7 @@ impl ModifyEvent {

 #[cfg(test)]
 pub unsafe fn new_impersonate_entry(
-e: Entry<EntrySealed, EntryCommitted>,
+e: Arc<Entry<EntrySealed, EntryCommitted>>,
 filter: Filter<FilterInvalid>,
 modlist: ModifyList<ModifyInvalid>,
 ) -> Self {
@@ -954,7 +957,7 @@ impl ReviveRecycledEvent {

 #[cfg(test)]
 pub unsafe fn new_impersonate_entry(
-e: Entry<EntrySealed, EntryCommitted>,
+e: Arc<Entry<EntrySealed, EntryCommitted>>,
 filter: Filter<FilterInvalid>,
 ) -> Self {
 ReviveRecycledEvent {

@@ -6,6 +6,7 @@
 use crate::prelude::*;
 use kanidm_proto::v1::UserAuthToken;
 use std::hash::Hash;
+use std::sync::Arc;

 #[derive(Debug, Clone)]
 /// Limits on the resources a single event can consume. These are defined per-event
@@ -41,7 +42,7 @@ impl Limits {
 #[derive(Debug, Clone)]
 /// Metadata and the entry of the current Identity which is an external account/user.
 pub struct IdentUser {
-pub entry: Entry<EntrySealed, EntryCommitted>,
+pub entry: Arc<Entry<EntrySealed, EntryCommitted>>,
 // IpAddr?
 // Other metadata?
 }
@@ -105,7 +106,7 @@ impl Identity {
 }

 #[cfg(test)]
-pub fn from_impersonate_entry(entry: Entry<EntrySealed, EntryCommitted>) -> Self {
+pub fn from_impersonate_entry(entry: Arc<Entry<EntrySealed, EntryCommitted>>) -> Self {
 Identity {
 origin: IdentType::User(IdentUser { entry }),
 limits: Limits::unlimited(),
@@ -115,7 +116,7 @@ impl Identity {
 #[cfg(test)]
 pub unsafe fn from_impersonate_entry_ser(e: &str) -> Self {
 let ei: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(e);
-Self::from_impersonate_entry(ei.into_sealed_committed())
+Self::from_impersonate_entry(Arc::new(ei.into_sealed_committed()))
 }

 pub fn from_impersonate(ident: &Self) -> Self {

@@ -45,7 +45,10 @@ macro_rules! try_from_account_e {
 e
 })?;
 // Now convert the group entries to groups.
-let groups: Result<Vec<_>, _> = ges.iter().map(Group::try_from_entry).collect();
+let groups: Result<Vec<_>, _> = ges
+.iter()
+.map(|e| Group::try_from_entry(e.as_ref()))
+.collect();
 groups.map_err(|e| {
 admin_error!(?e, "failed to transform group entries to groups");
 e

@@ -12,6 +12,7 @@ use fernet::Fernet;
 use hashbrown::HashMap;
 use kanidm_proto::v1::UserAuthToken;
 use openssl::sha;
+use std::sync::Arc;
 use time::OffsetDateTime;
 use url::{Origin, Url};
 use webauthn_rs::base64_data::Base64UrlSafeData;
@@ -146,10 +147,10 @@ pub struct Oauth2ResourceServersWriteTransaction<'a> {
 inner: CowCellWriteTxn<'a, Oauth2RSInner>,
 }

-impl TryFrom<Vec<EntrySealedCommitted>> for Oauth2ResourceServers {
+impl TryFrom<Vec<Arc<EntrySealedCommitted>>> for Oauth2ResourceServers {
 type Error = OperationError;

-fn try_from(value: Vec<EntrySealedCommitted>) -> Result<Self, Self::Error> {
+fn try_from(value: Vec<Arc<EntrySealedCommitted>>) -> Result<Self, Self::Error> {
 let fernet =
 Fernet::new(&Fernet::generate_key()).ok_or(OperationError::CryptographyError)?;
 let oauth2rs = Oauth2ResourceServers {
@@ -181,7 +182,7 @@ impl Oauth2ResourceServers {
 }

 impl<'a> Oauth2ResourceServersWriteTransaction<'a> {
-pub fn reload(&mut self, value: Vec<EntrySealedCommitted>) -> Result<(), OperationError> {
+pub fn reload(&mut self, value: Vec<Arc<EntrySealedCommitted>>) -> Result<(), OperationError> {
 let rs_set: Result<HashMap<_, _>, _> = value
 .into_iter()
 .map(|ent| {

@@ -541,7 +541,7 @@ impl<'a> IdmServerAuthTransaction<'a> {
 // typing and functionality so we can assess what auth types can
 // continue, and helps to keep non-needed entry specific data
 // out of the session tree.
-let account = Account::try_from_entry_ro(au, &entry, &mut self.qs_read)?;
+let account = Account::try_from_entry_ro(au, entry.as_ref(), &mut self.qs_read)?;

 // Check the credential that the auth_session will attempt to
 // use.
@@ -766,7 +766,7 @@ impl<'a> IdmServerAuthTransaction<'a> {
 .qs_read
 .internal_search_uuid(au, &uae.target)
 .and_then(|account_entry| {
-UnixUserAccount::try_from_entry_ro(au, &account_entry, &mut self.qs_read)
+UnixUserAccount::try_from_entry_ro(au, account_entry.as_ref(), &mut self.qs_read)
 })
 .map_err(|e| {
 admin_error!("Failed to start auth unix -> {:?}", e);
@@ -850,7 +850,8 @@ impl<'a> IdmServerAuthTransaction<'a> {

 // if anonymous
 if lae.target == *UUID_ANONYMOUS {
-let account = Account::try_from_entry_ro(au, &account_entry, &mut self.qs_read)?;
+let account =
+Account::try_from_entry_ro(au, account_entry.as_ref(), &mut self.qs_read)?;
 // Check if the anon account has been locked.
 if !account.is_within_valid_time(ct) {
 lsecurity!(au, "Account is not within valid time period");
@@ -871,7 +872,7 @@ impl<'a> IdmServerAuthTransaction<'a> {
 }))
 } else {
 let account =
-UnixUserAccount::try_from_entry_ro(au, &account_entry, &mut self.qs_read)?;
+UnixUserAccount::try_from_entry_ro(au, account_entry.as_ref(), &mut self.qs_read)?;

 if !account.is_within_valid_time(ct) {
 lsecurity!(au, "Account is not within valid time period");
@@ -917,7 +918,7 @@ impl<'a> IdmServerAuthTransaction<'a> {
 e
 })?;
 let anon_account =
-Account::try_from_entry_ro(au, &anon_entry, &mut self.qs_read)?;
+Account::try_from_entry_ro(au, anon_entry.as_ref(), &mut self.qs_read)?;

 Ok(Some(LdapBoundToken {
 spn: account.spn,

@@ -389,7 +389,7 @@ macro_rules! try_from_account_group_e {
 ]));
 let ges: Vec<_> = $qs.internal_search($au, f)?;
 let groups: Result<Vec<_>, _> = iter::once(Ok(upg))
-.chain(ges.iter().map(UnixGroup::try_from_entry))
+.chain(ges.iter().map(|e| UnixGroup::try_from_entry(e.as_ref())))
 .collect();
 groups
 }

@@ -167,6 +167,8 @@ impl Plugin for AttrUnique {
 Err(e) => return vec![e],
 };

+let all_cand: Vec<_> = all_cand.into_iter().map(|e| e.as_ref().clone()).collect();
+
 let uniqueattrs = {
 let schema = qs.get_schema();
 schema.get_attributes_unique()

@@ -10,7 +10,7 @@
 // As a result, we first need to run refint to clean up all dangling references, then memberof
 // fixes the graph of memberships

-use crate::entry::{Entry, EntryCommitted, EntryInvalid, EntrySealed};
+use crate::entry::{Entry, EntryCommitted, EntryInvalid, EntrySealed, EntryTuple};
 use crate::event::{CreateEvent, DeleteEvent, ModifyEvent};
 use crate::plugins::Plugin;
 use crate::prelude::*;
@@ -19,6 +19,7 @@ use crate::valueset::ValueSet;
 use kanidm_proto::v1::{ConsistencyError, OperationError};

 use hashbrown::HashMap;
+use std::sync::Arc;
 use uuid::Uuid;

 lazy_static! {
@@ -28,10 +29,6 @@ lazy_static! {

 pub struct MemberOf;

-type EntrySealedCommitted = Entry<EntrySealed, EntryCommitted>;
-type EntryInvalidCommitted = Entry<EntryInvalid, EntryCommitted>;
-type EntryTuple = (EntrySealedCommitted, EntryInvalidCommitted);
-
 fn do_memberof(
 au: &mut AuditScope,
 qs: &QueryServerWriteTransaction,
@@ -239,7 +236,7 @@ impl Plugin for MemberOf {
 fn post_modify(
 au: &mut AuditScope,
 qs: &QueryServerWriteTransaction,
-pre_cand: &[Entry<EntrySealed, EntryCommitted>],
+pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
 cand: &[Entry<EntrySealed, EntryCommitted>],
 _me: &ModifyEvent,
 ) -> Result<(), OperationError> {

@@ -7,6 +7,7 @@ use crate::entry::{Entry, EntryCommitted, EntryInvalid, EntryNew, EntrySealed};
 use crate::event::{CreateEvent, DeleteEvent, ModifyEvent};
 use crate::prelude::*;
 use kanidm_proto::v1::{ConsistencyError, OperationError};
+use std::sync::Arc;
 use tracing::trace_span;

 mod attrunique;
@@ -79,7 +80,7 @@ trait Plugin {
 au: &mut AuditScope,
 _qs: &QueryServerWriteTransaction,
 // List of what we modified that was valid?
-_pre_cand: &[Entry<EntrySealed, EntryCommitted>],
+_pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
 _cand: &[Entry<EntrySealed, EntryCommitted>],
 _ce: &ModifyEvent,
 ) -> Result<(), OperationError> {
@@ -350,7 +351,7 @@ impl Plugins {
 pub fn run_post_modify(
 au: &mut AuditScope,
 qs: &QueryServerWriteTransaction,
-pre_cand: &[Entry<EntrySealed, EntryCommitted>],
+pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
 cand: &[Entry<EntrySealed, EntryCommitted>],
 me: &ModifyEvent,
 ) -> Result<(), OperationError> {

@@ -20,6 +20,7 @@ use crate::filter::f_eq;
 use crate::modify::Modify;
 use crate::schema::SchemaTransaction;
 use kanidm_proto::v1::{ConsistencyError, PluginError};
+use std::sync::Arc;

 // NOTE: This *must* be after base.rs!!!

@@ -125,7 +126,7 @@ impl Plugin for ReferentialIntegrity {
 fn post_modify(
 au: &mut AuditScope,
 qs: &QueryServerWriteTransaction,
-_pre_cand: &[Entry<EntrySealed, EntryCommitted>],
+_pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
 _cand: &[Entry<EntrySealed, EntryCommitted>],
 me: &ModifyEvent,
 ) -> Result<(), OperationError> {

@@ -9,6 +9,7 @@ use crate::event::{CreateEvent, ModifyEvent};
 use crate::value::PartialValue;
 // use crate::value::{PartialValue, Value};
 use kanidm_proto::v1::{ConsistencyError, OperationError};
+use std::sync::Arc;

 pub struct Spn {}

@@ -131,7 +132,7 @@ impl Plugin for Spn {
 au: &mut AuditScope,
 qs: &QueryServerWriteTransaction,
 // List of what we modified that was valid?
-pre_cand: &[Entry<EntrySealed, EntryCommitted>],
+pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
 cand: &[Entry<EntrySealed, EntryCommitted>],
 _ce: &ModifyEvent,
 ) -> Result<(), OperationError> {

@@ -98,7 +98,7 @@ pub struct QueryServerWriteTransaction<'a> {

 pub(crate) struct ModifyPartial<'a> {
 norm_cand: Vec<Entry<EntrySealed, EntryCommitted>>,
-pre_candidates: Vec<Entry<EntrySealed, EntryCommitted>>,
+pre_candidates: Vec<Arc<Entry<EntrySealed, EntryCommitted>>>,
 me: &'a ModifyEvent,
 }

@@ -172,7 +172,7 @@ pub trait QueryServerTransaction<'a> {
 &self,
 audit: &mut AuditScope,
 se: &SearchEvent,
-) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
 spanned!("server::search", {
 lperf_segment!(audit, "server::search", || {
 if se.ident.is_internal() {
@@ -346,7 +346,7 @@ pub trait QueryServerTransaction<'a> {
 &self,
 audit: &mut AuditScope,
 filter: Filter<FilterInvalid>,
-) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
 spanned!("server::internal_search", {
 lperf_segment!(audit, "server::internal_search", || {
 let f_valid = filter
@@ -365,7 +365,7 @@ pub trait QueryServerTransaction<'a> {
 f_valid: Filter<FilterValid>,
 f_intent_valid: Filter<FilterValid>,
 event: &Identity,
-) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
 spanned!("server::internal_search_valid", {
 lperf_segment!(audit, "server::internal_search_valid", || {
 let se = SearchEvent::new_impersonate(event, f_valid, f_intent_valid);
@@ -395,7 +395,7 @@ pub trait QueryServerTransaction<'a> {
 filter: Filter<FilterInvalid>,
 filter_intent: Filter<FilterInvalid>,
 event: &Identity,
-) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
 let f_valid = filter
 .validate(self.get_schema())
 .map_err(OperationError::SchemaViolation)?;
@@ -433,7 +433,7 @@ pub trait QueryServerTransaction<'a> {
 &self,
 audit: &mut AuditScope,
 uuid: &Uuid,
-) -> Result<Entry<EntrySealed, EntryCommitted>, OperationError> {
+) -> Result<Arc<EntrySealedCommitted>, OperationError> {
 spanned!("server::internal_search_uuid", {
 lperf_segment!(audit, "server::internal_search_uuid", || {
 let filter = filter!(f_eq("uuid", PartialValue::new_uuid(*uuid)));
@@ -486,7 +486,7 @@ pub trait QueryServerTransaction<'a> {
 audit: &mut AuditScope,
 uuid: &Uuid,
 event: &Identity,
-) -> Result<Entry<EntrySealed, EntryCommitted>, OperationError> {
+) -> Result<Arc<EntrySealedCommitted>, OperationError> {
 spanned!("server::internal_search_uuid", {
 lperf_segment!(audit, "server::internal_search_uuid", || {
 let filter_intent = filter_all!(f_eq("uuid", PartialValue::new_uuid(*uuid)));
@@ -788,7 +788,7 @@ pub trait QueryServerTransaction<'a> {
 fn get_oauth2rs_set(
 &self,
 audit: &mut AuditScope,
-) -> Result<Vec<EntrySealedCommitted>, OperationError> {
+) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
 self.internal_search(
 audit,
 filter!(f_eq(
@@ -1303,7 +1303,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
 .iter()
 // Invalidate and assign change id's
-.map(|er| er.clone().invalidate(self.cid.clone()))
+.map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
 .collect();

 ltrace!(audit, "delete: candidates -> {:?}", candidates);
@@ -1655,7 +1655,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 // and the new modified ents.
 let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
 .iter()
-.map(|er| er.clone().invalidate(self.cid.clone()))
+.map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
 .collect();

 candidates
@@ -1740,17 +1744,21 @@ impl<'a> QueryServerWriteTransaction<'a> {
 // schema or acp requires reload. Remember, this is a modify, so we need to check
 // pre and post cands.
 if !self.changed_schema.get() {
-self.changed_schema
-.set(norm_cand.iter().chain(pre_candidates.iter()).any(|e| {
-e.attribute_equality("class", &PVCLASS_CLASSTYPE)
-|| e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
-}))
+self.changed_schema.set(
+norm_cand
+.iter()
+.chain(pre_candidates.iter().map(|e| e.as_ref()))
+.any(|e| {
+e.attribute_equality("class", &PVCLASS_CLASSTYPE)
+|| e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
+}),
+)
 }
 if !self.changed_acp.get() {
 self.changed_acp.set(
 norm_cand
 .iter()
-.chain(pre_candidates.iter())
+.chain(pre_candidates.iter().map(|e| e.as_ref()))
 .any(|e| e.attribute_equality("class", &PVCLASS_ACP)),
 )
 }
@@ -1758,7 +1762,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 self.changed_oauth2.set(
 norm_cand
 .iter()
-.chain(pre_candidates.iter())
+.chain(pre_candidates.iter().map(|e| e.as_ref()))
 .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)),
 )
 }
@@ -1768,8 +1772,8 @@ impl<'a> QueryServerWriteTransaction<'a> {
 (*cu).extend(
 norm_cand
 .iter()
-.chain(pre_candidates.iter())
-.map(|e| e.get_uuid()),
+.map(|e| e.get_uuid())
+.chain(pre_candidates.iter().map(|e| e.get_uuid())),
 );
 }

@@ -1822,7 +1826,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 self.search(audit, &se).map(|vs| {
 vs.into_iter()
 .map(|e| {
-let writeable = e.clone().invalidate(self.cid.clone());
+let writeable = e.as_ref().clone().invalidate(self.cid.clone());
 (e, writeable)
 })
 .collect()
@@ -1839,7 +1843,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 pub(crate) fn internal_batch_modify(
 &self,
 audit: &mut AuditScope,
-pre_candidates: Vec<Entry<EntrySealed, EntryCommitted>>,
+pre_candidates: Vec<Arc<EntrySealedCommitted>>,
 candidates: Vec<Entry<EntryInvalid, EntryCommitted>>,
 ) -> Result<(), OperationError> {
 lperf_segment!(audit, "server::internal_batch_modify", || {
@@ -1890,17 +1894,21 @@ impl<'a> QueryServerWriteTransaction<'a> {
 })?;

 if !self.changed_schema.get() {
-self.changed_schema
-.set(norm_cand.iter().chain(pre_candidates.iter()).any(|e| {
-e.attribute_equality("class", &PVCLASS_CLASSTYPE)
-|| e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
-}))
+self.changed_schema.set(
+norm_cand
+.iter()
+.chain(pre_candidates.iter().map(|e| e.as_ref()))
+.any(|e| {
+e.attribute_equality("class", &PVCLASS_CLASSTYPE)
+|| e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
+}),
+)
 }
 if !self.changed_acp.get() {
 self.changed_acp.set(
 norm_cand
 .iter()
-.chain(pre_candidates.iter())
+.chain(pre_candidates.iter().map(|e| e.as_ref()))
 .any(|e| e.attribute_equality("class", &PVCLASS_ACP)),
 )
 }
@@ -1916,8 +1924,8 @@ impl<'a> QueryServerWriteTransaction<'a> {
 (*cu).extend(
 norm_cand
 .iter()
-.chain(pre_candidates.iter())
-.map(|e| e.get_uuid()),
+.map(|e| e.get_uuid())
+.chain(pre_candidates.iter().map(|e| e.get_uuid())),
 );
 }
 ltrace!(
@@ -1958,7 +1966,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 // Change the value type.
 let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
 .iter()
-.map(|er| er.clone().invalidate(self.cid.clone()))
+.map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
 .collect();

 candidates.iter_mut().try_for_each(|er| {
@@ -2772,6 +2780,7 @@ mod tests {
 use crate::modify::{Modify, ModifyList};
 use crate::prelude::*;
 use kanidm_proto::v1::SchemaError;
+use std::sync::Arc;
 use std::time::Duration;

 #[test]
@@ -2810,7 +2819,7 @@ mod tests {
 debug!("--> {:?}", r2);
 assert!(r2.len() == 1);

-let expected = unsafe { vec![e.into_sealed_committed()] };
+let expected = unsafe { vec![Arc::new(e.into_sealed_committed())] };

 assert_eq!(r2, expected);

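On the write paths shown above, the shared entries still have to be copied before modification, but only for the candidates actually being changed: call sites such as er.as_ref().clone().invalidate(...) take an owned copy out of the Arc at the point of mutation, while read-only consumers keep borrowing through as_ref(). A rough sketch of that clone-at-modify pattern follows, using simplified stand-in types rather than the real kanidm entry state machine.

    use std::sync::Arc;

    #[derive(Clone, Debug)]
    struct SealedEntry {
        attrs: Vec<(String, String)>,
    }

    #[derive(Debug)]
    struct InvalidEntry {
        attrs: Vec<(String, String)>,
    }

    impl SealedEntry {
        // Read-only consumers just borrow the shared entry.
        fn name(&self) -> Option<&str> {
            self.attrs
                .iter()
                .find(|(k, _)| k == "name")
                .map(|(_, v)| v.as_str())
        }

        // Mutation requires an owned copy, taken only when needed.
        fn invalidate(self) -> InvalidEntry {
            InvalidEntry { attrs: self.attrs }
        }
    }

    fn main() {
        let pre: Vec<Arc<SealedEntry>> = vec![Arc::new(SealedEntry {
            attrs: vec![("name".into(), "claire".into())],
        })];

        // Cheap, shared reads.
        assert_eq!(pre[0].as_ref().name(), Some("claire"));

        // Deep clone happens only for the entries being modified.
        let mut candidates: Vec<InvalidEntry> = pre
            .iter()
            .map(|er| er.as_ref().clone().invalidate())
            .collect();
        candidates[0].attrs.push(("desc".into(), "modified".into()));
        println!("{:?}", candidates[0]);
    }

With this arrangement the deep-clone cost is paid once per modified entry rather than once per cached or searched entry, which is the footprint reduction the commit title refers to.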