Entry Arc Tracking to reduce memory footprint (#579)

Firstyear 2021-09-17 12:05:33 +10:00 committed by GitHub
parent d2bb9cead4
commit 2fbc92668c
17 changed files with 151 additions and 123 deletions
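The diff below repeatedly swaps owned Entry<EntrySealed, EntryCommitted> values (and the Box copies held in the backend caches) for shared Arc<EntrySealedCommitted> handles, so the entry cache, search results, and the access-control paths all reference a single allocation instead of each keeping a deep copy. A minimal sketch of that idea, using a simplified Entry struct and a plain HashMap in place of the ARCache (both stand-ins are assumptions for illustration, not the kanidm types):

use std::collections::HashMap;
use std::sync::Arc;

// Stand-in for the sealed, committed entry type; the real one holds attribute maps.
#[derive(Clone, Debug)]
struct Entry {
    id: u64,
    name: String,
}

// A toy cache keyed by entry id, holding shared handles rather than owned copies.
struct EntryCache {
    inner: HashMap<u64, Arc<Entry>>,
}

impl EntryCache {
    fn new() -> Self {
        EntryCache { inner: HashMap::new() }
    }

    fn insert(&mut self, e: Entry) -> Arc<Entry> {
        let shared = Arc::new(e);
        self.inner.insert(shared.id, Arc::clone(&shared));
        shared
    }

    // A hit hands back another reference-counted pointer: no deep clone of the attrs.
    fn get(&self, id: u64) -> Option<Arc<Entry>> {
        self.inner.get(&id).map(Arc::clone)
    }
}

fn main() {
    let mut cache = EntryCache::new();
    cache.insert(Entry { id: 1, name: "william".to_string() });

    // The "search result" shares the same allocation the cache holds.
    let result: Vec<Arc<Entry>> = cache.get(1).into_iter().collect();
    assert_eq!(Arc::strong_count(&result[0]), 2);

    // Mutation still requires an owned copy, mirroring the as_ref().clone() calls in the diff.
    let mut writable = result[0].as_ref().clone();
    writable.name.push_str("-modified");
    println!("{:?} / {:?}", result[0], writable);
}

The strong-count assertion only illustrates that the cache and the result vector share one allocation; cloning the Arc is a pointer copy plus a reference-count increment, which is where the memory saving comes from.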

@@ -23,6 +23,7 @@ use std::collections::BTreeSet;
 // use hashbrown::HashSet;
 use std::cell::Cell;
 use std::ops::DerefMut;
+use std::sync::Arc;
 use uuid::Uuid;
 use crate::entry::{Entry, EntryCommitted, EntryInit, EntryNew, EntryReduced, EntrySealed};
@@ -498,8 +499,8 @@ pub trait AccessControlsTransaction<'a> {
         &self,
         audit: &mut AuditScope,
         se: &SearchEvent,
-        entries: Vec<Entry<EntrySealed, EntryCommitted>>,
-    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+        entries: Vec<Arc<EntrySealedCommitted>>,
+    ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
         // If this is an internal search, return our working set.
         let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &se.ident.origin {
             IdentType::Internal => {
@@ -556,7 +557,7 @@ pub trait AccessControlsTransaction<'a> {
         let requested_attrs: BTreeSet<&str> = se.filter_orig.get_attr_set();
         // For each entry
-        let allowed_entries: Vec<Entry<EntrySealed, EntryCommitted>> = spanned!(
+        let allowed_entries: Vec<Arc<EntrySealedCommitted>> = spanned!(
             "access::search_filter_entries<allowed_entries>",
             {
                 lperf_segment!(
@@ -640,7 +641,7 @@ pub trait AccessControlsTransaction<'a> {
         &self,
         audit: &mut AuditScope,
         se: &SearchEvent,
-        entries: Vec<Entry<EntrySealed, EntryCommitted>>,
+        entries: Vec<Arc<EntrySealedCommitted>>,
     ) -> Result<Vec<Entry<EntryReduced, EntryCommitted>>, OperationError> {
         // If this is an internal search, do nothing. This can occur in some test cases ONLY
         let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &se.ident.origin {
@@ -651,7 +652,7 @@ pub trait AccessControlsTransaction<'a> {
                 // In tests we just push everything back.
                 return Ok(entries
                     .into_iter()
-                    .map(|e| unsafe { e.into_reduced() })
+                    .map(|e| unsafe { e.as_ref().clone().into_reduced() })
                     .collect());
             } else {
                 // In production we can't risk leaking data here, so we return
@@ -838,7 +839,7 @@ pub trait AccessControlsTransaction<'a> {
         &self,
         audit: &mut AuditScope,
         me: &ModifyEvent,
-        entries: &[Entry<EntrySealed, EntryCommitted>],
+        entries: &[Arc<EntrySealedCommitted>],
     ) -> Result<bool, OperationError> {
         let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &me.ident.origin {
             IdentType::Internal => {
@@ -1164,7 +1165,7 @@ pub trait AccessControlsTransaction<'a> {
         &self,
         audit: &mut AuditScope,
         de: &DeleteEvent,
-        entries: &[Entry<EntrySealed, EntryCommitted>],
+        entries: &[Arc<EntrySealedCommitted>],
     ) -> Result<bool, OperationError> {
         let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &de.ident.origin {
             IdentType::Internal => {
@@ -1478,6 +1479,7 @@ mod tests {
     };
     use crate::event::{CreateEvent, DeleteEvent, ModifyEvent, SearchEvent};
     use crate::prelude::*;
+    use std::sync::Arc;
     macro_rules! acp_from_entry_err {
         (
@@ -1945,8 +1947,8 @@ mod tests {
         );
         let ev1 = unsafe { e1.into_sealed_committed() };
-        let expect = vec![ev1.clone()];
-        let entries = vec![ev1];
+        let expect = vec![Arc::new(ev1.clone())];
+        let entries = vec![Arc::new(ev1)];
         // This acp basically is "allow access to stuff, but not this".
         test_acp_search!(
@@ -1974,12 +1976,12 @@ mod tests {
         let e2: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_TESTPERSON2);
         let ev2 = unsafe { e2.into_sealed_committed() };
-        let r_set = vec![ev1.clone(), ev2.clone()];
+        let r_set = vec![Arc::new(ev1.clone()), Arc::new(ev2.clone())];
         let se_admin = unsafe {
             SearchEvent::new_impersonate_entry_ser(JSON_ADMIN_V1, filter_all!(f_pres("name")))
         };
-        let ex_admin = vec![ev1.clone()];
+        let ex_admin = vec![Arc::new(ev1.clone())];
         let se_anon = unsafe {
             SearchEvent::new_impersonate_entry_ser(JSON_ANONYMOUS_V1, filter_all!(f_pres("name")))
@@ -2057,7 +2059,7 @@ mod tests {
         // class and uuid being present.
         let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_TESTPERSON1);
         let ev1 = unsafe { e1.into_sealed_committed() };
-        let r_set = vec![ev1.clone()];
+        let r_set = vec![Arc::new(ev1.clone())];
         let ex1: Entry<EntryInit, EntryNew> =
             Entry::unsafe_from_entry_str(JSON_TESTPERSON1_REDUCED);
@@ -2096,7 +2098,7 @@ mod tests {
         // class and uuid being present.
         let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_TESTPERSON1);
         let ev1 = unsafe { e1.into_sealed_committed() };
-        let r_set = vec![ev1.clone()];
+        let r_set = vec![Arc::new(ev1.clone())];
         let ex1: Entry<EntryInit, EntryNew> =
             Entry::unsafe_from_entry_str(JSON_TESTPERSON1_REDUCED);
@@ -2159,7 +2161,7 @@ mod tests {
     fn test_access_enforce_modify() {
         let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_TESTPERSON1);
         let ev1 = unsafe { e1.into_sealed_committed() };
-        let r_set = vec![ev1.clone()];
+        let r_set = vec![Arc::new(ev1.clone())];
         // Name present
         let me_pres = unsafe {
@@ -2443,7 +2445,7 @@ mod tests {
     fn test_access_enforce_delete() {
         let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(JSON_TESTPERSON1);
         let ev1 = unsafe { e1.into_sealed_committed() };
-        let r_set = vec![ev1.clone()];
+        let r_set = vec![Arc::new(ev1.clone())];
         let de_admin = unsafe {
             DeleteEvent::new_impersonate_entry_ser(

@@ -18,6 +18,7 @@ use hashbrown::HashMap;
 use std::collections::BTreeSet;
 use std::convert::TryInto;
 use std::ops::DerefMut;
+use std::sync::Arc;
 use std::time::Duration;
 use uuid::Uuid;
@@ -49,7 +50,7 @@ enum NameCacheValue {
 pub struct IdlArcSqlite {
     db: IdlSqlite,
-    entry_cache: ARCache<u64, Box<Entry<EntrySealed, EntryCommitted>>>,
+    entry_cache: ARCache<u64, Arc<EntrySealedCommitted>>,
     idl_cache: ARCache<IdlCacheKey, Box<IDLBitRange>>,
     name_cache: ARCache<NameCacheKey, NameCacheValue>,
     op_ts_max: CowCell<Option<Duration>>,
@@ -59,7 +60,7 @@ pub struct IdlArcSqlite {
 pub struct IdlArcSqliteReadTransaction<'a> {
     db: IdlSqliteReadTransaction,
-    entry_cache: ARCacheReadTxn<'a, u64, Box<Entry<EntrySealed, EntryCommitted>>>,
+    entry_cache: ARCacheReadTxn<'a, u64, Arc<EntrySealedCommitted>>,
     idl_cache: ARCacheReadTxn<'a, IdlCacheKey, Box<IDLBitRange>>,
     name_cache: ARCacheReadTxn<'a, NameCacheKey, NameCacheValue>,
     allids: CowCellReadTxn<IDLBitRange>,
@@ -67,7 +68,7 @@ pub struct IdlArcSqliteReadTransaction<'a> {
 pub struct IdlArcSqliteWriteTransaction<'a> {
     db: IdlSqliteWriteTransaction,
-    entry_cache: ARCacheWriteTxn<'a, u64, Box<Entry<EntrySealed, EntryCommitted>>>,
+    entry_cache: ARCacheWriteTxn<'a, u64, Arc<EntrySealedCommitted>>,
     idl_cache: ARCacheWriteTxn<'a, IdlCacheKey, Box<IDLBitRange>>,
     name_cache: ARCacheWriteTxn<'a, NameCacheKey, NameCacheValue>,
     op_ts_max: CowCellWriteTxn<'a, Option<Duration>>,
@@ -83,7 +84,7 @@ macro_rules! get_identry {
         $is_read_op:expr
     ) => {{
         spanned!("be::idl_arc_sqlite::get_identry", {
-            let mut result: Vec<Entry<_, _>> = Vec::new();
+            let mut result: Vec<Arc<EntrySealedCommitted>> = Vec::new();
             match $idl {
                 IdList::Partial(idli) | IdList::PartialThreshold(idli) | IdList::Indexed(idli) => {
                     let mut nidl = IDLBitRange::new();
@@ -92,7 +93,7 @@ macro_rules! get_identry {
                         // For all the id's in idl.
                         // is it in the cache?
                         match $self.entry_cache.get(&i) {
-                            Some(eref) => result.push(eref.as_ref().clone()),
+                            Some(eref) => result.push(eref.clone()),
                             None => unsafe { nidl.push_id(i) },
                         }
                     });
@@ -103,7 +104,7 @@ macro_rules! get_identry {
                     // Clone everything from db_result into the cache.
                     if $is_read_op {
                         db_result.iter().for_each(|e| {
-                            $self.entry_cache.insert(e.get_id(), Box::new(e.clone()));
+                            $self.entry_cache.insert(e.get_id(), e.clone());
                         });
                     }
                     // Merge the two vecs
@@ -119,7 +120,7 @@ macro_rules! get_identry {
                 (&idli)
                     .into_iter()
                     .for_each(|i| match $self.entry_cache.get(&i) {
-                        Some(eref) => result.push(eref.as_ref().clone()),
+                        Some(eref) => result.push(eref.clone()),
                         None => unsafe { nidl.push_id(i) },
                     });
@@ -324,7 +325,7 @@ pub trait IdlArcSqliteTransaction {
     fn get_identry(
         &mut self,
         idl: &IdList,
-    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError>;
+    ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError>;
     // ! TRACING INTEGRATED
     fn get_identry_raw(&self, idl: &IdList) -> Result<Vec<IdRawEntry>, OperationError>;
@@ -379,7 +380,7 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> {
     fn get_identry(
         &mut self,
         idl: &IdList,
-    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+    ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
         get_identry!(self, idl, true)
     }
@@ -470,7 +471,7 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> {
     fn get_identry(
         &mut self,
         idl: &IdList,
-    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+    ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
         get_identry!(self, idl, false)
     }
@@ -665,7 +666,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
             } else {
                 (*self.allids).insert_id(e.get_id());
                 self.entry_cache
-                    .insert_dirty(e.get_id(), Box::new(e.clone()));
+                    .insert_dirty(e.get_id(), Arc::new(e.clone()));
                 Ok(())
             }
         })
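The get_identry! macro above is a read-through lookup: probe the entry cache per id, gather the misses into a fresh id list, fetch those from sqlite, and on read operations publish the fetched entries back into the cache as Arc handles. A simplified sketch of that flow, with a HashMap standing in for the ARCache transaction and a closure standing in for the sqlite fetch (both simplifications are assumptions, not the real backend API):

use std::collections::HashMap;
use std::sync::Arc;

#[derive(Clone, Debug)]
struct Entry {
    id: u64,
}

// Read-through lookup: cache hits are cheap Arc clones, misses go to the backing
// store and are re-inserted as shared handles so later reads also avoid deep copies.
fn get_identry(
    cache: &mut HashMap<u64, Arc<Entry>>,
    ids: &[u64],
    db_fetch: impl Fn(&[u64]) -> Vec<Entry>,
    is_read_op: bool,
) -> Vec<Arc<Entry>> {
    let mut result = Vec::new();
    let mut missing = Vec::new();

    for id in ids {
        match cache.get(id) {
            Some(eref) => result.push(eref.clone()), // pointer bump, not an entry copy
            None => missing.push(*id),
        }
    }

    let db_result: Vec<Arc<Entry>> = db_fetch(&missing).into_iter().map(Arc::new).collect();
    if is_read_op {
        for e in &db_result {
            cache.insert(e.id, e.clone());
        }
    }

    result.extend(db_result);
    result
}

fn main() {
    let mut cache = HashMap::new();
    cache.insert(1, Arc::new(Entry { id: 1 }));

    let found = get_identry(
        &mut cache,
        &[1, 2],
        |ids| ids.iter().map(|id| Entry { id: *id }).collect(),
        true,
    );
    assert_eq!(found.len(), 2);
    assert!(cache.contains_key(&2)); // the miss was cached for the next reader
}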

@@ -11,6 +11,7 @@ use rusqlite::Connection;
 use rusqlite::OpenFlags;
 use rusqlite::OptionalExtension;
 use std::convert::{TryFrom, TryInto};
+use std::sync::Arc;
 use std::time::Duration;
 use tracing::trace;
 use uuid::Uuid;
@@ -113,14 +114,11 @@ pub trait IdlSqliteTransaction {
     fn get_conn(&self) -> &r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
     // ! TRACING INTEGRATED
-    fn get_identry(
-        &self,
-        idl: &IdList,
-    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+    fn get_identry(&self, idl: &IdList) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
         spanned!("be::idl_sqlite::get_identry", {
             self.get_identry_raw(idl)?
                 .into_iter()
-                .map(|ide| ide.into_entry())
+                .map(|ide| ide.into_entry().map(|e| Arc::new(e)))
                 .collect()
         })
     }

@@ -542,7 +542,7 @@ pub trait BackendTransaction {
         au: &mut AuditScope,
         erl: &Limits,
         filt: &Filter<FilterValidResolved>,
-    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+    ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
         let _entered = trace_span!("be::search").entered();
         // Unlike DS, even if we don't get the index back, we can just pass
         // to the in-memory filter test and be done.
@@ -1014,7 +1014,7 @@ impl<'a> BackendWriteTransaction<'a> {
     pub fn modify(
         &self,
         au: &mut AuditScope,
-        pre_entries: &[Entry<EntrySealed, EntryCommitted>],
+        pre_entries: &[Arc<EntrySealedCommitted>],
         post_entries: &[Entry<EntrySealed, EntryCommitted>],
     ) -> Result<(), OperationError> {
         lperf_trace_segment!(au, "be::modify", || {
@@ -1067,14 +1067,14 @@ impl<'a> BackendWriteTransaction<'a> {
             pre_entries
                 .iter()
                 .zip(post_entries.iter())
-                .try_for_each(|(pre, post)| self.entry_index(au, Some(pre), Some(post)))
+                .try_for_each(|(pre, post)| self.entry_index(au, Some(pre.as_ref()), Some(post)))
         })
     }
     pub fn delete(
         &self,
         au: &mut AuditScope,
-        entries: &[Entry<EntrySealed, EntryCommitted>],
+        entries: &[Arc<EntrySealedCommitted>],
     ) -> Result<(), OperationError> {
         lperf_trace_segment!(au, "be::delete", || {
             if entries.is_empty() {
@@ -1688,6 +1688,7 @@ mod tests {
     use idlset::v2::IDLBitRange;
     use std::fs;
    use std::iter::FromIterator;
+    use std::sync::Arc;
     use uuid::Uuid;
     use super::super::audit::AuditScope;
@@ -1874,20 +1875,22 @@ mod tests {
         let r1 = results.remove(0);
         let r2 = results.remove(0);
-        let mut r1 = unsafe { r1.into_invalid() };
-        let mut r2 = unsafe { r2.into_invalid() };
+        let mut r1 = unsafe { r1.as_ref().clone().into_invalid() };
+        let mut r2 = unsafe { r2.as_ref().clone().into_invalid() };
         // Modify no id (err)
         // This is now impossible due to the state machine design.
         // However, with some unsafe ....
         let ue1 = unsafe { e1.clone().into_sealed_committed() };
-        assert!(be.modify(audit, &vec![ue1.clone()], &vec![ue1]).is_err());
+        assert!(be
+            .modify(audit, &vec![Arc::new(ue1.clone())], &vec![ue1])
+            .is_err());
         // Modify none
         assert!(be.modify(audit, &vec![], &vec![]).is_err());
         // Make some changes to r1, r2.
-        let pre1 = unsafe { r1.clone().into_sealed_committed() };
-        let pre2 = unsafe { r2.clone().into_sealed_committed() };
+        let pre1 = unsafe { Arc::new(r1.clone().into_sealed_committed()) };
+        let pre2 = unsafe { Arc::new(r2.clone().into_sealed_committed()) };
         r1.add_ava("desc", Value::from("modified"));
         r2.add_ava("desc", Value::from("modified"));
@@ -1908,7 +1911,7 @@ mod tests {
         assert!(be
             .modify(
                 audit,
-                &vec![vr1.clone(), pre2.clone()],
+                &vec![Arc::new(vr1.clone()), pre2.clone()],
                 &vec![vr1.clone(), vr2.clone()]
             )
             .is_ok());
@@ -1958,7 +1961,7 @@ mod tests {
         // Delete one
         assert!(be.delete(audit, &vec![r1.clone()]).is_ok());
-        assert!(!entry_exists!(audit, be, r1));
+        assert!(!entry_exists!(audit, be, r1.as_ref()));
         // delete none (no match filter)
         assert!(be.delete(audit, &vec![]).is_err());
@@ -1970,18 +1973,18 @@ mod tests {
         e4.add_ava("userid", Value::from("amy"));
         e4.add_ava("uuid", Value::from("21d816b5-1f6a-4696-b7c1-6ed06d22ed81"));
-        let ve4 = unsafe { e4.clone().into_sealed_committed() };
+        let ve4 = unsafe { Arc::new(e4.clone().into_sealed_committed()) };
         assert!(be.delete(audit, &vec![ve4]).is_err());
-        assert!(entry_exists!(audit, be, r2));
-        assert!(entry_exists!(audit, be, r3));
+        assert!(entry_exists!(audit, be, r2.as_ref()));
+        assert!(entry_exists!(audit, be, r3.as_ref()));
         // delete batch
         assert!(be.delete(audit, &vec![r2.clone(), r3.clone()]).is_ok());
-        assert!(!entry_exists!(audit, be, r2));
-        assert!(!entry_exists!(audit, be, r3));
+        assert!(!entry_exists!(audit, be, r2.as_ref()));
+        assert!(!entry_exists!(audit, be, r3.as_ref()));
         // delete none (no entries left)
         // see fn delete for why this is ok, not err
@@ -2233,6 +2236,7 @@ mod tests {
         let e1 = unsafe { e1.into_sealed_new() };
         let rset = be.create(audit, vec![e1.clone()]).unwrap();
+        let rset: Vec<_> = rset.into_iter().map(Arc::new).collect();
         idl_state!(be, "name", IndexType::Equality, "william", Some(vec![1]));
@@ -2303,6 +2307,7 @@ mod tests {
             .create(audit, vec![e1.clone(), e2.clone(), e3.clone()])
             .unwrap();
         rset.remove(1);
+        let rset: Vec<_> = rset.into_iter().map(Arc::new).collect();
         // Now remove e1, e3.
         be.delete(audit, &rset).unwrap();
@@ -2353,8 +2358,9 @@ mod tests {
         let e1 = unsafe { e1.into_sealed_new() };
         let rset = be.create(audit, vec![e1.clone()]).unwrap();
+        let rset: Vec<_> = rset.into_iter().map(Arc::new).collect();
         // Now, alter the new entry.
-        let mut ce1 = unsafe { rset[0].clone().into_invalid() };
+        let mut ce1 = unsafe { rset[0].as_ref().clone().into_invalid() };
         // add something.
         ce1.add_ava("tb", Value::from("test"));
         // remove something.
@@ -2398,8 +2404,9 @@ mod tests {
         let e1 = unsafe { e1.into_sealed_new() };
         let rset = be.create(audit, vec![e1.clone()]).unwrap();
+        let rset: Vec<_> = rset.into_iter().map(Arc::new).collect();
         // Now, alter the new entry.
-        let mut ce1 = unsafe { rset[0].clone().into_invalid() };
+        let mut ce1 = unsafe { rset[0].as_ref().clone().into_invalid() };
         ce1.purge_ava("name");
         ce1.purge_ava("uuid");
         ce1.add_ava("name", Value::from("claire"));

@@ -49,6 +49,7 @@ use std::collections::BTreeSet;
 // use hashbrown::HashMap as Map;
 use hashbrown::HashMap;
 use smartstring::alias::String as AttrString;
+use std::sync::Arc;
 use time::OffsetDateTime;
 use uuid::Uuid;
@@ -90,7 +91,7 @@ lazy_static! {
 pub type EntrySealedCommitted = Entry<EntrySealed, EntryCommitted>;
 pub type EntryInvalidCommitted = Entry<EntryInvalid, EntryCommitted>;
-pub type EntryTuple = (EntrySealedCommitted, EntryInvalidCommitted);
+pub type EntryTuple = (Arc<EntrySealedCommitted>, EntryInvalidCommitted);
 // Entry should have a lifecycle of types. This is Raw (modifiable) and Entry (verified).
 // This way, we can move between them, but only certain actions are possible on either
@@ -1323,31 +1324,30 @@ impl Entry<EntrySealed, EntryCommitted> {
     /// Given a set of attributes that are allowed to be seen on this entry, process and remove
     /// all other values that are NOT allowed in this query.
     pub fn reduce_attributes(
-        self,
+        &self,
         allowed_attrs: &BTreeSet<&str>,
     ) -> Entry<EntryReduced, EntryCommitted> {
         // Remove all attrs from our tree that are NOT in the allowed set.
-        let Entry {
-            valid: s_valid,
-            state: s_state,
-            attrs: s_attrs,
-        } = self;
-        let f_attrs: Map<_, _> = s_attrs
-            .into_iter()
+        let f_attrs: Map<_, _> = self
+            .attrs
+            .iter()
             .filter_map(|(k, v)| {
                 if allowed_attrs.contains(k.as_str()) {
-                    Some((k, v))
+                    Some((k.clone(), v.clone()))
                 } else {
                     None
                 }
             })
            .collect();
+        let valid = EntryReduced {
+            uuid: self.valid.uuid,
+        };
+        let state = self.state.clone();
         Entry {
-            valid: EntryReduced { uuid: s_valid.uuid },
-            state: s_state,
+            valid,
+            state,
             attrs: f_attrs,
         }
     }
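With entries handed around behind Arc, any code that needs an owned or mutable Entry clones out of the shared handle first, which is why as_ref().clone().into_invalid() and as_ref().clone().into_reduced() appear throughout this diff, and why reduce_attributes above now borrows &self instead of consuming the entry. A small sketch of that clone-before-write idiom on a simplified struct (the type and field are assumptions, not kanidm's Entry):

use std::sync::Arc;

#[derive(Clone, Debug)]
struct Entry {
    desc: String,
}

fn main() {
    let shared = Arc::new(Entry { desc: "original".to_string() });
    let reader = Arc::clone(&shared);

    // To mutate, clone an owned value out of the Arc; the shared copy is untouched.
    let mut writable = shared.as_ref().clone();
    writable.desc = "modified".to_string();

    assert_eq!(reader.desc, "original");
    assert_eq!(writable.desc, "modified");

    // Arc::make_mut is an alternative: it clones only if other handles still exist.
    let mut cow = Arc::clone(&shared);
    Arc::make_mut(&mut cow).desc = "cow".to_string();
    assert_eq!(shared.desc, "original");
    assert_eq!(cow.desc, "cow");
}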

@@ -36,6 +36,9 @@ use std::collections::BTreeSet;
 use std::time::Duration;
 use uuid::Uuid;
 
+#[cfg(test)]
+use std::sync::Arc;
+
 #[derive(Debug)]
 pub struct SearchResult {
     entries: Vec<ProtoEntry>,
@@ -224,7 +227,7 @@ impl SearchEvent {
     #[cfg(test)]
     pub unsafe fn new_impersonate_entry(
-        e: Entry<EntrySealed, EntryCommitted>,
+        e: Arc<Entry<EntrySealed, EntryCommitted>>,
         filter: Filter<FilterInvalid>,
     ) -> Self {
         SearchEvent {
@@ -251,7 +254,7 @@ impl SearchEvent {
     #[cfg(test)]
     /* Impersonate a request for recycled objects */
     pub unsafe fn new_rec_impersonate_entry(
-        e: Entry<EntrySealed, EntryCommitted>,
+        e: Arc<Entry<EntrySealed, EntryCommitted>>,
         filter: Filter<FilterInvalid>,
     ) -> Self {
         let filter_orig = filter.into_valid();
@@ -267,7 +270,7 @@ impl SearchEvent {
     #[cfg(test)]
     /* Impersonate an external request AKA filter ts + recycle */
     pub unsafe fn new_ext_impersonate_entry(
-        e: Entry<EntrySealed, EntryCommitted>,
+        e: Arc<Entry<EntrySealed, EntryCommitted>>,
         filter: Filter<FilterInvalid>,
     ) -> Self {
         SearchEvent {
@@ -457,7 +460,7 @@ impl DeleteEvent {
     #[cfg(test)]
     pub unsafe fn new_impersonate_entry(
-        e: Entry<EntrySealed, EntryCommitted>,
+        e: Arc<Entry<EntrySealed, EntryCommitted>>,
         filter: Filter<FilterInvalid>,
     ) -> Self {
         DeleteEvent {
@@ -646,7 +649,7 @@ impl ModifyEvent {
     #[cfg(test)]
     pub unsafe fn new_impersonate_entry(
-        e: Entry<EntrySealed, EntryCommitted>,
+        e: Arc<Entry<EntrySealed, EntryCommitted>>,
         filter: Filter<FilterInvalid>,
         modlist: ModifyList<ModifyInvalid>,
     ) -> Self {
@@ -954,7 +957,7 @@ impl ReviveRecycledEvent {
     #[cfg(test)]
     pub unsafe fn new_impersonate_entry(
-        e: Entry<EntrySealed, EntryCommitted>,
+        e: Arc<Entry<EntrySealed, EntryCommitted>>,
         filter: Filter<FilterInvalid>,
     ) -> Self {
         ReviveRecycledEvent {

@@ -6,6 +6,7 @@
 use crate::prelude::*;
 use kanidm_proto::v1::UserAuthToken;
 use std::hash::Hash;
+use std::sync::Arc;
@@ -41,7 +42,7 @@ impl Limits {
 #[derive(Debug, Clone)]
 /// Metadata and the entry of the current Identity which is an external account/user.
 pub struct IdentUser {
-    pub entry: Entry<EntrySealed, EntryCommitted>,
+    pub entry: Arc<Entry<EntrySealed, EntryCommitted>>,
     // IpAddr?
     // Other metadata?
 }
@@ -105,7 +106,7 @@ impl Identity {
     }
     #[cfg(test)]
-    pub fn from_impersonate_entry(entry: Entry<EntrySealed, EntryCommitted>) -> Self {
+    pub fn from_impersonate_entry(entry: Arc<Entry<EntrySealed, EntryCommitted>>) -> Self {
         Identity {
             origin: IdentType::User(IdentUser { entry }),
             limits: Limits::unlimited(),
@@ -115,7 +116,7 @@ impl Identity {
     #[cfg(test)]
     pub unsafe fn from_impersonate_entry_ser(e: &str) -> Self {
         let ei: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(e);
-        Self::from_impersonate_entry(ei.into_sealed_committed())
+        Self::from_impersonate_entry(Arc::new(ei.into_sealed_committed()))
     }
     pub fn from_impersonate(ident: &Self) -> Self {

@@ -45,7 +45,10 @@ macro_rules! try_from_account_e {
             e
         })?;
         // Now convert the group entries to groups.
-        let groups: Result<Vec<_>, _> = ges.iter().map(Group::try_from_entry).collect();
+        let groups: Result<Vec<_>, _> = ges
+            .iter()
+            .map(|e| Group::try_from_entry(e.as_ref()))
+            .collect();
         groups.map_err(|e| {
             admin_error!(?e, "failed to transform group entries to groups");
             e

@@ -12,6 +12,7 @@ use fernet::Fernet;
 use hashbrown::HashMap;
 use kanidm_proto::v1::UserAuthToken;
 use openssl::sha;
+use std::sync::Arc;
 use time::OffsetDateTime;
 use url::{Origin, Url};
 use webauthn_rs::base64_data::Base64UrlSafeData;
@@ -146,10 +147,10 @@ pub struct Oauth2ResourceServersWriteTransaction<'a> {
     inner: CowCellWriteTxn<'a, Oauth2RSInner>,
 }
-impl TryFrom<Vec<EntrySealedCommitted>> for Oauth2ResourceServers {
+impl TryFrom<Vec<Arc<EntrySealedCommitted>>> for Oauth2ResourceServers {
     type Error = OperationError;
-    fn try_from(value: Vec<EntrySealedCommitted>) -> Result<Self, Self::Error> {
+    fn try_from(value: Vec<Arc<EntrySealedCommitted>>) -> Result<Self, Self::Error> {
         let fernet =
             Fernet::new(&Fernet::generate_key()).ok_or(OperationError::CryptographyError)?;
         let oauth2rs = Oauth2ResourceServers {
@@ -181,7 +182,7 @@ impl Oauth2ResourceServers {
 }
 impl<'a> Oauth2ResourceServersWriteTransaction<'a> {
-    pub fn reload(&mut self, value: Vec<EntrySealedCommitted>) -> Result<(), OperationError> {
+    pub fn reload(&mut self, value: Vec<Arc<EntrySealedCommitted>>) -> Result<(), OperationError> {
         let rs_set: Result<HashMap<_, _>, _> = value
             .into_iter()
             .map(|ent| {

@@ -541,7 +541,7 @@ impl<'a> IdmServerAuthTransaction<'a> {
                 // typing and functionality so we can assess what auth types can
                 // continue, and helps to keep non-needed entry specific data
                 // out of the session tree.
-                let account = Account::try_from_entry_ro(au, &entry, &mut self.qs_read)?;
+                let account = Account::try_from_entry_ro(au, entry.as_ref(), &mut self.qs_read)?;
                 // Check the credential that the auth_session will attempt to
                 // use.
@@ -766,7 +766,7 @@ impl<'a> IdmServerAuthTransaction<'a> {
             .qs_read
             .internal_search_uuid(au, &uae.target)
             .and_then(|account_entry| {
-                UnixUserAccount::try_from_entry_ro(au, &account_entry, &mut self.qs_read)
+                UnixUserAccount::try_from_entry_ro(au, account_entry.as_ref(), &mut self.qs_read)
             })
             .map_err(|e| {
                 admin_error!("Failed to start auth unix -> {:?}", e);
@@ -850,7 +850,8 @@ impl<'a> IdmServerAuthTransaction<'a> {
         // if anonymous
         if lae.target == *UUID_ANONYMOUS {
-            let account = Account::try_from_entry_ro(au, &account_entry, &mut self.qs_read)?;
+            let account =
+                Account::try_from_entry_ro(au, account_entry.as_ref(), &mut self.qs_read)?;
             // Check if the anon account has been locked.
             if !account.is_within_valid_time(ct) {
                 lsecurity!(au, "Account is not within valid time period");
@@ -871,7 +872,7 @@ impl<'a> IdmServerAuthTransaction<'a> {
             }))
         } else {
             let account =
-                UnixUserAccount::try_from_entry_ro(au, &account_entry, &mut self.qs_read)?;
+                UnixUserAccount::try_from_entry_ro(au, account_entry.as_ref(), &mut self.qs_read)?;
             if !account.is_within_valid_time(ct) {
                 lsecurity!(au, "Account is not within valid time period");
@@ -917,7 +918,7 @@ impl<'a> IdmServerAuthTransaction<'a> {
                     e
                 })?;
             let anon_account =
-                Account::try_from_entry_ro(au, &anon_entry, &mut self.qs_read)?;
+                Account::try_from_entry_ro(au, anon_entry.as_ref(), &mut self.qs_read)?;
             Ok(Some(LdapBoundToken {
                 spn: account.spn,

@@ -389,7 +389,7 @@ macro_rules! try_from_account_group_e {
         ]));
         let ges: Vec<_> = $qs.internal_search($au, f)?;
         let groups: Result<Vec<_>, _> = iter::once(Ok(upg))
-            .chain(ges.iter().map(UnixGroup::try_from_entry))
+            .chain(ges.iter().map(|e| UnixGroup::try_from_entry(e.as_ref())))
             .collect();
         groups
     }

@@ -167,6 +167,8 @@ impl Plugin for AttrUnique {
             Err(e) => return vec![e],
         };
+        let all_cand: Vec<_> = all_cand.into_iter().map(|e| e.as_ref().clone()).collect();
+
         let uniqueattrs = {
             let schema = qs.get_schema();
             schema.get_attributes_unique()

@@ -10,7 +10,7 @@
 // As a result, we first need to run refint to clean up all dangling references, then memberof
 // fixes the graph of memberships
-use crate::entry::{Entry, EntryCommitted, EntryInvalid, EntrySealed};
+use crate::entry::{Entry, EntryCommitted, EntryInvalid, EntrySealed, EntryTuple};
 use crate::event::{CreateEvent, DeleteEvent, ModifyEvent};
 use crate::plugins::Plugin;
 use crate::prelude::*;
@@ -19,6 +19,7 @@ use crate::valueset::ValueSet;
 use kanidm_proto::v1::{ConsistencyError, OperationError};
 use hashbrown::HashMap;
+use std::sync::Arc;
 use uuid::Uuid;
 lazy_static! {
@@ -28,10 +29,6 @@ lazy_static! {
 pub struct MemberOf;
-type EntrySealedCommitted = Entry<EntrySealed, EntryCommitted>;
-type EntryInvalidCommitted = Entry<EntryInvalid, EntryCommitted>;
-type EntryTuple = (EntrySealedCommitted, EntryInvalidCommitted);
-
 fn do_memberof(
     au: &mut AuditScope,
     qs: &QueryServerWriteTransaction,
@@ -239,7 +236,7 @@ impl Plugin for MemberOf {
     fn post_modify(
         au: &mut AuditScope,
         qs: &QueryServerWriteTransaction,
-        pre_cand: &[Entry<EntrySealed, EntryCommitted>],
+        pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
         cand: &[Entry<EntrySealed, EntryCommitted>],
         _me: &ModifyEvent,
     ) -> Result<(), OperationError> {

@@ -7,6 +7,7 @@ use crate::entry::{Entry, EntryCommitted, EntryInvalid, EntryNew, EntrySealed};
 use crate::event::{CreateEvent, DeleteEvent, ModifyEvent};
 use crate::prelude::*;
 use kanidm_proto::v1::{ConsistencyError, OperationError};
+use std::sync::Arc;
 use tracing::trace_span;
 mod attrunique;
@@ -79,7 +80,7 @@ trait Plugin {
         au: &mut AuditScope,
         _qs: &QueryServerWriteTransaction,
         // List of what we modified that was valid?
-        _pre_cand: &[Entry<EntrySealed, EntryCommitted>],
+        _pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
         _cand: &[Entry<EntrySealed, EntryCommitted>],
         _ce: &ModifyEvent,
     ) -> Result<(), OperationError> {
@@ -350,7 +351,7 @@ impl Plugins {
     pub fn run_post_modify(
         au: &mut AuditScope,
         qs: &QueryServerWriteTransaction,
-        pre_cand: &[Entry<EntrySealed, EntryCommitted>],
+        pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
         cand: &[Entry<EntrySealed, EntryCommitted>],
         me: &ModifyEvent,
     ) -> Result<(), OperationError> {

@@ -20,6 +20,7 @@ use crate::filter::f_eq;
 use crate::modify::Modify;
 use crate::schema::SchemaTransaction;
 use kanidm_proto::v1::{ConsistencyError, PluginError};
+use std::sync::Arc;
 // NOTE: This *must* be after base.rs!!!
@@ -125,7 +126,7 @@ impl Plugin for ReferentialIntegrity {
     fn post_modify(
         au: &mut AuditScope,
         qs: &QueryServerWriteTransaction,
-        _pre_cand: &[Entry<EntrySealed, EntryCommitted>],
+        _pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
         _cand: &[Entry<EntrySealed, EntryCommitted>],
         me: &ModifyEvent,
     ) -> Result<(), OperationError> {

@@ -9,6 +9,7 @@ use crate::event::{CreateEvent, ModifyEvent};
 use crate::value::PartialValue;
 // use crate::value::{PartialValue, Value};
 use kanidm_proto::v1::{ConsistencyError, OperationError};
+use std::sync::Arc;
 pub struct Spn {}
@@ -131,7 +132,7 @@ impl Plugin for Spn {
         au: &mut AuditScope,
         qs: &QueryServerWriteTransaction,
         // List of what we modified that was valid?
-        pre_cand: &[Entry<EntrySealed, EntryCommitted>],
+        pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
         cand: &[Entry<EntrySealed, EntryCommitted>],
         _ce: &ModifyEvent,
     ) -> Result<(), OperationError> {

@@ -98,7 +98,7 @@ pub struct QueryServerWriteTransaction<'a> {
 pub(crate) struct ModifyPartial<'a> {
     norm_cand: Vec<Entry<EntrySealed, EntryCommitted>>,
-    pre_candidates: Vec<Entry<EntrySealed, EntryCommitted>>,
+    pre_candidates: Vec<Arc<Entry<EntrySealed, EntryCommitted>>>,
     me: &'a ModifyEvent,
 }
@@ -172,7 +172,7 @@ pub trait QueryServerTransaction<'a> {
         &self,
         audit: &mut AuditScope,
         se: &SearchEvent,
-    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+    ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
         spanned!("server::search", {
             lperf_segment!(audit, "server::search", || {
                 if se.ident.is_internal() {
@@ -346,7 +346,7 @@ pub trait QueryServerTransaction<'a> {
         &self,
         audit: &mut AuditScope,
         filter: Filter<FilterInvalid>,
-    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+    ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
         spanned!("server::internal_search", {
             lperf_segment!(audit, "server::internal_search", || {
                 let f_valid = filter
@@ -365,7 +365,7 @@ pub trait QueryServerTransaction<'a> {
         f_valid: Filter<FilterValid>,
         f_intent_valid: Filter<FilterValid>,
         event: &Identity,
-    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+    ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
         spanned!("server::internal_search_valid", {
             lperf_segment!(audit, "server::internal_search_valid", || {
                 let se = SearchEvent::new_impersonate(event, f_valid, f_intent_valid);
@@ -395,7 +395,7 @@ pub trait QueryServerTransaction<'a> {
         filter: Filter<FilterInvalid>,
         filter_intent: Filter<FilterInvalid>,
         event: &Identity,
-    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+    ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
         let f_valid = filter
             .validate(self.get_schema())
             .map_err(OperationError::SchemaViolation)?;
@@ -433,7 +433,7 @@ pub trait QueryServerTransaction<'a> {
         &self,
         audit: &mut AuditScope,
         uuid: &Uuid,
-    ) -> Result<Entry<EntrySealed, EntryCommitted>, OperationError> {
+    ) -> Result<Arc<EntrySealedCommitted>, OperationError> {
         spanned!("server::internal_search_uuid", {
             lperf_segment!(audit, "server::internal_search_uuid", || {
                 let filter = filter!(f_eq("uuid", PartialValue::new_uuid(*uuid)));
@@ -486,7 +486,7 @@ pub trait QueryServerTransaction<'a> {
         audit: &mut AuditScope,
         uuid: &Uuid,
         event: &Identity,
-    ) -> Result<Entry<EntrySealed, EntryCommitted>, OperationError> {
+    ) -> Result<Arc<EntrySealedCommitted>, OperationError> {
         spanned!("server::internal_search_uuid", {
             lperf_segment!(audit, "server::internal_search_uuid", || {
                 let filter_intent = filter_all!(f_eq("uuid", PartialValue::new_uuid(*uuid)));
@@ -788,7 +788,7 @@ pub trait QueryServerTransaction<'a> {
     fn get_oauth2rs_set(
         &self,
         audit: &mut AuditScope,
-    ) -> Result<Vec<EntrySealedCommitted>, OperationError> {
+    ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
         self.internal_search(
             audit,
             filter!(f_eq(
@@ -1303,7 +1303,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
             .iter()
             // Invalidate and assign change id's
-            .map(|er| er.clone().invalidate(self.cid.clone()))
+            .map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
             .collect();
         ltrace!(audit, "delete: candidates -> {:?}", candidates);
@@ -1655,7 +1655,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         // and the new modified ents.
         let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
             .iter()
-            .map(|er| er.clone().invalidate(self.cid.clone()))
+            .map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
             .collect();
         candidates
@@ -1740,17 +1740,21 @@ impl<'a> QueryServerWriteTransaction<'a> {
         // schema or acp requires reload. Remember, this is a modify, so we need to check
         // pre and post cands.
         if !self.changed_schema.get() {
-            self.changed_schema
-                .set(norm_cand.iter().chain(pre_candidates.iter()).any(|e| {
-                    e.attribute_equality("class", &PVCLASS_CLASSTYPE)
-                        || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
-                }))
+            self.changed_schema.set(
+                norm_cand
+                    .iter()
+                    .chain(pre_candidates.iter().map(|e| e.as_ref()))
+                    .any(|e| {
+                        e.attribute_equality("class", &PVCLASS_CLASSTYPE)
+                            || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
+                    }),
+            )
         }
         if !self.changed_acp.get() {
             self.changed_acp.set(
                 norm_cand
                     .iter()
-                    .chain(pre_candidates.iter())
+                    .chain(pre_candidates.iter().map(|e| e.as_ref()))
                    .any(|e| e.attribute_equality("class", &PVCLASS_ACP)),
             )
         }
@@ -1758,7 +1762,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
             self.changed_oauth2.set(
                 norm_cand
                     .iter()
-                    .chain(pre_candidates.iter())
+                    .chain(pre_candidates.iter().map(|e| e.as_ref()))
                     .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)),
             )
         }
@@ -1768,8 +1772,8 @@ impl<'a> QueryServerWriteTransaction<'a> {
             (*cu).extend(
                 norm_cand
                     .iter()
-                    .chain(pre_candidates.iter())
-                    .map(|e| e.get_uuid()),
+                    .map(|e| e.get_uuid())
+                    .chain(pre_candidates.iter().map(|e| e.get_uuid())),
             );
         }
@@ -1822,7 +1826,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         self.search(audit, &se).map(|vs| {
             vs.into_iter()
                 .map(|e| {
-                    let writeable = e.clone().invalidate(self.cid.clone());
+                    let writeable = e.as_ref().clone().invalidate(self.cid.clone());
                     (e, writeable)
                 })
                 .collect()
@@ -1839,7 +1843,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
     pub(crate) fn internal_batch_modify(
         &self,
         audit: &mut AuditScope,
-        pre_candidates: Vec<Entry<EntrySealed, EntryCommitted>>,
+        pre_candidates: Vec<Arc<EntrySealedCommitted>>,
         candidates: Vec<Entry<EntryInvalid, EntryCommitted>>,
     ) -> Result<(), OperationError> {
         lperf_segment!(audit, "server::internal_batch_modify", || {
@@ -1890,17 +1894,21 @@ impl<'a> QueryServerWriteTransaction<'a> {
             })?;
         if !self.changed_schema.get() {
-            self.changed_schema
-                .set(norm_cand.iter().chain(pre_candidates.iter()).any(|e| {
-                    e.attribute_equality("class", &PVCLASS_CLASSTYPE)
-                        || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
-                }))
+            self.changed_schema.set(
+                norm_cand
+                    .iter()
+                    .chain(pre_candidates.iter().map(|e| e.as_ref()))
+                    .any(|e| {
+                        e.attribute_equality("class", &PVCLASS_CLASSTYPE)
+                            || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
+                    }),
+            )
         }
         if !self.changed_acp.get() {
             self.changed_acp.set(
                 norm_cand
                     .iter()
-                    .chain(pre_candidates.iter())
+                    .chain(pre_candidates.iter().map(|e| e.as_ref()))
                    .any(|e| e.attribute_equality("class", &PVCLASS_ACP)),
             )
         }
@@ -1916,8 +1924,8 @@ impl<'a> QueryServerWriteTransaction<'a> {
             (*cu).extend(
                 norm_cand
                     .iter()
-                    .chain(pre_candidates.iter())
-                    .map(|e| e.get_uuid()),
+                    .map(|e| e.get_uuid())
+                    .chain(pre_candidates.iter().map(|e| e.get_uuid())),
             );
         }
         ltrace!(
@@ -1958,7 +1966,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         // Change the value type.
         let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
             .iter()
-            .map(|er| er.clone().invalidate(self.cid.clone()))
+            .map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
            .collect();
         candidates.iter_mut().try_for_each(|er| {
@@ -2772,6 +2780,7 @@ mod tests {
     use crate::modify::{Modify, ModifyList};
     use crate::prelude::*;
     use kanidm_proto::v1::SchemaError;
+    use std::sync::Arc;
     use std::time::Duration;
     #[test]
@@ -2810,7 +2819,7 @@ mod tests {
         debug!("--> {:?}", r2);
         assert!(r2.len() == 1);
-        let expected = unsafe { vec![e.into_sealed_committed()] };
+        let expected = unsafe { vec![Arc::new(e.into_sealed_committed())] };
         assert_eq!(r2, expected);