From 1080e5d0b402d27f02643ca4a64f50dcb2d975e9 Mon Sep 17 00:00:00 2001 From: Firstyear Date: Thu, 26 Aug 2021 11:48:03 +1000 Subject: [PATCH] Start to remove audit scope :) (#574) --- kanidmd/src/lib/be/idl_arc_sqlite.rs | 779 ++++++++++----------------- kanidmd/src/lib/be/idl_sqlite.rs | 466 ++++++---------- kanidmd/src/lib/be/mod.rs | 402 +++++--------- kanidmd/src/lib/core/mod.rs | 8 +- kanidmd/src/lib/entry.rs | 14 +- kanidmd/src/lib/server.rs | 14 +- 6 files changed, 591 insertions(+), 1092 deletions(-) diff --git a/kanidmd/src/lib/be/idl_arc_sqlite.rs b/kanidmd/src/lib/be/idl_arc_sqlite.rs index bea4b3858..cd1d27129 100644 --- a/kanidmd/src/lib/be/idl_arc_sqlite.rs +++ b/kanidmd/src/lib/be/idl_arc_sqlite.rs @@ -1,4 +1,3 @@ -use crate::audit::AuditScope; use crate::be::idl_sqlite::{ IdlSqlite, IdlSqliteReadTransaction, IdlSqliteTransaction, IdlSqliteWriteTransaction, }; @@ -80,7 +79,6 @@ pub struct IdlArcSqliteWriteTransaction<'a> { macro_rules! get_identry { ( $self:expr, - $au:expr, $idl:expr, $is_read_op:expr ) => {{ @@ -101,7 +99,7 @@ macro_rules! get_identry { if !nidl.is_empty() { // Now, get anything from nidl that is needed. - let mut db_result = $self.db.get_identry($au, &IdList::Partial(nidl))?; + let mut db_result = $self.db.get_identry(&IdList::Partial(nidl))?; // Clone everything from db_result into the cache. if $is_read_op { db_result.iter().for_each(|e| { @@ -127,7 +125,7 @@ macro_rules! get_identry { if !nidl.is_empty() { // Now, get anything from nidl that is needed. - let mut db_result = $self.db.get_identry($au, &IdList::Partial(nidl))?; + let mut db_result = $self.db.get_identry(&IdList::Partial(nidl))?; // Merge the two vecs result.append(&mut db_result); } @@ -143,11 +141,10 @@ macro_rules! get_identry { macro_rules! get_identry_raw { ( $self:expr, - $au:expr, $idl:expr ) => {{ // As a cache we have no concept of this, so we just bypass to the db. - $self.db.get_identry_raw($au, $idl) + $self.db.get_identry_raw($idl) }}; } @@ -155,12 +152,11 @@ macro_rules! get_identry_raw { macro_rules! exists_idx { ( $self:expr, - $audit:expr, $attr:expr, $itype:expr ) => {{ // As a cache we have no concept of this, so we just bypass to the db. - $self.db.exists_idx($audit, $attr, $itype) + $self.db.exists_idx($attr, $itype) }}; } @@ -168,13 +164,11 @@ macro_rules! exists_idx { macro_rules! get_idl { ( $self:expr, - $audit:expr, $attr:expr, $itype:expr, $idx_key:expr ) => {{ spanned!("be::idl_arc_sqlite::get_idl", { - lperf_trace_segment!($audit, "be::idl_arc_sqlite::get_idl", || { // SEE ALSO #259: Find a way to implement borrow for this properly. // I don't think this is possible. When we make this dyn, the arc // needs the dyn trait to be sized so that it *could* claim a clone @@ -202,17 +196,10 @@ macro_rules! get_idl { $itype, $attr, ); - ltrace!( - $audit, - "Got cached idl for index {:?} {:?} -> {}", - $itype, - $attr, - data - ); return Ok(Some(data.as_ref().clone())); } // If miss, get from db *and* insert to the cache. - let db_r = $self.db.get_idl($audit, $attr, $itype, $idx_key)?; + let db_r = $self.db.get_idl($attr, $itype, $idx_key)?; if let Some(ref idl) = db_r { let ncache_key = IdlCacheKey { a: $attr.into(), @@ -222,7 +209,6 @@ macro_rules! get_idl { $self.idl_cache.insert(ncache_key, Box::new(idl.clone())) } Ok(db_r) - }) }) }}; } @@ -231,27 +217,23 @@ macro_rules! get_idl { macro_rules! 
name2uuid { ( $self:expr, - $audit:expr, $name:expr ) => {{ spanned!("be::idl_arc_sqlite::name2uuid", { - lperf_trace_segment!($audit, "be::idl_arc_sqlite::name2uuid", || { - let cache_key = NameCacheKey::Name2Uuid($name.to_string()); - let cache_r = $self.name_cache.get(&cache_key); - if let Some(NameCacheValue::U(uuid)) = cache_r { - trace!("Got cached uuid for name2uuid"); - ltrace!($audit, "Got cached uuid for name2uuid"); - return Ok(Some(uuid.clone())); - } + let cache_key = NameCacheKey::Name2Uuid($name.to_string()); + let cache_r = $self.name_cache.get(&cache_key); + if let Some(NameCacheValue::U(uuid)) = cache_r { + trace!("Got cached uuid for name2uuid"); + return Ok(Some(uuid.clone())); + } - let db_r = $self.db.name2uuid($audit, $name)?; - if let Some(uuid) = db_r { - $self - .name_cache - .insert(cache_key, NameCacheValue::U(uuid.clone())) - } - Ok(db_r) - }) + let db_r = $self.db.name2uuid($name)?; + if let Some(uuid) = db_r { + $self + .name_cache + .insert(cache_key, NameCacheValue::U(uuid.clone())) + } + Ok(db_r) }) }}; } @@ -260,27 +242,23 @@ macro_rules! name2uuid { macro_rules! uuid2spn { ( $self:expr, - $audit:expr, $uuid:expr ) => {{ spanned!("be::idl_arc_sqlite::uuid2spn", { - lperf_trace_segment!($audit, "be::idl_arc_sqlite::name2uuid", || { - let cache_key = NameCacheKey::Uuid2Spn(*$uuid); - let cache_r = $self.name_cache.get(&cache_key); - if let Some(NameCacheValue::S(ref spn)) = cache_r { - trace!("Got cached spn for uuid2spn"); - ltrace!($audit, "Got cached spn for uuid2spn"); - return Ok(Some(spn.as_ref().clone())); - } + let cache_key = NameCacheKey::Uuid2Spn(*$uuid); + let cache_r = $self.name_cache.get(&cache_key); + if let Some(NameCacheValue::S(ref spn)) = cache_r { + trace!("Got cached spn for uuid2spn"); + return Ok(Some(spn.as_ref().clone())); + } - let db_r = $self.db.uuid2spn($audit, $uuid)?; - if let Some(ref data) = db_r { - $self - .name_cache - .insert(cache_key, NameCacheValue::S(Box::new(data.clone()))) - } - Ok(db_r) - }) + let db_r = $self.db.uuid2spn($uuid)?; + if let Some(ref data) = db_r { + $self + .name_cache + .insert(cache_key, NameCacheValue::S(Box::new(data.clone()))) + } + Ok(db_r) }) }}; } @@ -289,27 +267,23 @@ macro_rules! uuid2spn { macro_rules! uuid2rdn { ( $self:expr, - $audit:expr, $uuid:expr ) => {{ spanned!("be::idl_arc_sqlite::uuid2rdn", { - lperf_trace_segment!($audit, "be::idl_arc_sqlite::name2uuid", || { - let cache_key = NameCacheKey::Uuid2Rdn(*$uuid); - let cache_r = $self.name_cache.get(&cache_key); - if let Some(NameCacheValue::R(ref rdn)) = cache_r { - trace!("Got cached rdn for uuid2rdn"); - ltrace!($audit, "Got cached rdn for uuid2rdn"); - return Ok(Some(rdn.clone())); - } + let cache_key = NameCacheKey::Uuid2Rdn(*$uuid); + let cache_r = $self.name_cache.get(&cache_key); + if let Some(NameCacheValue::R(ref rdn)) = cache_r { + trace!("Got cached rdn for uuid2rdn"); + return Ok(Some(rdn.clone())); + } - let db_r = $self.db.uuid2rdn($audit, $uuid)?; - if let Some(ref data) = db_r { - $self - .name_cache - .insert(cache_key, NameCacheValue::R(data.clone())) - } - Ok(db_r) - }) + let db_r = $self.db.uuid2rdn($uuid)?; + if let Some(ref data) = db_r { + $self + .name_cache + .insert(cache_key, NameCacheValue::R(data.clone())) + } + Ok(db_r) }) }}; } @@ -317,17 +291,15 @@ macro_rules! uuid2rdn { // ! TRACING INTEGRATED macro_rules! verify { ( - $self:expr, - $audit:expr + $self:expr ) => {{ let mut r = $self.db.verify(); if r.is_empty() && !$self.is_dirty() { // Check allids. 
- match $self.db.get_allids($audit) { + match $self.db.get_allids() { Ok(db_allids) => { if !db_allids.is_compressed() || !(*($self).allids).is_compressed() { admin_warn!("Inconsistent ALLIDS compression state"); - ladmin_warning!($audit, "Inconsistent ALLIDS compression state"); r.push(Err(ConsistencyError::BackendAllIdsSync)) } if db_allids != (*($self).allids) { @@ -337,18 +309,6 @@ macro_rules! verify { arc_allids = ?(&(*($self).allids)).andnot(&db_allids), "Inconsistent ALLIDS set" ); - - ladmin_warning!($audit, "Inconsistent ALLIDS set"); - ladmin_warning!( - $audit, - "db_allids: {:?}", - (&db_allids).andnot(&($self).allids) - ); - ladmin_warning!( - $audit, - "arc_allids: {:?}", - (&(*($self).allids)).andnot(&db_allids) - ); r.push(Err(ConsistencyError::BackendAllIdsSync)) } } @@ -363,29 +323,18 @@ pub trait IdlArcSqliteTransaction { // ! TRACING INTEGRATED fn get_identry( &mut self, - au: &mut AuditScope, idl: &IdList, ) -> Result>, OperationError>; // ! TRACING INTEGRATED - fn get_identry_raw( - &self, - au: &mut AuditScope, - idl: &IdList, - ) -> Result, OperationError>; + fn get_identry_raw(&self, idl: &IdList) -> Result, OperationError>; // ! TRACING INTEGRATED - fn exists_idx( - &mut self, - audit: &mut AuditScope, - attr: &str, - itype: &IndexType, - ) -> Result; + fn exists_idx(&mut self, attr: &str, itype: &IndexType) -> Result; // ! TRACING INTEGRATED fn get_idl( &mut self, - audit: &mut AuditScope, attr: &str, itype: &IndexType, idx_key: &str, @@ -396,90 +345,62 @@ pub trait IdlArcSqliteTransaction { fn get_db_d_uuid(&self) -> Result, OperationError>; // ! TRACING INTEGRATED - fn verify(&self, audit: &mut AuditScope) -> Vec>; + fn verify(&self) -> Vec>; fn is_dirty(&self) -> bool; // ! TRACING INTEGRATED - fn name2uuid( - &mut self, - audit: &mut AuditScope, - name: &str, - ) -> Result, OperationError>; + fn name2uuid(&mut self, name: &str) -> Result, OperationError>; // ! TRACING INTEGRATED - fn uuid2spn( - &mut self, - audit: &mut AuditScope, - uuid: &Uuid, - ) -> Result, OperationError>; + fn uuid2spn(&mut self, uuid: &Uuid) -> Result, OperationError>; // ! TRACING INTEGRATED - fn uuid2rdn( - &mut self, - audit: &mut AuditScope, - uuid: &Uuid, - ) -> Result, OperationError>; + fn uuid2rdn(&mut self, uuid: &Uuid) -> Result, OperationError>; // ! TRACING INTEGRATED - fn list_idxs(&self, audit: &mut AuditScope) -> Result, OperationError>; + fn list_idxs(&self) -> Result, OperationError>; // ! TRACING INTEGRATED - fn list_id2entry(&self, audit: &mut AuditScope) -> Result, OperationError>; + fn list_id2entry(&self) -> Result, OperationError>; // ! TRACING INTEGRATED fn list_index_content( &self, - audit: &mut AuditScope, index_name: &str, ) -> Result, OperationError>; // ! TRACING INTEGRATED - fn get_id2entry( - &self, - audit: &mut AuditScope, - id: u64, - ) -> Result<(u64, String), OperationError>; + fn get_id2entry(&self, id: u64) -> Result<(u64, String), OperationError>; } impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> { // ! TRACING INTEGRATED fn get_identry( &mut self, - au: &mut AuditScope, idl: &IdList, ) -> Result>, OperationError> { - get_identry!(self, au, idl, true) + get_identry!(self, idl, true) } // ! TRACING INTEGRATED - fn get_identry_raw( - &self, - au: &mut AuditScope, - idl: &IdList, - ) -> Result, OperationError> { - get_identry_raw!(self, au, idl) + fn get_identry_raw(&self, idl: &IdList) -> Result, OperationError> { + get_identry_raw!(self, idl) } // ! 
TRACING INTEGRATED - fn exists_idx( - &mut self, - audit: &mut AuditScope, - attr: &str, - itype: &IndexType, - ) -> Result { - exists_idx!(self, audit, attr, itype) + fn exists_idx(&mut self, attr: &str, itype: &IndexType) -> Result { + exists_idx!(self, attr, itype) } // ! TRACING INTEGRATED fn get_idl( &mut self, - audit: &mut AuditScope, attr: &str, itype: &IndexType, idx_key: &str, ) -> Result, OperationError> { - get_idl!(self, audit, attr, itype, idx_key) + get_idl!(self, attr, itype, idx_key) } // ! TRACING INTEGRATED @@ -493,8 +414,8 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> { } // ! TRACING INTEGRATED - fn verify(&self, audit: &mut AuditScope) -> Vec> { - verify!(self, audit) + fn verify(&self) -> Vec> { + verify!(self) } fn is_dirty(&self) -> bool { @@ -502,62 +423,45 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> { } // ! TRACING INTEGRATED - fn name2uuid( - &mut self, - audit: &mut AuditScope, - name: &str, - ) -> Result, OperationError> { - name2uuid!(self, audit, name) + fn name2uuid(&mut self, name: &str) -> Result, OperationError> { + name2uuid!(self, name) } // ! TRACING INTEGRATED - fn uuid2spn( - &mut self, - audit: &mut AuditScope, - uuid: &Uuid, - ) -> Result, OperationError> { - uuid2spn!(self, audit, uuid) + fn uuid2spn(&mut self, uuid: &Uuid) -> Result, OperationError> { + uuid2spn!(self, uuid) } // ! TRACING INTEGRATED - fn uuid2rdn( - &mut self, - audit: &mut AuditScope, - uuid: &Uuid, - ) -> Result, OperationError> { - uuid2rdn!(self, audit, uuid) + fn uuid2rdn(&mut self, uuid: &Uuid) -> Result, OperationError> { + uuid2rdn!(self, uuid) } // ! TRACING INTEGRATED - fn list_idxs(&self, audit: &mut AuditScope) -> Result, OperationError> { + fn list_idxs(&self) -> Result, OperationError> { // This is only used in tests or debug tools, so bypass the cache. - self.db.list_idxs(audit) + self.db.list_idxs() } // ! TRACING INTEGRATED - fn list_id2entry(&self, audit: &mut AuditScope) -> Result, OperationError> { + fn list_id2entry(&self) -> Result, OperationError> { // This is only used in tests or debug tools, so bypass the cache. - self.db.list_id2entry(audit) + self.db.list_id2entry() } // ! TRACING INTEGRATED fn list_index_content( &self, - audit: &mut AuditScope, index_name: &str, ) -> Result, OperationError> { // This is only used in tests or debug tools, so bypass the cache. - self.db.list_index_content(audit, index_name) + self.db.list_index_content(index_name) } // ! TRACING INTEGRATED - fn get_id2entry( - &self, - audit: &mut AuditScope, - id: u64, - ) -> Result<(u64, String), OperationError> { + fn get_id2entry(&self, id: u64) -> Result<(u64, String), OperationError> { // This is only used in tests or debug tools, so bypass the cache. - self.db.get_id2entry(audit, id) + self.db.get_id2entry(id) } } @@ -565,40 +469,29 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> { // ! TRACING INTEGRATED fn get_identry( &mut self, - au: &mut AuditScope, idl: &IdList, ) -> Result>, OperationError> { - get_identry!(self, au, idl, false) + get_identry!(self, idl, false) } // ! TRACING INTEGRATED - fn get_identry_raw( - &self, - au: &mut AuditScope, - idl: &IdList, - ) -> Result, OperationError> { - get_identry_raw!(self, au, idl) + fn get_identry_raw(&self, idl: &IdList) -> Result, OperationError> { + get_identry_raw!(self, idl) } // ! 
TRACING INTEGRATED - fn exists_idx( - &mut self, - audit: &mut AuditScope, - attr: &str, - itype: &IndexType, - ) -> Result { - exists_idx!(self, audit, attr, itype) + fn exists_idx(&mut self, attr: &str, itype: &IndexType) -> Result { + exists_idx!(self, attr, itype) } // ! TRACING INTEGRATED fn get_idl( &mut self, - audit: &mut AuditScope, attr: &str, itype: &IndexType, idx_key: &str, ) -> Result, OperationError> { - get_idl!(self, audit, attr, itype, idx_key) + get_idl!(self, attr, itype, idx_key) } // ! TRACING INTEGRATED @@ -612,8 +505,8 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> { } // ! TRACING INTEGRATED - fn verify(&self, audit: &mut AuditScope) -> Vec> { - verify!(self, audit) + fn verify(&self) -> Vec> { + verify!(self) } fn is_dirty(&self) -> bool { @@ -621,167 +514,131 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> { } // ! TRACING INTEGRATED - fn name2uuid( - &mut self, - audit: &mut AuditScope, - name: &str, - ) -> Result, OperationError> { - name2uuid!(self, audit, name) + fn name2uuid(&mut self, name: &str) -> Result, OperationError> { + name2uuid!(self, name) } // ! TRACING INTEGRATED - fn uuid2spn( - &mut self, - audit: &mut AuditScope, - uuid: &Uuid, - ) -> Result, OperationError> { - uuid2spn!(self, audit, uuid) + fn uuid2spn(&mut self, uuid: &Uuid) -> Result, OperationError> { + uuid2spn!(self, uuid) } // ! TRACING INTEGRATED - fn uuid2rdn( - &mut self, - audit: &mut AuditScope, - uuid: &Uuid, - ) -> Result, OperationError> { - uuid2rdn!(self, audit, uuid) + fn uuid2rdn(&mut self, uuid: &Uuid) -> Result, OperationError> { + uuid2rdn!(self, uuid) } // ! TRACING INTEGRATED - fn list_idxs(&self, audit: &mut AuditScope) -> Result, OperationError> { + fn list_idxs(&self) -> Result, OperationError> { // This is only used in tests or debug tools, so bypass the cache. - self.db.list_idxs(audit) + self.db.list_idxs() } // ! TRACING INTEGRATED - fn list_id2entry(&self, audit: &mut AuditScope) -> Result, OperationError> { + fn list_id2entry(&self) -> Result, OperationError> { // This is only used in tests or debug tools, so bypass the cache. - self.db.list_id2entry(audit) + self.db.list_id2entry() } // ! TRACING INTEGRATED fn list_index_content( &self, - audit: &mut AuditScope, index_name: &str, ) -> Result, OperationError> { // This is only used in tests or debug tools, so bypass the cache. - self.db.list_index_content(audit, index_name) + self.db.list_index_content(index_name) } // ! TRACING INTEGRATED - fn get_id2entry( - &self, - audit: &mut AuditScope, - id: u64, - ) -> Result<(u64, String), OperationError> { + fn get_id2entry(&self, id: u64) -> Result<(u64, String), OperationError> { // This is only used in tests or debug tools, so bypass the cache. - self.db.get_id2entry(audit, id) + self.db.get_id2entry(id) } } impl<'a> IdlArcSqliteWriteTransaction<'a> { // ! TRACING INTEGRATED - pub fn commit(self, audit: &mut AuditScope) -> Result<(), OperationError> { + pub fn commit(self) -> Result<(), OperationError> { spanned!("be::idl_arc_sqlite::commit", { - lperf_trace_segment!(audit, "be::idl_arc_sqlite::commit", || { - let IdlArcSqliteWriteTransaction { - db, - mut entry_cache, - mut idl_cache, - mut name_cache, - op_ts_max, - allids, - maxid, - } = self; + let IdlArcSqliteWriteTransaction { + db, + mut entry_cache, + mut idl_cache, + mut name_cache, + op_ts_max, + allids, + maxid, + } = self; - // Write any dirty items to the disk. 
- spanned!("be::idl_arc_sqlite::commit", { - lperf_trace_segment!(audit, "be::idl_arc_sqlite::commit", || { - entry_cache - .iter_mut_mark_clean() - .try_for_each(|(k, v)| match v { - Some(e) => db.write_identry(audit, e), - None => db.delete_identry(audit, *k), - }) + // Write any dirty items to the disk. + spanned!("be::idl_arc_sqlite::commit", { + entry_cache + .iter_mut_mark_clean() + .try_for_each(|(k, v)| match v { + Some(e) => db.write_identry(e), + None => db.delete_identry(*k), }) - }) - .map_err(|e| { - admin_error!(?e, "Failed to sync entry cache to sqlite"); - ladmin_error!(audit, "Failed to sync entry cache to sqlite {:?}", e); - e - })?; + }) + .map_err(|e| { + admin_error!(?e, "Failed to sync entry cache to sqlite"); + e + })?; - spanned!("be::idl_arc_sqlite::commit", { - lperf_trace_segment!(audit, "be::idl_arc_sqlite::commit", || { - idl_cache.iter_mut_mark_clean().try_for_each(|(k, v)| { - match v { - Some(idl) => { - db.write_idl(audit, k.a.as_str(), &k.i, k.k.as_str(), idl) - } - #[allow(clippy::unreachable)] - None => { - // Due to how we remove items, we always write an empty idl - // to the cache, so this should never be none. - // - // If it is none, this means we have memory corruption so we MUST - // panic. - // Why is `v` the `Option` type then? - unreachable!(); - } - } - }) + spanned!("be::idl_arc_sqlite::commit", { + idl_cache.iter_mut_mark_clean().try_for_each(|(k, v)| { + match v { + Some(idl) => db.write_idl(k.a.as_str(), &k.i, k.k.as_str(), idl), + #[allow(clippy::unreachable)] + None => { + // Due to how we remove items, we always write an empty idl + // to the cache, so this should never be none. + // + // If it is none, this means we have memory corruption so we MUST + // panic. + // Why is `v` the `Option` type then? 
+ unreachable!(); + } + } + }) + }) + .map_err(|e| { + admin_error!(?e, "Failed to sync idl cache to sqlite"); + e + })?; + + spanned!("be::idl_arc_sqlite::commit", { + name_cache + .iter_mut_mark_clean() + .try_for_each(|(k, v)| match (k, v) { + (NameCacheKey::Name2Uuid(k), Some(NameCacheValue::U(v))) => { + db.write_name2uuid_add(k, v) + } + (NameCacheKey::Name2Uuid(k), None) => db.write_name2uuid_rem(k), + (NameCacheKey::Uuid2Spn(uuid), Some(NameCacheValue::S(v))) => { + db.write_uuid2spn(uuid, Some(v)) + } + (NameCacheKey::Uuid2Spn(uuid), None) => db.write_uuid2spn(uuid, None), + (NameCacheKey::Uuid2Rdn(uuid), Some(NameCacheValue::R(v))) => { + db.write_uuid2rdn(uuid, Some(v)) + } + (NameCacheKey::Uuid2Rdn(uuid), None) => db.write_uuid2rdn(uuid, None), + + _ => Err(OperationError::InvalidCacheState), }) - }) - .map_err(|e| { - admin_error!(?e, "Failed to sync idl cache to sqlite"); - ladmin_error!(audit, "Failed to sync idl cache to sqlite {:?}", e); - e - })?; + }) + .map_err(|e| { + admin_error!(?e, "Failed to sync name cache to sqlite"); + e + })?; - spanned!("be::idl_arc_sqlite::commit", { - lperf_trace_segment!(audit, "be::idl_arc_sqlite::commit", || { - name_cache - .iter_mut_mark_clean() - .try_for_each(|(k, v)| match (k, v) { - (NameCacheKey::Name2Uuid(k), Some(NameCacheValue::U(v))) => { - db.write_name2uuid_add(audit, k, v) - } - (NameCacheKey::Name2Uuid(k), None) => { - db.write_name2uuid_rem(audit, k) - } - (NameCacheKey::Uuid2Spn(uuid), Some(NameCacheValue::S(v))) => { - db.write_uuid2spn(audit, uuid, Some(v)) - } - (NameCacheKey::Uuid2Spn(uuid), None) => { - db.write_uuid2spn(audit, uuid, None) - } - (NameCacheKey::Uuid2Rdn(uuid), Some(NameCacheValue::R(v))) => { - db.write_uuid2rdn(audit, uuid, Some(v)) - } - (NameCacheKey::Uuid2Rdn(uuid), None) => { - db.write_uuid2rdn(audit, uuid, None) - } - - _ => Err(OperationError::InvalidCacheState), - }) - }) - }) - .map_err(|e| { - admin_error!(?e, "Failed to sync name cache to sqlite"); - ladmin_error!(audit, "Failed to sync name cache to sqlite {:?}", e); - e - })?; - - // Undo the caches in the reverse order. - db.commit(audit).map(|()| { - op_ts_max.commit(); - name_cache.commit(); - idl_cache.commit(); - entry_cache.commit(); - allids.commit(); - maxid.commit(); - }) + // Undo the caches in the reverse order. + db.commit().map(|()| { + op_ts_max.commit(); + name_cache.commit(); + idl_cache.commit(); + entry_cache.commit(); + allids.commit(); + maxid.commit(); }) }) } @@ -796,38 +653,27 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { } // ! TRACING INTEGRATED - pub fn write_identries<'b, I>( - &'b mut self, - au: &mut AuditScope, - mut entries: I, - ) -> Result<(), OperationError> + pub fn write_identries<'b, I>(&'b mut self, mut entries: I) -> Result<(), OperationError> where I: Iterator>, { spanned!("be::idl_arc_sqlite::write_identries", { - lperf_trace_segment!(au, "be::idl_arc_sqlite::write_identries", || { - entries.try_for_each(|e| { - trace!("Inserting {:?} to cache", e.get_id()); - ltrace!(au, "Inserting {:?} to cache", e.get_id()); - if e.get_id() == 0 { - Err(OperationError::InvalidEntryId) - } else { - (*self.allids).insert_id(e.get_id()); - self.entry_cache - .insert_dirty(e.get_id(), Box::new(e.clone())); - Ok(()) - } - }) + entries.try_for_each(|e| { + trace!("Inserting {:?} to cache", e.get_id()); + if e.get_id() == 0 { + Err(OperationError::InvalidEntryId) + } else { + (*self.allids).insert_id(e.get_id()); + self.entry_cache + .insert_dirty(e.get_id(), Box::new(e.clone())); + Ok(()) + } }) }) } // ! 
TRACING INTEGRATED - pub fn write_identries_raw( - &mut self, - audit: &mut AuditScope, - entries: I, - ) -> Result<(), OperationError> + pub fn write_identries_raw(&mut self, entries: I) -> Result<(), OperationError> where I: Iterator, { @@ -835,8 +681,8 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { self.entry_cache.clear(); // Write the raw ents self.db - .write_identries_raw(audit, entries) - .and_then(|()| self.db.get_allids(audit)) + .write_identries_raw(entries) + .and_then(|()| self.db.get_allids()) .map(|mut ids| { // Update allids since we cleared them and need to reset it in the cache. std::mem::swap(self.allids.deref_mut(), &mut ids); @@ -844,27 +690,20 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { } // ! TRACING INTEGRATED - pub fn delete_identry( - &mut self, - au: &mut AuditScope, - mut idl: I, - ) -> Result<(), OperationError> + pub fn delete_identry(&mut self, mut idl: I) -> Result<(), OperationError> where I: Iterator, { spanned!("be::idl_arc_sqlite::delete_identry", { - lperf_trace_segment!(au, "be::idl_arc_sqlite::delete_identry", || { - idl.try_for_each(|i| { - trace!("Removing {:?} from cache", i); - ltrace!(au, "Removing {:?} from cache", i); - if i == 0 { - Err(OperationError::InvalidEntryId) - } else { - (*self.allids).remove_id(i); - self.entry_cache.remove_dirty(i); - Ok(()) - } - }) + idl.try_for_each(|i| { + trace!("Removing {:?} from cache", i); + if i == 0 { + Err(OperationError::InvalidEntryId) + } else { + (*self.allids).remove_id(i); + self.entry_cache.remove_dirty(i); + Ok(()) + } }) }) } @@ -872,62 +711,51 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { // ! TRACING INTEGRATED pub fn write_idl( &mut self, - audit: &mut AuditScope, attr: &str, itype: &IndexType, idx_key: &str, idl: &IDLBitRange, ) -> Result<(), OperationError> { spanned!("be::idl_arc_sqlite::write_idl", { - lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_idl", || { - let cache_key = IdlCacheKey { - a: attr.into(), - i: itype.clone(), - k: idx_key.into(), - }; - // On idl == 0 the db will remove this, and synthesise an empty IdList on a miss - // but we can cache this as a new empty IdList instead, so that we can avoid the - // db lookup on this idl. - if idl.is_empty() { - self.idl_cache - .insert_dirty(cache_key, Box::new(IDLBitRange::new())); - } else { - self.idl_cache - .insert_dirty(cache_key, Box::new(idl.clone())); - } - // self.db.write_idl(audit, attr, itype, idx_key, idl) - Ok(()) - }) + let cache_key = IdlCacheKey { + a: attr.into(), + i: itype.clone(), + k: idx_key.into(), + }; + // On idl == 0 the db will remove this, and synthesise an empty IdList on a miss + // but we can cache this as a new empty IdList instead, so that we can avoid the + // db lookup on this idl. + if idl.is_empty() { + self.idl_cache + .insert_dirty(cache_key, Box::new(IDLBitRange::new())); + } else { + self.idl_cache + .insert_dirty(cache_key, Box::new(idl.clone())); + } + // self.db.write_idl(audit, attr, itype, idx_key, idl) + Ok(()) }) } // ! TRACING INTEGRATED - pub fn optimise_dirty_idls(&mut self, audit: &mut AuditScope) { + pub fn optimise_dirty_idls(&mut self) { self.idl_cache.iter_mut_dirty().for_each(|(k, maybe_idl)| { if let Some(idl) = maybe_idl { if idl.maybe_compress() { filter_info!(?k, "Compressed idl"); - lfilter_info!(audit, "Compressed idl -> {:?} ", k); } } }) } // ! 
TRACING INTEGRATED - pub fn is_idx_slopeyness_generated( - &self, - audit: &mut AuditScope, - ) -> Result { - self.db.is_idx_slopeyness_generated(audit) + pub fn is_idx_slopeyness_generated(&self) -> Result { + self.db.is_idx_slopeyness_generated() } // ! TRACING INTEGRATED - pub fn get_idx_slope( - &self, - audit: &mut AuditScope, - ikey: &IdxKey, - ) -> Result, OperationError> { - self.db.get_idx_slope(audit, ikey) + pub fn get_idx_slope(&self, ikey: &IdxKey) -> Result, OperationError> { + self.db.get_idx_slope(ikey) } /// Index Slope Analysis. For the purpose of external modules you can consider this as a @@ -938,7 +766,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { /// be better than class=*, but comparing name=foo to spn=foo is "much over muchness" since /// both are really fast. // ! TRACING INTEGRATED - pub fn analyse_idx_slopes(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> { + pub fn analyse_idx_slopes(&mut self) -> Result<(), OperationError> { /* * Inside of this analysis there are two major factors we need to understand * @@ -1120,9 +948,8 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { }) .collect(); trace!(?slopes, "Generated slopes"); - ltrace!(audit, "Generated slopes -> {:?}", slopes); // Write the data down - self.db.store_idx_slope_analysis(audit, &slopes) + self.db.store_idx_slope_analysis(&slopes) } fn calculate_sd_slope(data: Vec) -> IdxSlope { @@ -1174,148 +1001,94 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { } // ! TRACING INTEGRATED - pub fn create_name2uuid(&self, audit: &mut AuditScope) -> Result<(), OperationError> { - self.db.create_name2uuid(audit) + pub fn create_name2uuid(&self) -> Result<(), OperationError> { + self.db.create_name2uuid() } // ! TRACING INTEGRATED pub fn write_name2uuid_add( &mut self, - audit: &mut AuditScope, uuid: &Uuid, add: BTreeSet, ) -> Result<(), OperationError> { spanned!("be::idl_arc_sqlite::write_name2uuid_add", { - lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_name2uuid_add", || { - /* - self.db - .write_name2uuid_add(audit, uuid, &add) - .and_then(|_| { - */ - - // why not just a for loop here... - add.into_iter().for_each(|k| { - let cache_key = NameCacheKey::Name2Uuid(k); - let cache_value = NameCacheValue::U(*uuid); - self.name_cache.insert_dirty(cache_key, cache_value) - }); - Ok(()) - /* - }) - */ - }) + add.into_iter().for_each(|k| { + let cache_key = NameCacheKey::Name2Uuid(k); + let cache_value = NameCacheValue::U(*uuid); + self.name_cache.insert_dirty(cache_key, cache_value) + }); + Ok(()) }) } // ! TRACING INTEGRATED - pub fn write_name2uuid_rem( - &mut self, - audit: &mut AuditScope, - rem: BTreeSet, - ) -> Result<(), OperationError> { + pub fn write_name2uuid_rem(&mut self, rem: BTreeSet) -> Result<(), OperationError> { spanned!("be::idl_arc_sqlite::write_name2uuid_rem", { - lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_name2uuid_add", || { - // self.db.write_name2uuid_rem(audit, &rem).and_then(|_| { - rem.into_iter().for_each(|k| { - // why not just a for loop here... - let cache_key = NameCacheKey::Name2Uuid(k); - self.name_cache.remove_dirty(cache_key) - }); - Ok(()) - // }) - }) + // self.db.write_name2uuid_rem(audit, &rem).and_then(|_| { + rem.into_iter().for_each(|k| { + // why not just a for loop here... + let cache_key = NameCacheKey::Name2Uuid(k); + self.name_cache.remove_dirty(cache_key) + }); + Ok(()) + // }) }) } // ! 
TRACING INTEGRATED - pub fn create_uuid2spn(&self, audit: &mut AuditScope) -> Result<(), OperationError> { - self.db.create_uuid2spn(audit) + pub fn create_uuid2spn(&self) -> Result<(), OperationError> { + self.db.create_uuid2spn() } // ! TRACING INTEGRATED - pub fn write_uuid2spn( - &mut self, - audit: &mut AuditScope, - uuid: &Uuid, - k: Option, - ) -> Result<(), OperationError> { + pub fn write_uuid2spn(&mut self, uuid: &Uuid, k: Option) -> Result<(), OperationError> { spanned!("be::idl_arc_sqlite::write_uuid2spn", { - lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_uuid2spn", || { - /* - self.db - .write_uuid2spn(audit, uuid, k.as_ref()) - .and_then(|_| { - */ - let cache_key = NameCacheKey::Uuid2Spn(*uuid); - match k { - Some(v) => self - .name_cache - .insert_dirty(cache_key, NameCacheValue::S(Box::new(v))), - None => self.name_cache.remove_dirty(cache_key), - } - Ok(()) - /* - }) - */ - }) + let cache_key = NameCacheKey::Uuid2Spn(*uuid); + match k { + Some(v) => self + .name_cache + .insert_dirty(cache_key, NameCacheValue::S(Box::new(v))), + None => self.name_cache.remove_dirty(cache_key), + } + Ok(()) }) } // ! TRACING INTEGRATED - pub fn create_uuid2rdn(&self, audit: &mut AuditScope) -> Result<(), OperationError> { - self.db.create_uuid2rdn(audit) + pub fn create_uuid2rdn(&self) -> Result<(), OperationError> { + self.db.create_uuid2rdn() } // ! TRACING INTEGRATED - pub fn write_uuid2rdn( - &mut self, - audit: &mut AuditScope, - uuid: &Uuid, - k: Option, - ) -> Result<(), OperationError> { + pub fn write_uuid2rdn(&mut self, uuid: &Uuid, k: Option) -> Result<(), OperationError> { spanned!("be::idl_arc_sqlite::write_uuid2rdn", { - lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_uuid2rdn", || { - /* - self.db - .write_uuid2rdn(audit, uuid, k.as_ref()) - .and_then(|_| { - */ - let cache_key = NameCacheKey::Uuid2Rdn(*uuid); - match k { - Some(s) => self - .name_cache - .insert_dirty(cache_key, NameCacheValue::R(s)), - None => self.name_cache.remove_dirty(cache_key), - } - Ok(()) - /* - }) - */ - }) + let cache_key = NameCacheKey::Uuid2Rdn(*uuid); + match k { + Some(s) => self + .name_cache + .insert_dirty(cache_key, NameCacheValue::R(s)), + None => self.name_cache.remove_dirty(cache_key), + } + Ok(()) }) } // ! TRACING INTEGRATED - pub fn create_idx( - &self, - audit: &mut AuditScope, - attr: &str, - itype: &IndexType, - ) -> Result<(), OperationError> { + pub fn create_idx(&self, attr: &str, itype: &IndexType) -> Result<(), OperationError> { // We don't need to affect this, so pass it down. - self.db.create_idx(audit, attr, itype) + self.db.create_idx(attr, itype) } // ! TRACING INTEGRATED - pub unsafe fn purge_idxs(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> { - self.db.purge_idxs(audit).map(|()| { + pub unsafe fn purge_idxs(&mut self) -> Result<(), OperationError> { + self.db.purge_idxs().map(|()| { self.idl_cache.clear(); }) } // ! TRACING INTEGRATED - pub unsafe fn purge_id2entry(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> { - self.db.purge_id2entry(audit).map(|()| { + pub unsafe fn purge_id2entry(&mut self) -> Result<(), OperationError> { + self.db.purge_id2entry().map(|()| { let mut ids = IDLBitRange::new(); ids.compress(); std::mem::swap(self.allids.deref_mut(), &mut ids); @@ -1356,10 +1129,10 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { } // ! 
TRACING INTEGRATED - pub fn setup(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> { + pub fn setup(&mut self) -> Result<(), OperationError> { self.db - .setup(audit) - .and_then(|()| self.db.get_allids(audit)) + .setup() + .and_then(|()| self.db.get_allids()) .map(|mut ids| { std::mem::swap(self.allids.deref_mut(), &mut ids); }) @@ -1372,18 +1145,14 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { impl IdlArcSqlite { // ! TRACING INTEGRATED - pub fn new( - audit: &mut AuditScope, - cfg: &BackendConfig, - vacuum: bool, - ) -> Result { - let db = IdlSqlite::new(audit, cfg, vacuum)?; + pub fn new(cfg: &BackendConfig, vacuum: bool) -> Result { + let db = IdlSqlite::new(cfg, vacuum)?; // Autotune heuristic. let mut cache_size = cfg.arcsize.unwrap_or_else(|| { // For now I've noticed about 20% of the number of entries // works well, but it may not be perfect ... - db.get_allids_count(audit) + db.get_allids_count() .map(|c| { let tmpsize = (c / 5) as usize; // if our calculation's too small anyway, just set it to the minimum target @@ -1398,12 +1167,6 @@ impl IdlArcSqlite { new = DEFAULT_CACHE_TARGET, "Configured Arc Cache size too low, increasing..." ); - ladmin_warning!( - audit, - "Configured Arc Cache size too low {} - setting to {} ...", - &cache_size, - DEFAULT_CACHE_TARGET - ); cache_size = DEFAULT_CACHE_TARGET; // this being above the log was an uncaught bug } diff --git a/kanidmd/src/lib/be/idl_sqlite.rs b/kanidmd/src/lib/be/idl_sqlite.rs index 0c0d51d83..74c731e11 100644 --- a/kanidmd/src/lib/be/idl_sqlite.rs +++ b/kanidmd/src/lib/be/idl_sqlite.rs @@ -1,4 +1,3 @@ -use crate::audit::AuditScope; use crate::be::{BackendConfig, IdList, IdRawEntry, IdxKey, IdxSlope}; use crate::entry::{Entry, EntryCommitted, EntrySealed}; use crate::prelude::*; @@ -116,25 +115,18 @@ pub trait IdlSqliteTransaction { // ! TRACING INTEGRATED fn get_identry( &self, - au: &mut AuditScope, idl: &IdList, ) -> Result>, OperationError> { spanned!("be::idl_sqlite::get_identry", { - lperf_trace_segment!(au, "be::idl_sqlite::get_identry", || { - self.get_identry_raw(au, idl)? - .into_iter() - .map(|ide| ide.into_entry(au)) - .collect() - }) + self.get_identry_raw(idl)? + .into_iter() + .map(|ide| ide.into_entry()) + .collect() }) } // ! TRACING INTEGRATED - fn get_identry_raw( - &self, - _au: &mut AuditScope, - idl: &IdList, - ) -> Result, OperationError> { + fn get_identry_raw(&self, idl: &IdList) -> Result, OperationError> { // is the idl allids? match idl { IdList::AllIds => { @@ -204,7 +196,7 @@ pub trait IdlSqliteTransaction { } // ! TRACING INTEGRATED - fn exists_table(&self, _audit: &mut AuditScope, tname: &str) -> Result { + fn exists_table(&self, tname: &str) -> Result { let mut stmt = self .get_conn() .prepare("SELECT COUNT(name) from sqlite_master where name = :tname") @@ -220,153 +212,122 @@ pub trait IdlSqliteTransaction { } // ! TRACING INTEGRATED - fn exists_idx( - &self, - audit: &mut AuditScope, - attr: &str, - itype: &IndexType, - ) -> Result { + fn exists_idx(&self, attr: &str, itype: &IndexType) -> Result { let tname = format!("idx_{}_{}", itype.as_idx_str(), attr); - self.exists_table(audit, &tname) + self.exists_table(&tname) } // ! TRACING INTEGRATED fn get_idl( &self, - audit: &mut AuditScope, attr: &str, itype: &IndexType, idx_key: &str, ) -> Result, OperationError> { spanned!("be::idl_sqlite::get_idl", { - lperf_trace_segment!(audit, "be::idl_sqlite::get_idl", || { - if !(self.exists_idx(audit, attr, itype)?) 
{ - filter_error!("Index {:?} {:?} not found", itype, attr); - lfilter_error!(audit, "Index {:?} {:?} not found", itype, attr); - return Ok(None); - } - // The table exists - lets now get the actual index itself. + if !(self.exists_idx(attr, itype)?) { + filter_error!("Index {:?} {:?} not found", itype, attr); + return Ok(None); + } + // The table exists - lets now get the actual index itself. - let query = format!( - "SELECT idl FROM idx_{}_{} WHERE key = :idx_key", - itype.as_idx_str(), - attr - ); - let mut stmt = self - .get_conn() - .prepare(query.as_str()) - .map_err(sqlite_error)?; - let idl_raw: Option> = stmt - .query_row(&[(":idx_key", &idx_key)], |row| row.get(0)) - // We don't mind if it doesn't exist - .optional() - .map_err(sqlite_error)?; + let query = format!( + "SELECT idl FROM idx_{}_{} WHERE key = :idx_key", + itype.as_idx_str(), + attr + ); + let mut stmt = self + .get_conn() + .prepare(query.as_str()) + .map_err(sqlite_error)?; + let idl_raw: Option> = stmt + .query_row(&[(":idx_key", &idx_key)], |row| row.get(0)) + // We don't mind if it doesn't exist + .optional() + .map_err(sqlite_error)?; - let idl = match idl_raw { - Some(d) => serde_cbor::from_slice(d.as_slice()).map_err(serde_cbor_error)?, - // We don't have this value, it must be empty (or we - // have a corrupted index ..... - None => IDLBitRange::new(), - }; - trace!(%idl, "Got idl for index {:?} {:?}", itype, attr); - ltrace!(audit, "Got idl for index {:?} {:?} -> {}", itype, attr, idl); + let idl = match idl_raw { + Some(d) => serde_cbor::from_slice(d.as_slice()).map_err(serde_cbor_error)?, + // We don't have this value, it must be empty (or we + // have a corrupted index ..... + None => IDLBitRange::new(), + }; + trace!(%idl, "Got idl for index {:?} {:?}", itype, attr); - Ok(Some(idl)) - }) + Ok(Some(idl)) }) } // ! TRACING INTEGRATED - fn name2uuid( - &mut self, - audit: &mut AuditScope, - name: &str, - ) -> Result, OperationError> { + fn name2uuid(&mut self, name: &str) -> Result, OperationError> { spanned!("be::idl_sqlite::name2uuid", { - lperf_trace_segment!(audit, "be::idl_sqlite::name2uuid", || { - // The table exists - lets now get the actual index itself. - let mut stmt = self - .get_conn() - .prepare("SELECT uuid FROM idx_name2uuid WHERE name = :name") - .map_err(sqlite_error)?; - let uuid_raw: Option = stmt - .query_row(&[(":name", &name)], |row| row.get(0)) - // We don't mind if it doesn't exist - .optional() - .map_err(sqlite_error)?; + // The table exists - lets now get the actual index itself. + let mut stmt = self + .get_conn() + .prepare("SELECT uuid FROM idx_name2uuid WHERE name = :name") + .map_err(sqlite_error)?; + let uuid_raw: Option = stmt + .query_row(&[(":name", &name)], |row| row.get(0)) + // We don't mind if it doesn't exist + .optional() + .map_err(sqlite_error)?; - let uuid = uuid_raw.as_ref().and_then(|u| Uuid::parse_str(u).ok()); - trace!(%name, ?uuid, "Got uuid for index"); - ltrace!(audit, "Got uuid for index name {} -> {:?}", name, uuid); + let uuid = uuid_raw.as_ref().and_then(|u| Uuid::parse_str(u).ok()); + trace!(%name, ?uuid, "Got uuid for index"); - Ok(uuid) - }) + Ok(uuid) }) } // ! 
TRACING INTEGRATED - fn uuid2spn( - &mut self, - audit: &mut AuditScope, - uuid: &Uuid, - ) -> Result, OperationError> { + fn uuid2spn(&mut self, uuid: &Uuid) -> Result, OperationError> { spanned!("be::idl_sqlite::uuid2spn", { - lperf_trace_segment!(audit, "be::idl_sqlite::uuid2spn", || { - let uuids = uuid.to_hyphenated_ref().to_string(); - // The table exists - lets now get the actual index itself. - let mut stmt = self - .get_conn() - .prepare("SELECT spn FROM idx_uuid2spn WHERE uuid = :uuid") - .map_err(sqlite_error)?; - let spn_raw: Option> = stmt - .query_row(&[(":uuid", &uuids)], |row| row.get(0)) - // We don't mind if it doesn't exist - .optional() - .map_err(sqlite_error)?; + let uuids = uuid.to_hyphenated_ref().to_string(); + // The table exists - lets now get the actual index itself. + let mut stmt = self + .get_conn() + .prepare("SELECT spn FROM idx_uuid2spn WHERE uuid = :uuid") + .map_err(sqlite_error)?; + let spn_raw: Option> = stmt + .query_row(&[(":uuid", &uuids)], |row| row.get(0)) + // We don't mind if it doesn't exist + .optional() + .map_err(sqlite_error)?; - let spn: Option = match spn_raw { - Some(d) => { - let dbv = serde_cbor::from_slice(d.as_slice()).map_err(serde_cbor_error)?; - let spn = Value::from_db_valuev1(dbv) - .map_err(|_| OperationError::CorruptedIndex("uuid2spn".to_string()))?; - Some(spn) - } - None => None, - }; + let spn: Option = match spn_raw { + Some(d) => { + let dbv = serde_cbor::from_slice(d.as_slice()).map_err(serde_cbor_error)?; + let spn = Value::from_db_valuev1(dbv) + .map_err(|_| OperationError::CorruptedIndex("uuid2spn".to_string()))?; + Some(spn) + } + None => None, + }; - trace!(?uuid, ?spn, "Got spn for uuid"); - ltrace!(audit, "Got spn for uuid {:?} -> {:?}", uuid, spn); + trace!(?uuid, ?spn, "Got spn for uuid"); - Ok(spn) - }) + Ok(spn) }) } // ! TRACING INTEGRATED - fn uuid2rdn( - &mut self, - audit: &mut AuditScope, - uuid: &Uuid, - ) -> Result, OperationError> { + fn uuid2rdn(&mut self, uuid: &Uuid) -> Result, OperationError> { spanned!("be::idl_sqlite::uuid2rdn", { - lperf_trace_segment!(audit, "be::idl_sqlite::uuid2rdn", || { - let uuids = uuid.to_hyphenated_ref().to_string(); - // The table exists - lets now get the actual index itself. - let mut stmt = self - .get_conn() - .prepare("SELECT rdn FROM idx_uuid2rdn WHERE uuid = :uuid") - .map_err(sqlite_error)?; - let rdn: Option = stmt - .query_row(&[(":uuid", &uuids)], |row| row.get(0)) - // We don't mind if it doesn't exist - .optional() - .map_err(sqlite_error)?; + let uuids = uuid.to_hyphenated_ref().to_string(); + // The table exists - lets now get the actual index itself. + let mut stmt = self + .get_conn() + .prepare("SELECT rdn FROM idx_uuid2rdn WHERE uuid = :uuid") + .map_err(sqlite_error)?; + let rdn: Option = stmt + .query_row(&[(":uuid", &uuids)], |row| row.get(0)) + // We don't mind if it doesn't exist + .optional() + .map_err(sqlite_error)?; - trace!(?uuid, ?rdn, "Got rdn for uuid"); - ltrace!(audit, "Got rdn for uuid {:?} -> {:?}", uuid, rdn); + trace!(?uuid, ?rdn, "Got rdn for uuid"); - Ok(rdn) - }) + Ok(rdn) }) } @@ -427,9 +388,8 @@ pub trait IdlSqliteTransaction { } // ! 
TRACING INTEGRATED - fn get_allids(&self, au: &mut AuditScope) -> Result { + fn get_allids(&self) -> Result { trace!("Building allids..."); - ltrace!(au, "Building allids..."); let mut stmt = self .get_conn() .prepare("SELECT id FROM id2entry") @@ -441,7 +401,6 @@ pub trait IdlSqliteTransaction { // Convert the idsqlite to id raw id.try_into().map_err(|e| { admin_error!(?e, "I64 Parse Error"); - ladmin_error!(au, "I64 Parse Error {:?}", e); OperationError::SqliteError }) }) @@ -454,7 +413,7 @@ pub trait IdlSqliteTransaction { } // ! TRACING INTEGRATED - fn list_idxs(&self, _audit: &mut AuditScope) -> Result, OperationError> { + fn list_idxs(&self) -> Result, OperationError> { let mut stmt = self .get_conn() .prepare("SELECT name from sqlite_master where type='table' and name GLOB 'idx_*'") @@ -465,30 +424,23 @@ pub trait IdlSqliteTransaction { } // ! TRACING INTEGRATED - fn list_id2entry(&self, audit: &mut AuditScope) -> Result, OperationError> { - let allids = self.get_identry_raw(audit, &IdList::AllIds)?; + fn list_id2entry(&self) -> Result, OperationError> { + let allids = self.get_identry_raw(&IdList::AllIds)?; allids .into_iter() - .map(|data| { - data.into_dbentry(audit) - .map(|(id, db_e)| (id, db_e.to_string())) - }) + .map(|data| data.into_dbentry().map(|(id, db_e)| (id, db_e.to_string()))) .collect() } // ! TRACING INTEGRATED - fn get_id2entry( - &self, - audit: &mut AuditScope, - id: u64, - ) -> Result<(u64, String), OperationError> { + fn get_id2entry(&self, id: u64) -> Result<(u64, String), OperationError> { let idl = IdList::Indexed(IDLBitRange::from_u64(id)); - let mut allids = self.get_identry_raw(audit, &idl)?; + let mut allids = self.get_identry_raw(&idl)?; allids .pop() .ok_or(OperationError::InvalidEntryId) .and_then(|data| { - data.into_dbentry(audit) + data.into_dbentry() .map(|(id, db_e)| (id, format!("{:?}", db_e))) }) } @@ -496,7 +448,6 @@ pub trait IdlSqliteTransaction { // ! TRACING INTEGRATED fn list_index_content( &self, - _audit: &mut AuditScope, index_name: &str, ) -> Result, OperationError> { // TODO: Once we have slopes we can add .exists_table, and assert @@ -624,22 +575,19 @@ impl IdlSqliteWriteTransaction { } // ! TRACING INTEGRATED - pub fn commit(mut self, audit: &mut AuditScope) -> Result<(), OperationError> { + pub fn commit(mut self) -> Result<(), OperationError> { spanned!("be::idl_sqlite::commit", { - lperf_trace_segment!(audit, "be::idl_sqlite::commit", || { - // ltrace!(audit, "Commiting BE WR txn"); - assert!(!self.committed); - self.committed = true; + trace!("Commiting BE WR txn"); + assert!(!self.committed); + self.committed = true; - self.conn - .execute("COMMIT TRANSACTION", []) - .map(|_| ()) - .map_err(|e| { - admin_error!(?e, "CRITICAL: failed to commit sqlite txn"); - ladmin_error!(audit, "CRITICAL: failed to commit sqlite txn -> {:?}", e); - OperationError::BackendEngine - }) - }) + self.conn + .execute("COMMIT TRANSACTION", []) + .map(|_| ()) + .map_err(|e| { + admin_error!(?e, "CRITICAL: failed to commit sqlite txn"); + OperationError::BackendEngine + }) }) } @@ -694,7 +642,6 @@ impl IdlSqliteWriteTransaction { // ! TRACING INTEGRATED pub fn write_identry( &self, - au: &mut AuditScope, entry: &Entry, ) -> Result<(), OperationError> { let dbe = entry.to_dbentry(); @@ -705,15 +652,11 @@ impl IdlSqliteWriteTransaction { data, }); - self.write_identries_raw(au, raw_entries) + self.write_identries_raw(raw_entries) } // ! 
TRACING INTEGRATED - pub fn write_identries_raw( - &self, - _au: &mut AuditScope, - mut entries: I, - ) -> Result<(), OperationError> + pub fn write_identries_raw(&self, mut entries: I) -> Result<(), OperationError> where I: Iterator, { @@ -773,8 +716,7 @@ impl IdlSqliteWriteTransaction { */ // ! TRACING INTEGRATED - pub fn delete_identry(&self, _au: &mut AuditScope, id: u64) -> Result<(), OperationError> { - // lperf_trace_segment!(au, "be::idl_sqlite::delete_identry", || { + pub fn delete_identry(&self, id: u64) -> Result<(), OperationError> { let mut stmt = self .conn .prepare("DELETE FROM id2entry WHERE id = :id") @@ -794,66 +736,60 @@ impl IdlSqliteWriteTransaction { debug_assert!(iid > 0); stmt.execute(&[&iid]).map(|_| ()).map_err(sqlite_error) - // }) } // ! TRACING INTEGRATED pub fn write_idl( &self, - audit: &mut AuditScope, attr: &str, itype: &IndexType, idx_key: &str, idl: &IDLBitRange, ) -> Result<(), OperationError> { spanned!("be::idl_sqlite::write_idl", { - lperf_trace_segment!(audit, "be::idl_sqlite::write_idl", || { - if idl.is_empty() { - trace!(?idl, "purging idl"); - ltrace!(audit, "purging idl -> {:?}", idl); - // delete it - // Delete this idx_key from the table. - let query = format!( - "DELETE FROM idx_{}_{} WHERE key = :key", - itype.as_idx_str(), - attr - ); + if idl.is_empty() { + trace!(?idl, "purging idl"); + // delete it + // Delete this idx_key from the table. + let query = format!( + "DELETE FROM idx_{}_{} WHERE key = :key", + itype.as_idx_str(), + attr + ); - self.conn - .prepare(query.as_str()) - .and_then(|mut stmt| stmt.execute(&[(":key", &idx_key)])) - .map_err(sqlite_error) - } else { - trace!(?idl, "writing idl"); - ltrace!(audit, "writing idl -> {}", idl); - // Serialise the IdList to Vec - let idl_raw = serde_cbor::to_vec(idl).map_err(serde_cbor_error)?; + self.conn + .prepare(query.as_str()) + .and_then(|mut stmt| stmt.execute(&[(":key", &idx_key)])) + .map_err(sqlite_error) + } else { + trace!(?idl, "writing idl"); + // Serialise the IdList to Vec + let idl_raw = serde_cbor::to_vec(idl).map_err(serde_cbor_error)?; - // update or create it. - let query = format!( - "INSERT OR REPLACE INTO idx_{}_{} (key, idl) VALUES(:key, :idl)", - itype.as_idx_str(), - attr - ); + // update or create it. + let query = format!( + "INSERT OR REPLACE INTO idx_{}_{} (key, idl) VALUES(:key, :idl)", + itype.as_idx_str(), + attr + ); - self.conn - .prepare(query.as_str()) - .and_then(|mut stmt| { - stmt.execute(named_params! { - ":key": &idx_key, - ":idl": &idl_raw - }) + self.conn + .prepare(query.as_str()) + .and_then(|mut stmt| { + stmt.execute(named_params! { + ":key": &idx_key, + ":idl": &idl_raw }) - .map_err(sqlite_error) - } - // Get rid of the sqlite rows usize - .map(|_| ()) - }) + }) + .map_err(sqlite_error) + } + // Get rid of the sqlite rows usize + .map(|_| ()) }) } // ! TRACING INTEGRATED - pub fn create_name2uuid(&self, _audit: &mut AuditScope) -> Result<(), OperationError> { + pub fn create_name2uuid(&self) -> Result<(), OperationError> { self.conn .execute( "CREATE TABLE IF NOT EXISTS idx_name2uuid (name TEXT PRIMARY KEY, uuid TEXT)", @@ -864,12 +800,7 @@ impl IdlSqliteWriteTransaction { } // ! TRACING INTEGRATED - pub fn write_name2uuid_add( - &self, - _audit: &mut AuditScope, - name: &str, - uuid: &Uuid, - ) -> Result<(), OperationError> { + pub fn write_name2uuid_add(&self, name: &str, uuid: &Uuid) -> Result<(), OperationError> { let uuids = uuid.to_hyphenated_ref().to_string(); self.conn @@ -885,11 +816,7 @@ impl IdlSqliteWriteTransaction { } // ! 
TRACING INTEGRATED - pub fn write_name2uuid_rem( - &self, - _audit: &mut AuditScope, - name: &str, - ) -> Result<(), OperationError> { + pub fn write_name2uuid_rem(&self, name: &str) -> Result<(), OperationError> { self.conn .prepare("DELETE FROM idx_name2uuid WHERE name = :name") .and_then(|mut stmt| stmt.execute(&[(":name", &name)])) @@ -898,7 +825,7 @@ impl IdlSqliteWriteTransaction { } // ! TRACING INTEGRATED - pub fn create_uuid2spn(&self, _audit: &mut AuditScope) -> Result<(), OperationError> { + pub fn create_uuid2spn(&self) -> Result<(), OperationError> { self.conn .execute( "CREATE TABLE IF NOT EXISTS idx_uuid2spn (uuid TEXT PRIMARY KEY, spn BLOB)", @@ -909,12 +836,7 @@ impl IdlSqliteWriteTransaction { } // ! TRACING INTEGRATED - pub fn write_uuid2spn( - &self, - _audit: &mut AuditScope, - uuid: &Uuid, - k: Option<&Value>, - ) -> Result<(), OperationError> { + pub fn write_uuid2spn(&self, uuid: &Uuid, k: Option<&Value>) -> Result<(), OperationError> { let uuids = uuid.to_hyphenated_ref().to_string(); match k { Some(k) => { @@ -941,7 +863,7 @@ impl IdlSqliteWriteTransaction { } // ! TRACING INTEGRATED - pub fn create_uuid2rdn(&self, _audit: &mut AuditScope) -> Result<(), OperationError> { + pub fn create_uuid2rdn(&self) -> Result<(), OperationError> { self.conn .execute( "CREATE TABLE IF NOT EXISTS idx_uuid2rdn (uuid TEXT PRIMARY KEY, rdn TEXT)", @@ -952,12 +874,7 @@ impl IdlSqliteWriteTransaction { } // ! TRACING INTEGRATED - pub fn write_uuid2rdn( - &self, - _audit: &mut AuditScope, - uuid: &Uuid, - k: Option<&String>, - ) -> Result<(), OperationError> { + pub fn write_uuid2rdn(&self, uuid: &Uuid, k: Option<&String>) -> Result<(), OperationError> { let uuids = uuid.to_hyphenated_ref().to_string(); match k { Some(k) => self @@ -976,12 +893,7 @@ impl IdlSqliteWriteTransaction { } // ! TRACING INTEGRATED - pub fn create_idx( - &self, - audit: &mut AuditScope, - attr: &str, - itype: &IndexType, - ) -> Result<(), OperationError> { + pub fn create_idx(&self, attr: &str, itype: &IndexType) -> Result<(), OperationError> { // Is there a better way than formatting this? I can't seem // to template into the str. // @@ -992,7 +904,6 @@ impl IdlSqliteWriteTransaction { attr ); trace!(idx = %idx_stmt, "Creating index"); - ltrace!(audit, "Creating index -> {}", idx_stmt); self.conn .execute(idx_stmt.as_str(), []) @@ -1001,12 +912,11 @@ impl IdlSqliteWriteTransaction { } // ! TRACING INTEGRATED - pub unsafe fn purge_idxs(&self, audit: &mut AuditScope) -> Result<(), OperationError> { - let idx_table_list = self.list_idxs(audit)?; + pub unsafe fn purge_idxs(&self) -> Result<(), OperationError> { + let idx_table_list = self.list_idxs()?; idx_table_list.iter().try_for_each(|idx_table| { trace!(table = ?idx_table, "removing idx_table"); - ltrace!(audit, "removing idx_table -> {:?}", idx_table); self.conn .prepare(format!("DROP TABLE {}", idx_table).as_str()) .and_then(|mut stmt| stmt.execute([]).map(|_| ())) @@ -1017,7 +927,6 @@ impl IdlSqliteWriteTransaction { // ! TRACING INTEGRATED pub fn store_idx_slope_analysis( &self, - _audit: &mut AuditScope, slopes: &HashMap, ) -> Result<(), OperationError> { self.conn @@ -1057,20 +966,13 @@ impl IdlSqliteWriteTransaction { } // ! TRACING INTEGRATED - pub fn is_idx_slopeyness_generated( - &self, - audit: &mut AuditScope, - ) -> Result { - self.exists_table(audit, "idxslope_analysis") + pub fn is_idx_slopeyness_generated(&self) -> Result { + self.exists_table("idxslope_analysis") } // ! 
TRACING INTEGRATED - pub fn get_idx_slope( - &self, - audit: &mut AuditScope, - ikey: &IdxKey, - ) -> Result, OperationError> { - let analysis_exists = self.exists_table(audit, "idxslope_analysis")?; + pub fn get_idx_slope(&self, ikey: &IdxKey) -> Result, OperationError> { + let analysis_exists = self.exists_table("idxslope_analysis")?; if !analysis_exists { return Ok(None); } @@ -1090,15 +992,13 @@ impl IdlSqliteWriteTransaction { .optional() .map_err(sqlite_error)?; trace!(name = %key, ?slope, "Got slope for index"); - ltrace!(audit, "Got slope for index name {} -> {:?}", key, slope); Ok(slope) } // ! TRACING INTEGRATED - pub unsafe fn purge_id2entry(&self, audit: &mut AuditScope) -> Result<(), OperationError> { + pub unsafe fn purge_id2entry(&self) -> Result<(), OperationError> { trace!("purge id2entry ..."); - ltrace!(audit, "purge id2entry ..."); self.conn .execute("DELETE FROM id2entry", []) .map(|_| ()) @@ -1247,7 +1147,7 @@ impl IdlSqliteWriteTransaction { } // ! TRACING INTEGRATED - pub fn setup(&self, audit: &mut AuditScope) -> Result<(), OperationError> { + pub fn setup(&self) -> Result<(), OperationError> { // This stores versions of components. For example: // ---------------------- // | id | version | @@ -1274,7 +1174,6 @@ impl IdlSqliteWriteTransaction { // If the table is empty, populate the versions as 0. let mut dbv_id2entry = self.get_db_version_key(DBV_ID2ENTRY); trace!(initial = %dbv_id2entry, "dbv_id2entry"); - ltrace!(audit, "dbv_id2entry initial == {}", dbv_id2entry); // Check db_version here. // * if 0 -> create v1. @@ -1303,11 +1202,6 @@ impl IdlSqliteWriteTransaction { dbv_id2entry = 1; admin_info!(entry = %dbv_id2entry, "dbv_id2entry migrated (id2entry, db_sid)"); - ladmin_info!( - audit, - "dbv_id2entry migrated (id2entry, db_sid) -> {}", - dbv_id2entry - ); } // * if v1 -> add the domain uuid table if dbv_id2entry == 1 { @@ -1324,7 +1218,6 @@ impl IdlSqliteWriteTransaction { dbv_id2entry = 2; admin_info!(entry = %dbv_id2entry, "dbv_id2entry migrated (db_did)"); - ladmin_info!(audit, "dbv_id2entry migrated (db_did) -> {}", dbv_id2entry); } // * if v2 -> add the op max ts table. if dbv_id2entry == 2 { @@ -1340,24 +1233,14 @@ impl IdlSqliteWriteTransaction { .map_err(sqlite_error)?; dbv_id2entry = 3; admin_info!(entry = %dbv_id2entry, "dbv_id2entry migrated (db_op_ts)"); - ladmin_info!( - audit, - "dbv_id2entry migrated (db_op_ts) -> {}", - dbv_id2entry - ); } // * if v3 -> create name2uuid, uuid2spn, uuid2rdn. if dbv_id2entry == 3 { - self.create_name2uuid(audit) - .and_then(|_| self.create_uuid2spn(audit)) - .and_then(|_| self.create_uuid2rdn(audit))?; + self.create_name2uuid() + .and_then(|_| self.create_uuid2spn()) + .and_then(|_| self.create_uuid2rdn())?; dbv_id2entry = 4; admin_info!(entry = %dbv_id2entry, "dbv_id2entry migrated (name2uuid, uuid2spn, uuid2rdn)"); - ladmin_info!( - audit, - "dbv_id2entry migrated (name2uuid, uuid2spn, uuid2rdn) -> {}", - dbv_id2entry - ); } // * if v4 -> complete. @@ -1374,11 +1257,7 @@ impl IdlSqliteWriteTransaction { impl IdlSqlite { // ! TRACING INTEGRATED - pub fn new( - audit: &mut AuditScope, - cfg: &BackendConfig, - vacuum: bool, - ) -> Result { + pub fn new(cfg: &BackendConfig, vacuum: bool) -> Result { if cfg.path.is_empty() { debug_assert!(cfg.pool_size == 1); } @@ -1396,10 +1275,12 @@ impl IdlSqlite { immediate = true, "NOTICE: A db vacuum has been requested. This may take a long time ..." ); + /* limmediate_warning!( audit, "NOTICE: A db vacuum has been requested. 
This may take a long time ...\n" ); + */ let vconn = Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error)?; @@ -1408,7 +1289,6 @@ impl IdlSqlite { .execute_batch("PRAGMA wal_checkpoint(TRUNCATE);") .map_err(|e| { admin_error!(?e, "rusqlite wal_checkpoint error"); - ladmin_error!(audit, "rusqlite wal_checkpoint error {:?}", e); OperationError::SqliteError })?; @@ -1416,13 +1296,11 @@ impl IdlSqlite { .pragma_update(None, "journal_mode", &"DELETE") .map_err(|e| { admin_error!(?e, "rusqlite journal_mode update error"); - ladmin_error!(audit, "rusqlite journal_mode update error {:?}", e); OperationError::SqliteError })?; vconn.close().map_err(|e| { admin_error!(?e, "rusqlite db close error"); - ladmin_error!(audit, "rusqlite db close error {:?}", e); OperationError::SqliteError })?; @@ -1433,13 +1311,11 @@ impl IdlSqlite { .pragma_update(None, "page_size", &(cfg.fstype as u32)) .map_err(|e| { admin_error!(?e, "rusqlite page_size update error"); - ladmin_error!(audit, "rusqlite page_size update error {:?}", e); OperationError::SqliteError })?; vconn.execute_batch("VACUUM").map_err(|e| { admin_error!(?e, "rusqlite vacuum error"); - ladmin_error!(audit, "rusqlite vacuum error {:?}", e); OperationError::SqliteError })?; @@ -1447,18 +1323,16 @@ impl IdlSqlite { .pragma_update(None, "journal_mode", &"WAL") .map_err(|e| { admin_error!(?e, "rusqlite journal_mode update error"); - ladmin_error!(audit, "rusqlite journal_mode update error {:?}", e); OperationError::SqliteError })?; vconn.close().map_err(|e| { admin_error!(?e, "rusqlite db close error"); - ladmin_error!(audit, "rusqlite db close error {:?}", e); OperationError::SqliteError })?; admin_warn!(immediate = true, "NOTICE: db vacuum complete"); - limmediate_warning!(audit, "NOTICE: db vacuum complete\n"); + // limmediate_warning!(audit, "NOTICE: db vacuum complete\n"); }; let fs_page_size = cfg.fstype as u32; @@ -1484,7 +1358,7 @@ impl IdlSqlite { // Look at max_size and thread_pool here for perf later let pool = builder2.build(manager).map_err(|e| { admin_error!(?e, "r2d2 error"); - ladmin_error!(audit, "r2d2 error {:?}", e); + // ladmin_error!(audit, "r2d2 error {:?}", e); OperationError::SqliteError })?; @@ -1492,9 +1366,8 @@ impl IdlSqlite { } // ! TRACING INTEGRATED - pub(crate) fn get_allids_count(&self, au: &mut AuditScope) -> Result { + pub(crate) fn get_allids_count(&self) -> Result { trace!("Counting allids..."); - ltrace!(au, "Counting allids..."); #[allow(clippy::expect_used)] self.pool .try_get() @@ -1527,15 +1400,14 @@ impl IdlSqlite { #[cfg(test)] mod tests { - use crate::audit::AuditScope; use crate::be::idl_sqlite::{IdlSqlite, IdlSqliteTransaction}; use crate::be::BackendConfig; #[test] fn test_idl_sqlite_verify() { - let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4(), None); + let _ = crate::tracing_tree::test_init(); let cfg = BackendConfig::new_test(); - let be = IdlSqlite::new(&mut audit, &cfg, false).unwrap(); + let be = IdlSqlite::new(&cfg, false).unwrap(); let be_w = be.write(); let r = be_w.verify(); assert!(r.len() == 0); diff --git a/kanidmd/src/lib/be/mod.rs b/kanidmd/src/lib/be/mod.rs index b5acde493..8e7640ab9 100644 --- a/kanidmd/src/lib/be/mod.rs +++ b/kanidmd/src/lib/be/mod.rs @@ -125,29 +125,23 @@ pub struct BackendWriteTransaction<'a> { impl IdRawEntry { // ! 
TRACING INTEGRATED - fn into_dbentry(self, audit: &mut AuditScope) -> Result<(u64, DbEntry), OperationError> { + fn into_dbentry(self) -> Result<(u64, DbEntry), OperationError> { serde_cbor::from_slice(self.data.as_slice()) .map_err(|e| { admin_error!(?e, "Serde CBOR Error"); - ladmin_error!(audit, "Serde CBOR Error -> {:?}", e); OperationError::SerdeCborError }) .map(|dbe| (self.id, dbe)) } // ! TRACING INTEGRATED - fn into_entry( - self, - audit: &mut AuditScope, - ) -> Result, OperationError> { + fn into_entry(self) -> Result, OperationError> { let db_e = serde_cbor::from_slice(self.data.as_slice()).map_err(|e| { admin_error!(?e, "Serde CBOR Error"); - ladmin_error!(audit, "Serde CBOR Error -> {:?}", e); OperationError::SerdeCborError })?; // let id = u64::try_from(self.id).map_err(|_| OperationError::InvalidEntryId)?; - Entry::from_dbentry(audit, db_e, self.id) - .map_err(|_| OperationError::CorruptedEntry(self.id)) + Entry::from_dbentry(db_e, self.id).ok_or_else(|| OperationError::CorruptedEntry(self.id)) } } @@ -177,7 +171,7 @@ pub trait BackendTransaction { // Get the idl for this match self .get_idlayer() - .get_idl(au, attr, &IndexType::Equality, &idx_key)? + .get_idl(attr, &IndexType::Equality, &idx_key)? { Some(idl) => ( IdList::Indexed(idl), @@ -197,7 +191,7 @@ pub trait BackendTransaction { // Get the idl for this match self .get_idlayer() - .get_idl(au, attr, &IndexType::SubString, &idx_key)? + .get_idl(attr, &IndexType::SubString, &idx_key)? { Some(idl) => ( IdList::Indexed(idl), @@ -214,7 +208,6 @@ pub trait BackendTransaction { if idx.is_some() { // Get the idl for this match self.get_idlayer().get_idl( - au, attr, &IndexType::Presence, &"_".to_string(), @@ -609,7 +602,7 @@ pub trait BackendTransaction { } }; - let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| { + let entries = self.get_idlayer().get_identry(&idl).map_err(|e| { admin_error!(?e, "get_identry failed"); ladmin_error!(au, "get_identry failed {:?}", e); e @@ -725,7 +718,7 @@ pub trait BackendTransaction { match &idl { IdList::Indexed(idl) => Ok(!idl.is_empty()), _ => { - let entries = self.get_idlayer().get_identry(au, &idl).map_err(|e| { + let entries = self.get_idlayer().get_identry(&idl).map_err(|e| { admin_error!(?e, "get_identry failed"); ladmin_error!(au, "get_identry failed {:?}", e); e @@ -754,8 +747,8 @@ pub trait BackendTransaction { } // ! TRACING INTEGRATED - fn verify(&self, audit: &mut AuditScope) -> Vec> { - self.get_idlayer().verify(audit) + fn verify(&self) -> Vec> { + self.get_idlayer().verify() } // ! TRACING INTEGRATED @@ -780,8 +773,9 @@ pub trait BackendTransaction { }; // If the set.len > 1, check each item. 
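// A minimal sketch of the call-site shape this patch applies throughout be/mod.rs.
// The function name `decode_entry` is hypothetical, and the plain `tracing` macros
// stand in for kanidm's own `trace!`/`admin_error!` wrappers, which are assumed to
// emit `tracing` events: the explicit `audit: &mut AuditScope` parameter disappears,
// and the event is attached to whatever span (e.g. `spanned!(...)`) is currently
// active rather than appended to a threaded audit log.
use tracing::{error, trace};

fn decode_entry(raw: &[u8]) -> Result<u64, &'static str> {
    // Was: ltrace!(audit, "decoding {} bytes", raw.len());
    trace!("decoding {} bytes", raw.len());
    raw.first().copied().map(u64::from).ok_or_else(|| {
        // Was: ladmin_error!(audit, "empty entry data");
        error!("empty entry data");
        "empty entry data"
    })
}
// Callers drop the audit argument in the same way: `decode_entry(&bytes)?` rather
// than `decode_entry(&mut audit, &bytes)?`, which is exactly what `into_entry`
// above and the verification loop below now do.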
- n2u_set.iter().try_for_each(|name| { - match self.get_idlayer().name2uuid(audit, name) { + n2u_set + .iter() + .try_for_each(|name| match self.get_idlayer().name2uuid(name) { Ok(Some(idx_uuid)) => { if &idx_uuid == e_uuid { Ok(()) @@ -799,11 +793,10 @@ pub trait BackendTransaction { ladmin_error!(audit, "Invalid name2uuid state -> {:?}", r); Err(ConsistencyError::BackendIndexSync) } - } - })?; + })?; let spn = e.get_uuid2spn(); - match self.get_idlayer().uuid2spn(audit, &e_uuid) { + match self.get_idlayer().uuid2spn(&e_uuid) { Ok(Some(idx_spn)) => { if spn != idx_spn { admin_error!("Invalid uuid2spn state -> incorrect idx spn value"); @@ -819,7 +812,7 @@ pub trait BackendTransaction { }; let rdn = e.get_uuid2rdn(); - match self.get_idlayer().uuid2rdn(audit, &e_uuid) { + match self.get_idlayer().uuid2rdn(&e_uuid) { Ok(Some(idx_rdn)) => { if rdn != idx_rdn { admin_error!("Invalid uuid2rdn state -> incorrect idx rdn value"); @@ -847,7 +840,7 @@ pub trait BackendTransaction { // ! TRACING INTEGRATED fn verify_indexes(&self, audit: &mut AuditScope) -> Vec> { let idl = IdList::AllIds; - let entries = match self.get_idlayer().get_identry(audit, &idl) { + let entries = match self.get_idlayer().get_identry(&idl) { Ok(s) => s, Err(e) => { admin_error!(?e, "get_identry failure"); @@ -872,7 +865,7 @@ pub trait BackendTransaction { // load all entries into RAM, may need to change this later // if the size of the database compared to RAM is an issue let idl = IdList::AllIds; - let raw_entries: Vec = self.get_idlayer().get_identry_raw(audit, &idl)?; + let raw_entries: Vec = self.get_idlayer().get_identry_raw(&idl)?; let entries: Result, _> = raw_entries .iter() @@ -900,30 +893,18 @@ pub trait BackendTransaction { } // ! TRACING INTEGRATED - fn name2uuid( - &self, - audit: &mut AuditScope, - name: &str, - ) -> Result, OperationError> { - self.get_idlayer().name2uuid(audit, name) + fn name2uuid(&self, name: &str) -> Result, OperationError> { + self.get_idlayer().name2uuid(name) } // ! TRACING INTEGRATED - fn uuid2spn( - &self, - audit: &mut AuditScope, - uuid: &Uuid, - ) -> Result, OperationError> { - self.get_idlayer().uuid2spn(audit, uuid) + fn uuid2spn(&self, uuid: &Uuid) -> Result, OperationError> { + self.get_idlayer().uuid2spn(uuid) } // ! TRACING INTEGRATED - fn uuid2rdn( - &self, - audit: &mut AuditScope, - uuid: &Uuid, - ) -> Result, OperationError> { - self.get_idlayer().uuid2rdn(audit, uuid) + fn uuid2rdn(&self, uuid: &Uuid) -> Result, OperationError> { + self.get_idlayer().uuid2rdn(uuid) } } @@ -953,34 +934,26 @@ impl<'a> BackendTransaction for BackendReadTransaction<'a> { impl<'a> BackendReadTransaction<'a> { // ! TRACING INTEGRATED - pub fn list_indexes(&self, audit: &mut AuditScope) -> Result, OperationError> { - self.get_idlayer().list_idxs(audit) + pub fn list_indexes(&self) -> Result, OperationError> { + self.get_idlayer().list_idxs() } // ! TRACING INTEGRATED - pub fn list_id2entry( - &self, - audit: &mut AuditScope, - ) -> Result, OperationError> { - self.get_idlayer().list_id2entry(audit) + pub fn list_id2entry(&self) -> Result, OperationError> { + self.get_idlayer().list_id2entry() } // ! TRACING INTEGRATED pub fn list_index_content( &self, - audit: &mut AuditScope, index_name: &str, ) -> Result, OperationError> { - self.get_idlayer().list_index_content(audit, index_name) + self.get_idlayer().list_index_content(index_name) } // ! 
TRACING INTEGRATED - pub fn get_id2entry( - &self, - audit: &mut AuditScope, - id: u64, - ) -> Result<(u64, String), OperationError> { - self.get_idlayer().get_id2entry(audit, id) + pub fn get_id2entry(&self, id: u64) -> Result<(u64, String), OperationError> { + self.get_idlayer().get_id2entry(id) } } @@ -1025,7 +998,7 @@ impl<'a> BackendWriteTransaction<'a> { }) .collect(); - idlayer.write_identries(au, c_entries.iter())?; + idlayer.write_identries(c_entries.iter())?; idlayer.set_id2entry_max_id(id_max); @@ -1087,8 +1060,7 @@ impl<'a> BackendWriteTransaction<'a> { */ // Now, given the list of id's, update them - self.get_idlayer() - .write_identries(au, post_entries.iter())?; + self.get_idlayer().write_identries(post_entries.iter())?; // Finally, we now reindex all the changed entries. We do this by iterating and zipping // over the set, because we know the list is in the same order. @@ -1117,7 +1089,7 @@ impl<'a> BackendWriteTransaction<'a> { let id_list = entries.iter().map(|e| e.get_id()); // Now, given the list of id's, delete them. - self.get_idlayer().delete_identry(au, id_list)?; + self.get_idlayer().delete_identry(id_list)?; // Finally, purge the indexes from the entries we removed. entries @@ -1131,7 +1103,7 @@ impl<'a> BackendWriteTransaction<'a> { audit: &mut AuditScope, idxkeys: Vec, ) -> Result<(), OperationError> { - if self.is_idx_slopeyness_generated(audit)? { + if self.is_idx_slopeyness_generated()? { ltrace!(audit, "Indexing slopes available"); } else { ladmin_warning!( @@ -1227,19 +1199,19 @@ impl<'a> BackendWriteTransaction<'a> { // Write the changes out to the backend if let Some(rem) = n2u_rem { - idlayer.write_name2uuid_rem(audit, rem)? + idlayer.write_name2uuid_rem(rem)? } match u2s_act { None => {} - Some(Ok(k)) => idlayer.write_uuid2spn(audit, uuid, Some(k))?, - Some(Err(_)) => idlayer.write_uuid2spn(audit, uuid, None)?, + Some(Ok(k)) => idlayer.write_uuid2spn(uuid, Some(k))?, + Some(Err(_)) => idlayer.write_uuid2spn(uuid, None)?, } match u2r_act { None => {} - Some(Ok(k)) => idlayer.write_uuid2rdn(audit, uuid, Some(k))?, - Some(Err(_)) => idlayer.write_uuid2rdn(audit, uuid, None)?, + Some(Ok(k)) => idlayer.write_uuid2rdn(uuid, Some(k))?, + Some(Err(_)) => idlayer.write_uuid2rdn(uuid, None)?, } // Return none, mask_pre is now completed. None @@ -1261,22 +1233,22 @@ impl<'a> BackendWriteTransaction<'a> { // Write the changes out to the backend if let Some(add) = n2u_add { - idlayer.write_name2uuid_add(audit, e_uuid, add)? + idlayer.write_name2uuid_add(e_uuid, add)? } if let Some(rem) = n2u_rem { - idlayer.write_name2uuid_rem(audit, rem)? + idlayer.write_name2uuid_rem(rem)? } match u2s_act { None => {} - Some(Ok(k)) => idlayer.write_uuid2spn(audit, e_uuid, Some(k))?, - Some(Err(_)) => idlayer.write_uuid2spn(audit, e_uuid, None)?, + Some(Ok(k)) => idlayer.write_uuid2spn(e_uuid, Some(k))?, + Some(Err(_)) => idlayer.write_uuid2spn(e_uuid, None)?, } match u2r_act { None => {} - Some(Ok(k)) => idlayer.write_uuid2rdn(audit, e_uuid, Some(k))?, - Some(Err(_)) => idlayer.write_uuid2rdn(audit, e_uuid, None)?, + Some(Ok(k)) => idlayer.write_uuid2rdn(e_uuid, Some(k))?, + Some(Err(_)) => idlayer.write_uuid2rdn(e_uuid, None)?, } // Extremely Cursed - Okay, we know that self.idxmeta will NOT be changed @@ -1294,10 +1266,10 @@ impl<'a> BackendWriteTransaction<'a> { match act { Ok((attr, itype, idx_key)) => { ltrace!(audit, "Adding {:?} idx -> {:?}: {:?}", itype, attr, idx_key); - match idlayer.get_idl(audit, attr, itype, idx_key)? { + match idlayer.get_idl(attr, itype, idx_key)? 
{ Some(mut idl) => { idl.insert_id(e_id); - idlayer.write_idl(audit, attr, itype, idx_key, &idl) + idlayer.write_idl(attr, itype, idx_key, &idl) } None => { ladmin_error!( @@ -1311,10 +1283,10 @@ impl<'a> BackendWriteTransaction<'a> { } Err((attr, itype, idx_key)) => { ltrace!(audit, "Removing {:?} idx -> {:?}: {:?}", itype, attr, idx_key); - match idlayer.get_idl(audit, attr, itype, idx_key)? { + match idlayer.get_idl(attr, itype, idx_key)? { Some(mut idl) => { idl.remove_id(e_id); - idlayer.write_idl(audit, attr, itype, idx_key, &idl) + idlayer.write_idl(attr, itype, idx_key, &idl) } None => { ladmin_error!( @@ -1336,7 +1308,7 @@ impl<'a> BackendWriteTransaction<'a> { &self, audit: &mut AuditScope, ) -> Result, OperationError> { - let idx_table_list = self.get_idlayer().list_idxs(audit)?; + let idx_table_list = self.get_idlayer().list_idxs()?; // Turn the vec to a real set let idx_table_set: HashSet<_> = idx_table_list.into_iter().collect(); @@ -1364,18 +1336,18 @@ impl<'a> BackendWriteTransaction<'a> { let idlayer = self.get_idlayer(); // Create name2uuid and uuid2name ltrace!(audit, "Creating index -> name2uuid"); - idlayer.create_name2uuid(audit)?; + idlayer.create_name2uuid()?; ltrace!(audit, "Creating index -> uuid2spn"); - idlayer.create_uuid2spn(audit)?; + idlayer.create_uuid2spn()?; ltrace!(audit, "Creating index -> uuid2rdn"); - idlayer.create_uuid2rdn(audit)?; + idlayer.create_uuid2rdn()?; self.idxmeta .idxkeys .keys() - .try_for_each(|ikey| idlayer.create_idx(audit, &ikey.attr, &ikey.itype)) + .try_for_each(|ikey| idlayer.create_idx(&ikey.attr, &ikey.itype)) } pub fn upgrade_reindex(&self, audit: &mut AuditScope, v: i64) -> Result<(), OperationError> { @@ -1397,7 +1369,7 @@ impl<'a> BackendWriteTransaction<'a> { pub fn reindex(&self, audit: &mut AuditScope) -> Result<(), OperationError> { let idlayer = self.get_idlayer(); // Purge the idxs - unsafe { idlayer.purge_idxs(audit)? }; + unsafe { idlayer.purge_idxs()? }; // Using the index metadata on the txn, create all our idx tables self.create_idxs(audit)?; @@ -1406,7 +1378,7 @@ impl<'a> BackendWriteTransaction<'a> { // Future idea: Do this in batches of X amount to limit memory // consumption. let idl = IdList::AllIds; - let entries = idlayer.get_identry(audit, &idl).map_err(|e| { + let entries = idlayer.get_identry(&idl).map_err(|e| { ladmin_error!(audit, "get_identry failure {:?}", e); e })?; @@ -1430,10 +1402,10 @@ impl<'a> BackendWriteTransaction<'a> { })?; limmediate_warning!(audit, " reindexed {} entries ✅\n", count); limmediate_warning!(audit, "Optimising Indexes ... "); - idlayer.optimise_dirty_idls(audit); + idlayer.optimise_dirty_idls(); limmediate_warning!(audit, "done ✅\n"); limmediate_warning!(audit, "Calculating Index Optimisation Slopes ... 
"); - idlayer.analyse_idx_slopes(audit).map_err(|e| { + idlayer.analyse_idx_slopes().map_err(|e| { ladmin_error!(audit, "index optimisation failed -> {:?}", e); e })?; @@ -1442,23 +1414,22 @@ impl<'a> BackendWriteTransaction<'a> { } #[cfg(test)] - pub fn purge_idxs(&self, audit: &mut AuditScope) -> Result<(), OperationError> { - unsafe { self.get_idlayer().purge_idxs(audit) } + pub fn purge_idxs(&self) -> Result<(), OperationError> { + unsafe { self.get_idlayer().purge_idxs() } } #[cfg(test)] pub fn load_test_idl( &self, - audit: &mut AuditScope, attr: &String, itype: &IndexType, idx_key: &String, ) -> Result, OperationError> { - self.get_idlayer().get_idl(audit, attr, itype, idx_key) + self.get_idlayer().get_idl(attr, itype, idx_key) } - fn is_idx_slopeyness_generated(&self, audit: &mut AuditScope) -> Result { - self.get_idlayer().is_idx_slopeyness_generated(audit) + fn is_idx_slopeyness_generated(&self) -> Result { + self.get_idlayer().is_idx_slopeyness_generated() } fn get_idx_slope( @@ -1469,7 +1440,7 @@ impl<'a> BackendWriteTransaction<'a> { // Do we have the slopeyness? let slope = self .get_idlayer() - .get_idx_slope(audit, ikey)? + .get_idx_slope(ikey)? .unwrap_or_else(|| get_idx_slope_default(ikey)); ltrace!(audit, "index slope - {:?} -> {:?}", ikey, slope); Ok(slope) @@ -1484,7 +1455,7 @@ impl<'a> BackendWriteTransaction<'a> { OperationError::FsError })?; - unsafe { idlayer.purge_id2entry(audit) }.map_err(|e| { + unsafe { idlayer.purge_id2entry() }.map_err(|e| { ladmin_error!(audit, "purge_id2entry failed {:?}", e); e })?; @@ -1508,12 +1479,12 @@ impl<'a> BackendWriteTransaction<'a> { }) .collect(); - idlayer.write_identries_raw(audit, identries?.into_iter())?; + idlayer.write_identries_raw(identries?.into_iter())?; // Reindex now we are loaded. self.reindex(audit)?; - let vr = self.verify(audit); + let vr = self.verify(); if vr.is_empty() { Ok(()) } else { @@ -1521,7 +1492,7 @@ impl<'a> BackendWriteTransaction<'a> { } } - pub fn commit(self, audit: &mut AuditScope) -> Result<(), OperationError> { + pub fn commit(self, _audit: &mut AuditScope) -> Result<(), OperationError> { let BackendWriteTransaction { idlayer, idxmeta: _, @@ -1531,7 +1502,7 @@ impl<'a> BackendWriteTransaction<'a> { // Unwrap the Cell we have finished with it. let idlayer = idlayer.into_inner(); - idlayer.commit(audit).map(|()| { + idlayer.commit().map(|()| { idxmeta_wr.commit(); }) } @@ -1644,7 +1615,7 @@ impl Backend { // this has a ::memory() type, but will path == "" work? lperf_trace_segment!(audit, "be::new", || { - let idlayer = Arc::new(IdlArcSqlite::new(audit, &cfg, vacuum)?); + let idlayer = Arc::new(IdlArcSqlite::new(&cfg, vacuum)?); let be = Backend { cfg, idlayer, @@ -1657,7 +1628,7 @@ impl Backend { // the indexing subsystem here. let r = { let mut idl_write = be.idlayer.write(); - idl_write.setup(audit).and_then(|_| idl_write.commit(audit)) + idl_write.setup().and_then(|_| idl_write.commit()) }; ltrace!(audit, "be new setup: {:?}", r); @@ -1731,14 +1702,7 @@ mod tests { macro_rules! run_test { ($test_fn:expr) => {{ - use env_logger; - ::std::env::set_var("RUST_LOG", "kanidm=debug"); - let _ = env_logger::builder() - .format_timestamp(None) - .format_level(false) - .is_test(true) - .try_init(); - + let _ = crate::tracing_tree::test_init(); let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4(), None); // This is a demo idxmeta, purely for testing. @@ -1818,9 +1782,9 @@ mod tests { } macro_rules! 
idl_state { - ($audit:expr, $be:expr, $attr:expr, $itype:expr, $idx_key:expr, $expect:expr) => {{ + ($be:expr, $attr:expr, $itype:expr, $idx_key:expr, $expect:expr) => {{ let t_idl = $be - .load_test_idl($audit, &$attr.to_string(), &$itype, &$idx_key.to_string()) + .load_test_idl(&$attr.to_string(), &$itype, &$idx_key.to_string()) .expect("IdList Load failed"); let t = $expect.map(|v: Vec| IDLBitRange::from_iter(v)); assert_eq!(t_idl, t); @@ -2071,7 +2035,7 @@ mod tests { be.restore(audit, DB_BACKUP_FILE_NAME) .expect("Restore failed!"); - assert!(be.verify(audit).len() == 0); + assert!(be.verify().len() == 0); }); } @@ -2130,7 +2094,7 @@ mod tests { be.restore(audit, DB_BACKUP2_FILE_NAME) .expect("Restore failed!"); - assert!(be.verify(audit).len() == 0); + assert!(be.verify().len() == 0); }); } @@ -2179,7 +2143,7 @@ mod tests { be.create(audit, vec![e1.clone(), e2.clone()]).unwrap(); // purge indexes - be.purge_idxs(audit).unwrap(); + be.purge_idxs().unwrap(); // Check they are gone let missing = be.missing_idxs(audit).unwrap(); assert!(missing.len() == 7); @@ -2189,35 +2153,13 @@ mod tests { assert!(missing.is_empty()); // check name and uuid ids on eq, sub, pres - idl_state!( - audit, - be, - "name", - IndexType::Equality, - "william", - Some(vec![1]) - ); + idl_state!(be, "name", IndexType::Equality, "william", Some(vec![1])); + + idl_state!(be, "name", IndexType::Equality, "claire", Some(vec![2])); + + idl_state!(be, "name", IndexType::Presence, "_", Some(vec![1, 2])); idl_state!( - audit, - be, - "name", - IndexType::Equality, - "claire", - Some(vec![2]) - ); - - idl_state!( - audit, - be, - "name", - IndexType::Presence, - "_", - Some(vec![1, 2]) - ); - - idl_state!( - audit, be, "uuid", IndexType::Equality, @@ -2226,7 +2168,6 @@ mod tests { ); idl_state!( - audit, be, "uuid", IndexType::Equality, @@ -2234,19 +2175,11 @@ mod tests { Some(vec![2]) ); - idl_state!( - audit, - be, - "uuid", - IndexType::Presence, - "_", - Some(vec![1, 2]) - ); + idl_state!(be, "uuid", IndexType::Presence, "_", Some(vec![1, 2])); // Show what happens with empty idl_state!( - audit, be, "name", IndexType::Equality, @@ -2255,7 +2188,6 @@ mod tests { ); idl_state!( - audit, be, "uuid", IndexType::Equality, @@ -2265,7 +2197,6 @@ mod tests { let uuid_p_idl = be .load_test_idl( - audit, &"not_indexed".to_string(), &IndexType::Presence, &"_".to_string(), @@ -2277,15 +2208,15 @@ mod tests { let claire_uuid = Uuid::parse_str("bd651620-00dd-426b-aaa0-4494f7b7906f").unwrap(); let william_uuid = Uuid::parse_str("db237e8a-0079-4b8c-8a56-593b22aa44d1").unwrap(); - assert!(be.name2uuid(audit, "claire") == Ok(Some(claire_uuid))); - assert!(be.name2uuid(audit, "william") == Ok(Some(william_uuid))); - assert!(be.name2uuid(audit, "db237e8a-0079-4b8c-8a56-593b22aa44d1") == Ok(None)); + assert!(be.name2uuid("claire") == Ok(Some(claire_uuid))); + assert!(be.name2uuid("william") == Ok(Some(william_uuid))); + assert!(be.name2uuid("db237e8a-0079-4b8c-8a56-593b22aa44d1") == Ok(None)); // check uuid2spn - assert!(be.uuid2spn(audit, &claire_uuid) == Ok(Some(Value::new_iname("claire")))); - assert!(be.uuid2spn(audit, &william_uuid) == Ok(Some(Value::new_iname("william")))); + assert!(be.uuid2spn(&claire_uuid) == Ok(Some(Value::new_iname("claire")))); + assert!(be.uuid2spn(&william_uuid) == Ok(Some(Value::new_iname("william")))); // check uuid2rdn - assert!(be.uuid2rdn(audit, &claire_uuid) == Ok(Some("name=claire".to_string()))); - assert!(be.uuid2rdn(audit, &william_uuid) == Ok(Some("name=william".to_string()))); + 
assert!(be.uuid2rdn(&claire_uuid) == Ok(Some("name=claire".to_string()))); + assert!(be.uuid2rdn(&william_uuid) == Ok(Some("name=william".to_string()))); }); } @@ -2303,19 +2234,11 @@ mod tests { let rset = be.create(audit, vec![e1.clone()]).unwrap(); - idl_state!( - audit, - be, - "name", - IndexType::Equality, - "william", - Some(vec![1]) - ); + idl_state!(be, "name", IndexType::Equality, "william", Some(vec![1])); - idl_state!(audit, be, "name", IndexType::Presence, "_", Some(vec![1])); + idl_state!(be, "name", IndexType::Presence, "_", Some(vec![1])); idl_state!( - audit, be, "uuid", IndexType::Equality, @@ -2323,36 +2246,21 @@ mod tests { Some(vec![1]) ); - idl_state!(audit, be, "uuid", IndexType::Presence, "_", Some(vec![1])); + idl_state!(be, "uuid", IndexType::Presence, "_", Some(vec![1])); let william_uuid = Uuid::parse_str("db237e8a-0079-4b8c-8a56-593b22aa44d1").unwrap(); - assert!(be.name2uuid(audit, "william") == Ok(Some(william_uuid))); - assert!(be.uuid2spn(audit, &william_uuid) == Ok(Some(Value::from("william")))); - assert!(be.uuid2rdn(audit, &william_uuid) == Ok(Some("name=william".to_string()))); + assert!(be.name2uuid("william") == Ok(Some(william_uuid))); + assert!(be.uuid2spn(&william_uuid) == Ok(Some(Value::from("william")))); + assert!(be.uuid2rdn(&william_uuid) == Ok(Some("name=william".to_string()))); // == Now we delete, and assert we removed the items. be.delete(audit, &rset).unwrap(); - idl_state!( - audit, - be, - "name", - IndexType::Equality, - "william", - Some(Vec::new()) - ); + idl_state!(be, "name", IndexType::Equality, "william", Some(Vec::new())); + + idl_state!(be, "name", IndexType::Presence, "_", Some(Vec::new())); idl_state!( - audit, - be, - "name", - IndexType::Presence, - "_", - Some(Vec::new()) - ); - - idl_state!( - audit, be, "uuid", IndexType::Equality, @@ -2360,18 +2268,11 @@ mod tests { Some(Vec::new()) ); - idl_state!( - audit, - be, - "uuid", - IndexType::Presence, - "_", - Some(Vec::new()) - ); + idl_state!(be, "uuid", IndexType::Presence, "_", Some(Vec::new())); - assert!(be.name2uuid(audit, "william") == Ok(None)); - assert!(be.uuid2spn(audit, &william_uuid) == Ok(None)); - assert!(be.uuid2rdn(audit, &william_uuid) == Ok(None)); + assert!(be.name2uuid("william") == Ok(None)); + assert!(be.uuid2spn(&william_uuid) == Ok(None)); + assert!(be.uuid2rdn(&william_uuid) == Ok(None)); }) } @@ -2406,19 +2307,11 @@ mod tests { // Now remove e1, e3. 
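// These tests assert index state through the slimmed-down `idl_state!` macro
// defined earlier in this module (the audit argument is gone, everything else is
// unchanged). For reference, a single invocation such as
//
//     idl_state!(be, "name", IndexType::Equality, "claire", Some(vec![2]));
//
// expands to roughly:
//
//     let t_idl = be
//         .load_test_idl(&"name".to_string(), &IndexType::Equality, &"claire".to_string())
//         .expect("IdList Load failed");
//     let t = Some(vec![2]).map(|v: Vec<u64>| IDLBitRange::from_iter(v));
//     assert_eq!(t_idl, t);
//
// so the checks after the delete below assert that ids 1 and 3 have been removed
// from the name/uuid indexes while id 2 ("claire") remains.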
be.delete(audit, &rset).unwrap(); - idl_state!( - audit, - be, - "name", - IndexType::Equality, - "claire", - Some(vec![2]) - ); + idl_state!(be, "name", IndexType::Equality, "claire", Some(vec![2])); - idl_state!(audit, be, "name", IndexType::Presence, "_", Some(vec![2])); + idl_state!(be, "name", IndexType::Presence, "_", Some(vec![2])); idl_state!( - audit, be, "uuid", IndexType::Equality, @@ -2426,23 +2319,23 @@ mod tests { Some(vec![2]) ); - idl_state!(audit, be, "uuid", IndexType::Presence, "_", Some(vec![2])); + idl_state!(be, "uuid", IndexType::Presence, "_", Some(vec![2])); let claire_uuid = Uuid::parse_str("bd651620-00dd-426b-aaa0-4494f7b7906f").unwrap(); let william_uuid = Uuid::parse_str("db237e8a-0079-4b8c-8a56-593b22aa44d1").unwrap(); let lucy_uuid = Uuid::parse_str("7b23c99d-c06b-4a9a-a958-3afa56383e1d").unwrap(); - assert!(be.name2uuid(audit, "claire") == Ok(Some(claire_uuid))); - assert!(be.uuid2spn(audit, &claire_uuid) == Ok(Some(Value::from("claire")))); - assert!(be.uuid2rdn(audit, &claire_uuid) == Ok(Some("name=claire".to_string()))); + assert!(be.name2uuid("claire") == Ok(Some(claire_uuid))); + assert!(be.uuid2spn(&claire_uuid) == Ok(Some(Value::from("claire")))); + assert!(be.uuid2rdn(&claire_uuid) == Ok(Some("name=claire".to_string()))); - assert!(be.name2uuid(audit, "william") == Ok(None)); - assert!(be.uuid2spn(audit, &william_uuid) == Ok(None)); - assert!(be.uuid2rdn(audit, &william_uuid) == Ok(None)); + assert!(be.name2uuid("william") == Ok(None)); + assert!(be.uuid2spn(&william_uuid) == Ok(None)); + assert!(be.uuid2rdn(&william_uuid) == Ok(None)); - assert!(be.name2uuid(audit, "lucy") == Ok(None)); - assert!(be.uuid2spn(audit, &lucy_uuid) == Ok(None)); - assert!(be.uuid2rdn(audit, &lucy_uuid) == Ok(None)); + assert!(be.name2uuid("lucy") == Ok(None)); + assert!(be.uuid2spn(&lucy_uuid) == Ok(None)); + assert!(be.uuid2rdn(&lucy_uuid) == Ok(None)); }) } @@ -2475,27 +2368,20 @@ mod tests { be.modify(audit, &rset, &vec![ce1]).unwrap(); // Now check the idls - idl_state!( - audit, - be, - "name", - IndexType::Equality, - "claire", - Some(vec![1]) - ); + idl_state!(be, "name", IndexType::Equality, "claire", Some(vec![1])); - idl_state!(audit, be, "name", IndexType::Presence, "_", Some(vec![1])); + idl_state!(be, "name", IndexType::Presence, "_", Some(vec![1])); - idl_state!(audit, be, "tb", IndexType::Equality, "test", Some(vec![1])); + idl_state!(be, "tb", IndexType::Equality, "test", Some(vec![1])); - idl_state!(audit, be, "ta", IndexType::Equality, "test", Some(vec![])); + idl_state!(be, "ta", IndexType::Equality, "test", Some(vec![])); // let claire_uuid = Uuid::parse_str("bd651620-00dd-426b-aaa0-4494f7b7906f").unwrap(); let william_uuid = Uuid::parse_str("db237e8a-0079-4b8c-8a56-593b22aa44d1").unwrap(); - assert!(be.name2uuid(audit, "william") == Ok(None)); - assert!(be.name2uuid(audit, "claire") == Ok(Some(william_uuid))); - assert!(be.uuid2spn(audit, &william_uuid) == Ok(Some(Value::from("claire")))); - assert!(be.uuid2rdn(audit, &william_uuid) == Ok(Some("name=claire".to_string()))); + assert!(be.name2uuid("william") == Ok(None)); + assert!(be.name2uuid("claire") == Ok(Some(william_uuid))); + assert!(be.uuid2spn(&william_uuid) == Ok(Some(Value::from("claire")))); + assert!(be.uuid2rdn(&william_uuid) == Ok(Some("name=claire".to_string()))); }) } @@ -2522,17 +2408,9 @@ mod tests { be.modify(audit, &rset, &vec![ce1]).unwrap(); - idl_state!( - audit, - be, - "name", - IndexType::Equality, - "claire", - Some(vec![1]) - ); + idl_state!(be, "name", 
IndexType::Equality, "claire", Some(vec![1])); idl_state!( - audit, be, "uuid", IndexType::Equality, @@ -2540,34 +2418,26 @@ mod tests { Some(vec![1]) ); - idl_state!(audit, be, "name", IndexType::Presence, "_", Some(vec![1])); - idl_state!(audit, be, "uuid", IndexType::Presence, "_", Some(vec![1])); + idl_state!(be, "name", IndexType::Presence, "_", Some(vec![1])); + idl_state!(be, "uuid", IndexType::Presence, "_", Some(vec![1])); idl_state!( - audit, be, "uuid", IndexType::Equality, "db237e8a-0079-4b8c-8a56-593b22aa44d1", Some(Vec::new()) ); - idl_state!( - audit, - be, - "name", - IndexType::Equality, - "william", - Some(Vec::new()) - ); + idl_state!(be, "name", IndexType::Equality, "william", Some(Vec::new())); let claire_uuid = Uuid::parse_str("04091a7a-6ce4-42d2-abf5-c2ce244ac9e8").unwrap(); let william_uuid = Uuid::parse_str("db237e8a-0079-4b8c-8a56-593b22aa44d1").unwrap(); - assert!(be.name2uuid(audit, "william") == Ok(None)); - assert!(be.name2uuid(audit, "claire") == Ok(Some(claire_uuid))); - assert!(be.uuid2spn(audit, &william_uuid) == Ok(None)); - assert!(be.uuid2rdn(audit, &william_uuid) == Ok(None)); - assert!(be.uuid2spn(audit, &claire_uuid) == Ok(Some(Value::from("claire")))); - assert!(be.uuid2rdn(audit, &claire_uuid) == Ok(Some("name=claire".to_string()))); + assert!(be.name2uuid("william") == Ok(None)); + assert!(be.name2uuid("claire") == Ok(Some(claire_uuid))); + assert!(be.uuid2spn(&william_uuid) == Ok(None)); + assert!(be.uuid2rdn(&william_uuid) == Ok(None)); + assert!(be.uuid2spn(&claire_uuid) == Ok(Some(Value::from("claire")))); + assert!(be.uuid2rdn(&claire_uuid) == Ok(Some("name=claire".to_string()))); }) } @@ -2867,7 +2737,7 @@ mod tests { run_test!(|audit: &mut AuditScope, be: &mut BackendWriteTransaction| { // Test where the index is in schema but not created (purge idxs) // should fall back to an empty set because we can't satisfy the term - be.purge_idxs(audit).unwrap(); + be.purge_idxs().unwrap(); debug!("{:?}", be.missing_idxs(audit).unwrap()); let f_eq = unsafe { filter_resolved!(f_eq("name", PartialValue::new_utf8s("william"))) }; @@ -2913,7 +2783,7 @@ mod tests { // If the slopes haven't been generated yet, there are some hardcoded values // that we can use instead. They aren't generated until a first re-index. - assert!(!be.is_idx_slopeyness_generated(audit).unwrap()); + assert!(!be.is_idx_slopeyness_generated().unwrap()); let ta_eq_slope = be .get_idx_slope(audit, &IdxKey::new("ta", IndexType::Equality)) @@ -2947,7 +2817,7 @@ mod tests { // Now check slope generation for the values. Today these are calculated // at reindex time, so we now perform the re-index. 
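// Why the reindex matters here: per the reindex() body earlier in this patch, it
// purges and rebuilds every index, then runs optimise_dirty_idls() followed by
// analyse_idx_slopes(), which populates the idxslope_analysis table that
// get_idx_slope() consults. Until that has happened, the write transaction falls
// back to the hardcoded defaults via the helper shown above:
//
//     let slope = self
//         .get_idlayer()
//         .get_idx_slope(ikey)?
//         .unwrap_or_else(|| get_idx_slope_default(ikey));
//
// so the slope values checked before this point are the defaults, and the ones
// checked after the reindex below come from the analysed data.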
assert!(be.reindex(audit).is_ok()); - assert!(be.is_idx_slopeyness_generated(audit).unwrap()); + assert!(be.is_idx_slopeyness_generated().unwrap()); let ta_eq_slope = be .get_idx_slope(audit, &IdxKey::new("ta", IndexType::Equality)) diff --git a/kanidmd/src/lib/core/mod.rs b/kanidmd/src/lib/core/mod.rs index e032090c3..1b5bf1f29 100644 --- a/kanidmd/src/lib/core/mod.rs +++ b/kanidmd/src/lib/core/mod.rs @@ -142,7 +142,7 @@ pub fn dbscan_list_indexes_core(config: &Configuration) { let be = dbscan_setup_be!(audit, &config); let be_rotxn = be.read(); - match be_rotxn.list_indexes(&mut audit) { + match be_rotxn.list_indexes() { Ok(mut idx_list) => { idx_list.sort_unstable(); idx_list.iter().for_each(|idx_name| { @@ -165,7 +165,7 @@ pub fn dbscan_list_id2entry_core(config: &Configuration) { let be = dbscan_setup_be!(audit, &config); let be_rotxn = be.read(); - match be_rotxn.list_id2entry(&mut audit) { + match be_rotxn.list_id2entry() { Ok(mut id_list) => { id_list.sort_unstable_by_key(|k| k.0); id_list.iter().for_each(|(id, value)| { @@ -195,7 +195,7 @@ pub fn dbscan_list_index_core(config: &Configuration, index_name: &str) { let be = dbscan_setup_be!(audit, &config); let be_rotxn = be.read(); - match be_rotxn.list_index_content(&mut audit, index_name) { + match be_rotxn.list_index_content(index_name) { Ok(mut idx_list) => { idx_list.sort_unstable_by(|a, b| a.0.cmp(&b.0)); idx_list.iter().for_each(|(key, value)| { @@ -218,7 +218,7 @@ pub fn dbscan_get_id2entry_core(config: &Configuration, id: u64) { let be = dbscan_setup_be!(audit, &config); let be_rotxn = be.read(); - match be_rotxn.get_id2entry(&mut audit, id) { + match be_rotxn.get_id2entry(id) { Ok((id, value)) => println!("{:>8}: {}", id, value), Err(e) => { audit.write_log(); diff --git a/kanidmd/src/lib/entry.rs b/kanidmd/src/lib/entry.rs index 29f2ef13f..de9866818 100644 --- a/kanidmd/src/lib/entry.rs +++ b/kanidmd/src/lib/entry.rs @@ -1294,9 +1294,7 @@ impl Entry { } // ! TRACING INTEGRATED - // Why is this returning a `Result` when we could just do `Option` - // How did this even pass clippy? - pub fn from_dbentry(au: &mut AuditScope, db_e: DbEntry, id: u64) -> Result { + pub fn from_dbentry(db_e: DbEntry, id: u64) -> Option { // Convert attrs from db format to value let r_attrs: Result, ()> = match db_e.ent { DbEntryVers::V1(v1) => v1 @@ -1309,7 +1307,6 @@ impl Entry { Ok(vv) => Ok((k, vv)), Err(()) => { admin_error!(value = ?k, "from_dbentry failed"); - ladmin_error!(au, "from_dbentry failed on value {:?}", k); Err(()) } } @@ -1317,18 +1314,15 @@ impl Entry { .collect(), }; - let attrs = r_attrs?; + let attrs = r_attrs.ok()?; let uuid: Uuid = *match attrs.get("uuid") { Some(vs) => vs.iter().take(1).next(), None => None, } - .ok_or(())? - // Now map value -> uuid - .to_uuid() - .ok_or(())?; + .and_then(|v| v.to_uuid())?; - Ok(Entry { + Some(Entry { valid: EntrySealed { uuid }, state: EntryCommitted { id }, attrs, diff --git a/kanidmd/src/lib/server.rs b/kanidmd/src/lib/server.rs index ad40c5583..9d90d3c0f 100644 --- a/kanidmd/src/lib/server.rs +++ b/kanidmd/src/lib/server.rs @@ -286,12 +286,12 @@ pub trait QueryServerTransaction<'a> { // Remember, we don't care if the name is invalid, because search // will validate/normalise the filter we construct for us. COOL! // ! TRACING INTEGRATED - fn name_to_uuid(&self, audit: &mut AuditScope, name: &str) -> Result { + fn name_to_uuid(&self, _audit: &mut AuditScope, name: &str) -> Result { // Is it just a uuid? 
         Uuid::parse_str(name).or_else(|_| {
             let lname = name.to_lowercase();
             self.get_be_txn()
-                .name2uuid(audit, lname.as_str())?
+                .name2uuid(lname.as_str())?
                 .ok_or(OperationError::NoMatchingEntries) // should we log this?
         })
     }
@@ -299,10 +299,10 @@ pub trait QueryServerTransaction<'a> {
     // ! TRACING INTEGRATED
     fn uuid_to_spn(
         &self,
-        audit: &mut AuditScope,
+        _audit: &mut AuditScope,
         uuid: &Uuid,
     ) -> Result<Option<Value>, OperationError> {
-        let r = self.get_be_txn().uuid2spn(audit, uuid)?;
+        let r = self.get_be_txn().uuid2spn(uuid)?;
 
         if let Some(ref n) = r {
             // Shouldn't we be doing more graceful error handling here?
@@ -314,10 +314,10 @@ pub trait QueryServerTransaction<'a> {
     }
 
     // ! TRACING INTEGRATED
-    fn uuid_to_rdn(&self, audit: &mut AuditScope, uuid: &Uuid) -> Result<String, OperationError> {
+    fn uuid_to_rdn(&self, _audit: &mut AuditScope, uuid: &Uuid) -> Result<String, OperationError> {
         // If we have a some, pass it on, else unwrap into a default.
         self.get_be_txn()
-            .uuid2rdn(audit, uuid)
+            .uuid2rdn(uuid)
             .map(|v| v.unwrap_or_else(|| format!("uuid={}", uuid.to_hyphenated_ref())))
     }
 
@@ -844,7 +844,7 @@ impl<'a> QueryServerReadTransaction<'a> {
         // If we fail after backend, we need to return NOW because we can't
         // assert any other faith in the DB states.
         // * backend
-        let be_errs = self.get_be_txn().verify(audit);
+        let be_errs = self.get_be_txn().verify();
 
         if !be_errs.is_empty() {
             return be_errs;