Improve IDL exists behaviour, improve memberof verification
Again to improve test performance. This improves the validation of idx existence to be a faster SQLite call and caches the results as needed. Memberof was taking up a large amount of time in the verify phases of test finalisation, so a better in-memory version has been added.
This commit is contained in: parent 46161564d9 · commit 2309aa83bd
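The heart of the backend change is a small existence cache consulted before any IDL lookup: `None` means unknown (ask SQLite once and remember the answer), `Some(true)` means the index table is known to exist, and `Some(false)` short-circuits the query entirely. A minimal sketch of that tri-state pattern, using a plain `HashMap` in place of the concurrent `ARCache` the real backend uses (names and keys here are illustrative, not kanidm's types):

```rust
use std::collections::HashMap;

/// Sketch: (attribute name, index type) -> "does the index table exist?".
struct IdxExistsCache {
    cache: HashMap<(String, String), bool>,
}

impl IdxExistsCache {
    fn new() -> Self {
        Self { cache: HashMap::new() }
    }

    /// Absent from the cache => unknown, probe the database once and remember.
    /// Cached false => known missing, skip the SQLite call entirely.
    /// Cached true  => known present.
    fn exists(&mut self, key: (String, String), db_probe: impl FnOnce() -> bool) -> bool {
        if let Some(&known) = self.cache.get(&key) {
            return known;
        }
        let found = db_probe();
        self.cache.insert(key, found);
        found
    }
}

fn main() {
    let mut c = IdxExistsCache::new();
    // First call probes the "database"; the second is answered from cache.
    assert!(!c.exists(("name".into(), "EQUALITY".into()), || false));
    assert!(!c.exists(("name".into(), "EQUALITY".into()), || unreachable!()));
}
```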
Changes under server/lib/src:
```diff
@@ -18,7 +18,8 @@ use crate::be::idl_sqlite::{
     IdlSqlite, IdlSqliteReadTransaction, IdlSqliteTransaction, IdlSqliteWriteTransaction,
 };
 use crate::be::idxkey::{
-    IdlCacheKey, IdlCacheKeyRef, IdlCacheKeyToRef, IdxKey, IdxKeyRef, IdxKeyToRef, IdxSlope,
+    IdlCacheKey, IdlCacheKeyRef, IdlCacheKeyToRef, IdxKey, IdxKeyRef, IdxKeyToRef, IdxNameKey,
+    IdxSlope,
 };
 use crate::be::keystorage::{KeyHandle, KeyHandleId};
 use crate::be::{BackendConfig, IdList, IdRawEntry};
@@ -35,6 +36,10 @@ const DEFAULT_NAME_CACHE_RATIO: usize = 8;
 const DEFAULT_CACHE_RMISS: usize = 0;
 const DEFAULT_CACHE_WMISS: usize = 0;
+
+const DEFAULT_IDX_CACHE_RMISS: usize = 8;
+const DEFAULT_IDX_CACHE_WMISS: usize = 16;
+const DEFAULT_IDX_EXISTS_TARGET: usize = 256;

 #[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
 enum NameCacheKey {
     Name2Uuid(String),
@@ -55,6 +60,9 @@ pub struct IdlArcSqlite {
     entry_cache: ARCache<u64, Arc<EntrySealedCommitted>>,
     idl_cache: ARCache<IdlCacheKey, Box<IDLBitRange>>,
     name_cache: ARCache<NameCacheKey, NameCacheValue>,
+
+    idx_exists_cache: ARCache<IdxNameKey, bool>,
+
     op_ts_max: CowCell<Option<Duration>>,
     allids: CowCell<IDLBitRange>,
     maxid: CowCell<u64>,
@@ -66,6 +74,8 @@ pub struct IdlArcSqliteReadTransaction<'a> {
     entry_cache: ARCacheReadTxn<'a, u64, Arc<EntrySealedCommitted>, ()>,
     idl_cache: ARCacheReadTxn<'a, IdlCacheKey, Box<IDLBitRange>, ()>,
     name_cache: ARCacheReadTxn<'a, NameCacheKey, NameCacheValue, ()>,
+
+    idx_exists_cache: ARCacheReadTxn<'a, IdxNameKey, bool, ()>,
     allids: CowCellReadTxn<IDLBitRange>,
 }

@@ -74,6 +84,9 @@ pub struct IdlArcSqliteWriteTransaction<'a> {
     entry_cache: ARCacheWriteTxn<'a, u64, Arc<EntrySealedCommitted>, ()>,
     idl_cache: ARCacheWriteTxn<'a, IdlCacheKey, Box<IDLBitRange>, ()>,
     name_cache: ARCacheWriteTxn<'a, NameCacheKey, NameCacheValue, ()>,
+
+    idx_exists_cache: ARCacheWriteTxn<'a, IdxNameKey, bool, ()>,
+
     op_ts_max: CowCellWriteTxn<'a, Option<Duration>>,
     allids: CowCellWriteTxn<'a, IDLBitRange>,
     maxid: CowCellWriteTxn<'a, u64>,
@@ -178,8 +191,8 @@ macro_rules! get_idl {
         // or smaller type. Perhaps even a small cache of the IdlCacheKeys that
         // are allocated to reduce some allocs? Probably over thinking it at
         // this point.
         //
-        // First attempt to get from this cache.
+        // Now attempt to get from this cache.
         let cache_key = IdlCacheKeyRef {
             a: $attr,
             i: $itype,
@@ -195,16 +208,47 @@ macro_rules! get_idl {
             );
             return Ok(Some(data.as_ref().clone()));
         }
+
+        // If it was a miss, does the idx actually exist in the DB?
+        let idx_key = IdxNameKey {
+            a: $attr.clone(),
+            i: $itype,
+        };
+        let idx_r = $self.idx_exists_cache.get(&idx_key);
+        if idx_r == Some(&false) {
+            // The idx does not exist - bail early.
+            return Ok(None)
+        }
+
+        // The table either exists and we don't have data on it yet,
+        // or it does not exist and we need to hear back from the lower level.
+
         // If miss, get from db *and* insert to the cache.
         let db_r = $self.db.get_idl($attr, $itype, $idx_key)?;
+
         if let Some(ref idl) = db_r {
+            if idx_r == None {
+                // It exists, so track that data, because we weren't
+                // previously tracking it.
+                $self.idx_exists_cache.insert(idx_key, true)
+            }
+
             let ncache_key = IdlCacheKey {
                 a: $attr.clone(),
                 i: $itype.clone(),
                 k: $idx_key.into(),
             };
             $self.idl_cache.insert(ncache_key, Box::new(idl.clone()))
-        }
+        } else {
+            // The DB was unable to return this idx because the table backing the
+            // idx does not exist. We should cache this to prevent repeat hits
+            // on sqlite until the idx does exist, at which point the cache is
+            // cleared anyway.
+            //
+            // NOTE: If the db idx misses it returns Some(empty_set), so this
+            // only caches missing index tables.
+            $self.idx_exists_cache.insert(idx_key, false)
+        };
         Ok(db_r)
     }};
 }
@@ -593,6 +637,7 @@ impl IdlArcSqliteWriteTransaction<'_> {
         */
         self.entry_cache.clear();
         self.idl_cache.clear();
+        self.idx_exists_cache.clear();
         self.name_cache.clear();
         Ok(())
     }
@@ -604,6 +649,7 @@ impl IdlArcSqliteWriteTransaction<'_> {
             mut entry_cache,
             mut idl_cache,
             mut name_cache,
+            idx_exists_cache,
             op_ts_max,
             allids,
             maxid,
@@ -677,6 +723,7 @@ impl IdlArcSqliteWriteTransaction<'_> {
         // Can no longer fail from this point.
         op_ts_max.commit();
         name_cache.commit();
+        idx_exists_cache.commit();
         idl_cache.commit();
         allids.commit();
         maxid.commit();
@@ -708,6 +755,7 @@ impl IdlArcSqliteWriteTransaction<'_> {
         *self.maxid = mid;
     }

+    #[instrument(level = "trace", skip_all)]
     pub fn write_identries<'b, I>(&'b mut self, mut entries: I) -> Result<(), OperationError>
     where
         I: Iterator<Item = &'b Entry<EntrySealed, EntryCommitted>>,
@@ -757,6 +805,7 @@ impl IdlArcSqliteWriteTransaction<'_> {
         })
     }

+    #[instrument(level = "trace", skip_all)]
     pub fn write_idl(
         &mut self,
         attr: &Attribute,
@@ -1127,9 +1176,17 @@ impl IdlArcSqliteWriteTransaction<'_> {
         Ok(())
     }

-    pub fn create_idx(&self, attr: &Attribute, itype: IndexType) -> Result<(), OperationError> {
-        // We don't need to affect this, so pass it down.
-        self.db.create_idx(attr, itype)
+    pub fn create_idx(&mut self, attr: &Attribute, itype: IndexType) -> Result<(), OperationError> {
+        self.db.create_idx(attr, itype)?;
+
+        // Cache that this exists since we just made it.
+        let idx_key = IdxNameKey {
+            a: attr.clone(),
+            i: itype,
+        };
+        self.idx_exists_cache.insert(idx_key, true);
+
+        Ok(())
     }

     /// ⚠️  - This function will destroy all indexes in the database.
@@ -1141,6 +1198,7 @@ impl IdlArcSqliteWriteTransaction<'_> {
         debug!("CLEARING CACHE");
         self.db.danger_purge_idxs().map(|()| {
             self.idl_cache.clear();
+            self.idx_exists_cache.clear();
             self.name_cache.clear();
         })
     }
@@ -1266,6 +1324,21 @@ impl IdlArcSqlite {
                 OperationError::InvalidState
             })?;

+        let idx_exists_cache = ARCacheBuilder::new()
+            .set_expected_workload(
+                DEFAULT_IDX_EXISTS_TARGET,
+                cfg.pool_size as usize,
+                DEFAULT_IDX_CACHE_RMISS,
+                DEFAULT_IDX_CACHE_WMISS,
+                true,
+            )
+            .set_reader_quiesce(true)
+            .build()
+            .ok_or_else(|| {
+                admin_error!("Failed to construct idx_exists_cache");
+                OperationError::InvalidState
+            })?;
+
         let allids = CowCell::new(IDLBitRange::new());

         let maxid = CowCell::new(0);
@@ -1279,6 +1352,7 @@ impl IdlArcSqlite {
             entry_cache,
             idl_cache,
             name_cache,
+            idx_exists_cache,
             op_ts_max,
             allids,
             maxid,
@@ -1298,6 +1372,7 @@ impl IdlArcSqlite {
         let db_read = self.db.read()?;
         let idl_cache_read = self.idl_cache.read();
         let name_cache_read = self.name_cache.read();
+        let idx_exists_cache_read = self.idx_exists_cache.read();
         let allids_read = self.allids.read();

         Ok(IdlArcSqliteReadTransaction {
@@ -1305,6 +1380,7 @@ impl IdlArcSqlite {
             entry_cache: entry_cache_read,
             idl_cache: idl_cache_read,
             name_cache: name_cache_read,
+            idx_exists_cache: idx_exists_cache_read,
             allids: allids_read,
         })
     }
@@ -1315,6 +1391,7 @@ impl IdlArcSqlite {
         let db_write = self.db.write()?;
         let idl_cache_write = self.idl_cache.write();
         let name_cache_write = self.name_cache.write();
+        let idx_exists_cache_write = self.idx_exists_cache.write();
         let op_ts_max_write = self.op_ts_max.write();
         let allids_write = self.allids.write();
         let maxid_write = self.maxid.write();
@@ -1325,6 +1402,7 @@ impl IdlArcSqlite {
             entry_cache: entry_cache_write,
             idl_cache: idl_cache_write,
             name_cache: name_cache_write,
+            idx_exists_cache: idx_exists_cache_write,
             op_ts_max: op_ts_max_write,
             allids: allids_write,
             maxid: maxid_write,
```
```diff
@@ -205,12 +205,15 @@ pub(crate) trait IdlSqliteTransaction {
         let mut stmt = self
             .get_conn()?
             .prepare(&format!(
-                "SELECT COUNT(name) from {}.sqlite_master where name = :tname",
+                "SELECT rowid from {}.sqlite_master where name = :tname LIMIT 1",
                 self.get_db_name()
             ))
             .map_err(sqlite_error)?;

         let i: Option<i64> = stmt
             .query_row(&[(":tname", tname)], |row| row.get(0))
             // If the row doesn't exist, we don't mind.
             .optional()
             .map_err(sqlite_error)?;

         match i {
```
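The query swap above is the "faster SQLite call" from the commit message: `COUNT(name)` has to aggregate over matches, while `SELECT rowid ... LIMIT 1` combined with rusqlite's `optional()` returns as soon as a row is (or is not) found. A standalone sketch of the same probe pattern, with a hypothetical table name rather than kanidm's actual schema:

```rust
use rusqlite::{Connection, OptionalExtension, Result};

/// Probe sqlite_master for a table, stopping at the first match
/// instead of counting every matching row.
fn table_exists(conn: &Connection, tname: &str) -> Result<bool> {
    let row: Option<i64> = conn
        .query_row(
            "SELECT rowid FROM sqlite_master WHERE name = :tname LIMIT 1",
            &[(":tname", tname)],
            |row| row.get(0),
        )
        // A missing row is expected here - it just means "no such table".
        .optional()?;
    Ok(row.is_some())
}

fn main() -> Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute("CREATE TABLE idx_demo (id INTEGER PRIMARY KEY)", [])?;
    assert!(table_exists(&conn, "idx_demo")?);
    assert!(!table_exists(&conn, "idx_missing")?);
    Ok(())
}
```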
```diff
@@ -155,3 +155,9 @@ impl Ord for (dyn IdlCacheKeyToRef + '_) {
         self.keyref().cmp(&other.keyref())
     }
 }
+
+#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
+pub struct IdxNameKey {
+    pub a: Attribute,
+    pub i: IndexType,
+}
```
```diff
@@ -571,35 +571,43 @@ impl Plugin for MemberOf {
             Err(e) => return vec![e],
         };

+        // First we have to build a direct membership map. This saves us
+        // needing to run queries since we already have every entry on hand
+        // from the all_cand search.
+        let mut direct_membership_map: BTreeMap<Uuid, BTreeSet<Uuid>> = Default::default();
+
+        let pv_class: PartialValue = EntryClass::Group.into();
+
+        for entry in all_cand.iter() {
+            if !entry.attribute_equality(Attribute::Class, &pv_class) {
+                // Not a group, move on.
+                continue;
+            }
+
+            let group_uuid = entry.get_uuid();
+
+            let member_iter = entry
+                .get_ava_refer(Attribute::Member)
+                .into_iter()
+                .flat_map(|set| set.iter())
+                .chain(
+                    entry
+                        .get_ava_refer(Attribute::DynMember)
+                        .into_iter()
+                        .flat_map(|set| set.iter()),
+                );
+
+            for member_uuid in member_iter {
+                let member_groups = direct_membership_map.entry(*member_uuid).or_default();
+                member_groups.insert(group_uuid);
+            }
+        }
+
         // for each entry in the DB (live).
         for e in all_cand {
             let uuid = e.get_uuid();
-            let filt_in = filter!(f_and!([
-                f_eq(Attribute::Class, EntryClass::Group.into()),
-                f_or!([
-                    f_eq(Attribute::Member, PartialValue::Refer(uuid)),
-                    f_eq(Attribute::DynMember, PartialValue::Refer(uuid))
-                ])
-            ]));
-
-            // what groups is this entry a direct member of?
-            let direct_memberof = match qs
-                .internal_search(filt_in)
-                .map_err(|_| ConsistencyError::QueryServerSearchFailure)
-            {
-                Ok(d_mo) => d_mo,
-                Err(e) => return vec![Err(e)],
-            };
-
-            // for all direct -> add uuid to map
-            let d_groups_set: BTreeSet<Uuid> =
-                direct_memberof.iter().map(|e| e.get_uuid()).collect();
-
-            let d_groups_set = if d_groups_set.is_empty() {
-                None
-            } else {
-                Some(d_groups_set)
-            };
+
+            let d_groups_set: Option<&BTreeSet<Uuid>> = direct_membership_map.get(&uuid);

             trace!(
                 "DMO search groups {:?} -> {:?}",
@@ -607,6 +615,10 @@ impl Plugin for MemberOf {
                 d_groups_set
             );

+            // Remember, we only need to check direct memberships, because when memberof
+            // applies it clones dmo -> mo, so validation of all dmo sets implies mo is
+            // valid (and a subset) of dmo.
+
             match (e.get_ava_set(Attribute::DirectMemberOf), d_groups_set) {
                 (Some(edmos), Some(b)) => {
                     // Can they both be reference sets?
@@ -636,7 +648,7 @@ impl Plugin for MemberOf {
                 }
                 (entry_direct_member_of, expected_direct_groups) => {
                     error!(
-                        "MemberOfInvalid directmemberof set and DMO search set differ in size: {}",
+                        "MemberOfInvalid directmemberof set and DMO search set differ in presence: {}",
                         e.get_display_id()
                     );
                     // trace!(?e);
@@ -645,19 +657,6 @@ impl Plugin for MemberOf {
                     r.push(Err(ConsistencyError::MemberOfInvalid(e.get_id())));
                 }
             }
-
-            // Could check all dmos in mos?
-
-            /* To check nested! */
-            // add all direct to a stack
-            // for all in stack
-            //   check their direct memberships
-            //   if not in map
-            //     add to map
-            //     push to stack
-
-            // check mo == map set
-            // if not, consistency error!
         }

         r
```
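The memberof change is the classic inversion trick: instead of one `internal_search` per candidate entry, a single pass over the candidate set builds a member -> {groups} map, and each verification becomes a map lookup. A reduced sketch of that inversion, with plain integer ids standing in for kanidm's `Uuid`-keyed entries:

```rust
use std::collections::{BTreeMap, BTreeSet};

/// Toy stand-in for a group entry: its own id plus direct member ids.
struct Group {
    uuid: u32,
    members: Vec<u32>,
}

/// One O(n) pass inverts group -> members into member -> {groups},
/// so "which groups is X directly in?" is a lookup, not a search.
fn build_direct_membership(groups: &[Group]) -> BTreeMap<u32, BTreeSet<u32>> {
    let mut map: BTreeMap<u32, BTreeSet<u32>> = BTreeMap::new();
    for g in groups {
        for m in &g.members {
            map.entry(*m).or_default().insert(g.uuid);
        }
    }
    map
}

fn main() {
    let groups = [
        Group { uuid: 1, members: vec![10, 11] },
        Group { uuid: 2, members: vec![11] },
    ];
    let map = build_direct_membership(&groups);
    assert_eq!(map.get(&11), Some(&BTreeSet::from([1, 2])));
    assert_eq!(map.get(&12), None); // no direct memberships at all
}
```

This mirrors why the diff also changes the error wording from "differ in size" to "differ in presence": the map returns `None` for an entry with no direct memberships, so the check compares presence of the sets rather than their sizes alone.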