Mirror of https://github.com/kanidm/kanidm.git (synced 2025-02-23 20:47:01 +01:00)
Update to concread, add hooks for cache quiescing (#641)
This commit is contained in:
parent 840024f006
commit 6e1ed9ea07
Cargo.lock (generated): 438 changes
File diff suppressed because it is too large
@@ -28,6 +28,7 @@ exclude = [
 # tokio-util = { git = "https://github.com/Firstyear/tokio.git", rev = "aa6fb48d9a1f3652ee79e3b018a2b9d0c9f89c1e" }
 
 # concread = { path = "../concread" }
+concread = { git = "https://github.com/kanidm/concread.git" }
 # idlset = { path = "../idlset" }
 # ldap3_server = { path = "../ldap3_server" }
 # webauthn-rs = { path = "../webauthn-rs" }

@@ -66,8 +66,8 @@ r2d2_sqlite = "0.19"
 structopt = { version = "0.3", default-features = false }
 time = { version = "0.2", features = ["serde", "std"] }
 
-hashbrown = "0.11"
-concread = "^0.2.21"
+hashbrown = { version = "0.11", features = ["serde", "inline-more", "ahash"] }
+concread = "^0.3"
 smolset = "1.3"
 # concread = { version = "^0.2.9", features = ["simd_support"] }
 

@@ -1327,11 +1327,16 @@ impl AccessControls {
         */
         acp_resolve_filter_cache: ARCacheBuilder::new()
             .set_size(ACP_RESOLVE_FILTER_CACHE_MAX, ACP_RESOLVE_FILTER_CACHE_LOCAL)
+            .set_reader_quiesce(true)
             .build()
             .expect("Failed to construct acp_resolve_filter_cache"),
         }
     }
 
+    pub fn try_quiesce(&self) {
+        self.acp_resolve_filter_cache.try_quiesce();
+    }
+
     pub fn read(&self) -> AccessControlsReadTransaction {
         AccessControlsReadTransaction {
             inner: self.inner.read(),

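This hunk shows the concread ^0.3 construction API: a builder that returns an Option, with set_reader_quiesce(true) letting read transactions defer their hit feedback until try_quiesce is called. A minimal standalone sketch of the same pattern follows; the module path concread::arcache, the key/value types, the sizes, and the meaning of set_size's second argument (a per-reader local capacity) are assumptions here, not taken from this diff.

    use concread::arcache::{ARCache, ARCacheBuilder};

    fn build_cache() -> ARCache<u64, String> {
        ARCacheBuilder::new()
            // Assumed meanings: shared capacity, then per-reader local capacity.
            .set_size(2048, 64)
            // Readers buffer their inclusion/hit feedback instead of contending.
            .set_reader_quiesce(true)
            .build()
            .expect("Failed to construct cache")
    }

    fn main() {
        let cache = build_cache();
        let mut wr_txn = cache.write();
        wr_txn.insert(1, "entry one".to_string());
        wr_txn.commit();
        // Later, at a quiet moment, fold any deferred reader feedback back in.
        cache.try_quiesce();
    }
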
@@ -1,5 +1,6 @@
+use hashbrown::HashSet;
 use serde::{Deserialize, Serialize};
-use std::{collections::HashSet, time::Duration};
+use std::time::Duration;
 use url::Url;
 use uuid::Uuid;
 use webauthn_rs::proto::{COSEKey, UserVerificationPolicy};

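This and several later hunks swap std::collections::HashSet for hashbrown::HashSet, matching the new hashbrown features in the manifest above. The two types share the same basic API, so call sites are unchanged; a tiny drop-in sketch:

    use hashbrown::HashSet; // drop-in replacement for std::collections::HashSet

    fn main() {
        let mut codes: HashSet<String> = HashSet::new();
        codes.insert("alpha-bravo".to_string());
        assert!(codes.contains("alpha-bravo"));
        assert_eq!(codes.len(), 1);
    }
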
@@ -1078,11 +1078,11 @@ impl IdlArcSqlite {
 
         // Autotune heuristic.
         let mut cache_size = cfg.arcsize.unwrap_or_else(|| {
-            // For now I've noticed about 20% of the number of entries
-            // works well, but it may not be perfect ...
+            // Due to changes in concread, we can now scale this up! We now aim for 120%
+            // of entries.
             db.get_allids_count()
                 .map(|c| {
-                    let tmpsize = (c / 5) as usize;
+                    let tmpsize = ((c / 5) as usize) * 6;
                     // if our calculation's too small anyway, just set it to the minimum target
                     std::cmp::max(tmpsize, DEFAULT_CACHE_TARGET)
                 })

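The arithmetic of the new heuristic: c / 5 is 20% of the entry count, so (c / 5) * 6 is 120%. A worked check with an illustrative count (the constant name is kanidm's; its value and the count here are made up):

    fn main() {
        const DEFAULT_CACHE_TARGET: usize = 2048; // illustrative value only

        let c: u64 = 50_000; // hypothetical allids count from the db
        let old_target = (c / 5) as usize; // 20% of entries -> 10_000
        let new_target = ((c / 5) as usize) * 6; // 120% of entries -> 60_000
        assert_eq!(old_target, 10_000);
        assert_eq!(new_target, 60_000);
        // Both versions clamp small results up to the minimum target:
        assert_eq!(std::cmp::max(new_target, DEFAULT_CACHE_TARGET), 60_000);
    }
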
@@ -1106,6 +1106,7 @@ impl IdlArcSqlite {
                 DEFAULT_CACHE_WMISS,
                 false,
             )
+            .set_reader_quiesce(true)
             .build()
             .ok_or_else(|| {
                 admin_error!("Failed to construct entry_cache");

@@ -1121,6 +1122,7 @@ impl IdlArcSqlite {
                 DEFAULT_CACHE_WMISS,
                 false,
             )
+            .set_reader_quiesce(true)
             .build()
             .ok_or_else(|| {
                 admin_error!("Failed to construct idl_cache");

@@ -1135,6 +1137,7 @@ impl IdlArcSqlite {
                 DEFAULT_CACHE_WMISS,
                 true,
             )
+            .set_reader_quiesce(true)
             .build()
             .ok_or_else(|| {
                 admin_error!("Failed to construct name_cache");

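In these three hunks build() returns an Option, and each failed build is mapped onto the server's error type via ok_or_else, logging on the way. A generic sketch of that Option-to-Result pattern; BuildError and build_sized are hypothetical stand-ins, not kanidm's types:

    #[derive(Debug)]
    enum BuildError {
        InvalidState, // hypothetical stand-in for the server's error type
    }

    fn build_sized(size: usize) -> Option<Vec<u8>> {
        // Stand-in for a cache builder whose build() can fail.
        if size > 0 { Some(Vec::with_capacity(size)) } else { None }
    }

    fn construct(size: usize) -> Result<Vec<u8>, BuildError> {
        build_sized(size).ok_or_else(|| {
            eprintln!("Failed to construct cache"); // admin_error! in kanidm
            BuildError::InvalidState
        })
    }

    fn main() {
        assert!(construct(8).is_ok());
        assert!(construct(0).is_err());
    }
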
@@ -1158,6 +1161,12 @@ impl IdlArcSqlite {
             })
     }
 
+    pub fn try_quiesce(&self) {
+        self.entry_cache.try_quiesce();
+        self.idl_cache.try_quiesce();
+        self.name_cache.try_quiesce();
+    }
+
     pub fn read(&self) -> IdlArcSqliteReadTransaction {
         // IMPORTANT! Always take entrycache FIRST
         let entry_cache_read = self.entry_cache.read();

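The retained comment "IMPORTANT! Always take entrycache FIRST" is an acquisition-ordering rule: every transaction takes the caches in one fixed order, so the combined view stays coherent and no two code paths can interleave acquisitions in opposite orders. A minimal sketch of the idea using plain mutexes; the real code takes concread read transactions, not Mutex guards, and the types here are illustrative:

    use std::sync::Mutex;

    struct Layer {
        entry_cache: Mutex<Vec<u8>>,
        idl_cache: Mutex<Vec<u8>>,
    }

    impl Layer {
        fn read(&self) {
            // Always entry_cache FIRST, then idl_cache, never the reverse.
            let _entry = self.entry_cache.lock().unwrap();
            let _idl = self.idl_cache.lock().unwrap();
            // ... work against a consistent pair of guards here ...
        }
    }

    fn main() {
        let l = Layer {
            entry_cache: Mutex::new(vec![]),
            idl_cache: Mutex::new(vec![]),
        };
        l.read();
    }
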
@@ -1519,6 +1519,10 @@ impl Backend {
         self.cfg.pool_size
     }
 
+    pub fn try_quiesce(&self) {
+        self.idlayer.try_quiesce();
+    }
+
     pub fn read(&self) -> BackendReadTransaction {
         BackendReadTransaction {
             idlayer: UnsafeCell::new(self.idlayer.read()),

@@ -1,12 +1,12 @@
 use crate::be::dbvalue::DbBackupCodeV1;
 use crate::be::dbvalue::{DbCredTypeV1, DbCredV1, DbPasswordV1, DbWebauthnV1};
 use hashbrown::HashMap as Map;
+use hashbrown::HashSet;
 use kanidm_proto::v1::{BackupCodesView, CredentialDetail, CredentialDetailType, OperationError};
 use openssl::hash::MessageDigest;
 use openssl::pkcs5::pbkdf2_hmac;
 use openssl::sha::Sha512;
 use rand::prelude::*;
-use std::collections::HashSet;
 use std::convert::TryFrom;
 use std::time::{Duration, Instant};
 use uuid::Uuid;

@@ -42,13 +42,12 @@ use tracing::trace;
 use crate::be::dbentry::{DbEntry, DbEntryV1, DbEntryVers};
 use crate::be::{IdxKey, IdxSlope};
 
+use hashbrown::HashMap;
 use ldap3_server::simple::{LdapPartialAttribute, LdapSearchResultEntry};
+use smartstring::alias::String as AttrString;
 use std::collections::BTreeMap as Map;
 pub use std::collections::BTreeSet as Set;
 use std::collections::BTreeSet;
-// use hashbrown::HashMap as Map;
-use hashbrown::HashMap;
-use smartstring::alias::String as AttrString;
 use std::sync::Arc;
 use time::OffsetDateTime;
 use uuid::Uuid;

@@ -1861,7 +1861,7 @@ mod tests {
         let backup_code_good = readable_password_from_random();
         let backup_code_bad = readable_password_from_random();
         assert!(backup_code_bad != backup_code_good);
-        let mut code_set = std::collections::HashSet::new();
+        let mut code_set = HashSet::new();
         code_set.insert(backup_code_good.clone());
 
         let backup_codes = BackupCodes::new(code_set);

@@ -531,9 +531,9 @@ mod tests {
     use crate::ldap::LdapServer;
     use crate::modify::{Modify, ModifyList};
     use async_std::task;
+    use hashbrown::HashSet;
     use ldap3_server::proto::{LdapFilter, LdapOp, LdapSearchScope};
     use ldap3_server::simple::*;
-    use std::collections::HashSet;
 
     const TEST_PASSWORD: &'static str = "ntaoeuntnaoeuhraohuercahu😍";
 

@@ -908,6 +908,7 @@ impl QueryServer {
             resolve_filter_cache: Arc::new(
                 ARCacheBuilder::new()
                     .set_size(RESOLVE_FILTER_CACHE_MAX, RESOLVE_FILTER_CACHE_LOCAL)
+                    .set_reader_quiesce(true)
                     .build()
                     .expect("Failer to build resolve_filter_cache"),
             ),

@@ -919,6 +920,12 @@ impl QueryServer {
         task::block_on(self.read_async())
     }
 
+    pub fn try_quiesce(&self) {
+        self.be.try_quiesce();
+        self.accesscontrols.try_quiesce();
+        self.resolve_filter_cache.try_quiesce();
+    }
+
     pub async fn read_async(&self) -> QueryServerReadTransaction<'_> {
         // We need to ensure a db conn will be available
         #[allow(clippy::expect_used)]

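The commit only adds the try_quiesce hooks, which fan out from QueryServer through Backend and IdlArcSqlite to the individual caches; where and how often the server invokes the top-level hook is not part of this diff. A hypothetical caller might drive it from a periodic maintenance loop, sketched here with a stub standing in for the real QueryServer:

    use std::thread;
    use std::time::Duration;

    // Stub in place of kanidm's QueryServer; only the hook matters here.
    struct QueryServer;

    impl QueryServer {
        fn try_quiesce(&self) {
            // In kanidm this fans out to be, accesscontrols and
            // resolve_filter_cache, as in the hunk above.
        }
    }

    fn main() {
        let qs = QueryServer;
        // Hypothetical maintenance loop: quiesce the caches periodically.
        for _ in 0..3 {
            thread::sleep(Duration::from_millis(100));
            qs.try_quiesce();
        }
    }
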
@@ -47,8 +47,8 @@ impl EventTag {
         use EventTag::*;
         match self {
             AdminError | FilterError | RequestError | SecurityError => "🚨",
-            AdminWarn | FilterWarn | RequestWarn => "🚧",
-            AdminInfo | FilterInfo | RequestInfo | SecurityInfo => "💬",
+            AdminWarn | FilterWarn | RequestWarn => "⚠️ ",
+            AdminInfo | FilterInfo | RequestInfo | SecurityInfo => " ",
             RequestTrace | FilterTrace | PerfTrace => "📍",
             SecurityCritical => "🔐",
             SecurityAccess => "🔓",

@@ -396,8 +396,8 @@ impl TreeEvent {
             .map(EventTag::emoji)
             .unwrap_or_else(|| match self.level {
                 Level::ERROR => "🚨",
-                Level::WARN => "🚧",
-                Level::INFO => "💬",
+                Level::WARN => "⚠️ ",
+                Level::INFO => " ",
                 Level::DEBUG => "🐛",
                 Level::TRACE => "📍",
             })

@@ -1,4 +1,4 @@
-use std::collections::HashSet;
+use hashbrown::HashSet;
 use std::io::ErrorKind;
 use std::path::PathBuf;
 use std::time::{Duration, SystemTime};