Mirror of https://github.com/kanidm/kanidm.git (synced 2025-02-23 12:37:00 +01:00)
20240611 performance (#2836)
While basking under the shade of the coolabah tree, I was overcome by an intense desire to improve the performance and memory usage of Kanidm. This PR reduces a major source of repeated small clones, lowers the default log level in testing, removes some trace fields that are both large and that probably shouldn't be traced anyway, and adjusts the LTO settings for release builds.
Commit 9c4e8bb90a (parent 167a7be86c)
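The clone reduction shows up throughout the diff as conversions moving from owned `String` to borrowed `&str` (see the `Attribute` conversions further down). A minimal sketch of the pattern; the `Attr` type and error handling here are invented for illustration, not taken from the codebase:

```rust
#[derive(Debug, PartialEq)]
enum Attr {
    Account,
    Class,
}

// Matching on a borrowed &str needs no allocation on the hot path;
// only the error case pays for an owned String.
impl TryFrom<&str> for Attr {
    type Error = String;

    fn try_from(val: &str) -> Result<Self, Self::Error> {
        match val {
            "account" => Ok(Attr::Account),
            "class" => Ok(Attr::Class),
            _ => Err(format!("unknown attribute {val}")),
        }
    }
}

fn main() {
    // A caller that owns a String lends it out instead of handing over
    // a clone, as an owned TryFrom<String> entry point would require.
    let owned = String::from("account");
    assert_eq!(Attr::try_from(owned.as_str()), Ok(Attr::Account));
}
```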
Cargo.lock (generated, 30 lines changed)
@@ -1168,6 +1168,7 @@ version = "1.3.0-dev"
 dependencies = [
  "clap",
  "clap_complete",
+ "dhat",
  "fs2",
  "futures",
  "kanidm_build_profiles",
@@ -1355,6 +1356,22 @@ dependencies = [
  "nom",
 ]
 
+[[package]]
+name = "dhat"
+version = "0.3.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "98cd11d84628e233de0ce467de10b8633f4ddaecafadefc86e13b84b8739b827"
+dependencies = [
+ "backtrace",
+ "lazy_static",
+ "mintex",
+ "parking_lot 0.12.3",
+ "rustc-hash",
+ "serde",
+ "serde_json",
+ "thousands",
+]
+
 [[package]]
 name = "dialoguer"
 version = "0.10.4"
@@ -3493,6 +3510,7 @@ dependencies = [
  "compact_jwt 0.4.1",
  "concread",
  "criterion",
+ "dhat",
  "dyn-clone",
  "enum-iterator",
  "fernet",
@@ -3981,6 +3999,12 @@ dependencies = [
  "adler",
 ]
 
+[[package]]
+name = "mintex"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9bec4598fddb13cc7b528819e697852653252b760f1228b7642679bf2ff2cd07"
+
 [[package]]
 name = "mio"
 version = "0.8.11"
@@ -5885,6 +5909,12 @@ dependencies = [
 "syn 2.0.66",
 ]
 
+[[package]]
+name = "thousands"
+version = "0.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3bf63baf9f5039dadc247375c29eb13706706cfde997d0330d05aa63a77d8820"
+
 [[package]]
 name = "thread_local"
 version = "1.1.8"
Cargo.toml (83 lines changed)
@@ -1,6 +1,14 @@
-[profile.release]
-debug = true
-lto = "thin"
+[workspace.package]
+version = "1.3.0-dev"
+authors = [
+    "William Brown <william@blackhats.net.au>",
+    "James Hodgkinson <james@terminaloutcomes.com>",
+]
+rust-version = "1.77"
+edition = "2021"
+license = "MPL-2.0"
+homepage = "https://github.com/kanidm/kanidm/"
+repository = "https://github.com/kanidm/kanidm/"
 
 [workspace]
 resolver = "2"
@@ -31,18 +39,64 @@ members = [
     "libs/users",
 ]
 
-[workspace.package]
-version = "1.3.0-dev"
-authors = [
-    "William Brown <william@blackhats.net.au>",
-    "James Hodgkinson <james@terminaloutcomes.com>",
-]
-rust-version = "1.77"
-edition = "2021"
-license = "MPL-2.0"
-homepage = "https://github.com/kanidm/kanidm/"
-repository = "https://github.com/kanidm/kanidm/"
+# Below follows some guides/estimates to system resources consumed during a build.
+# Most of these values are over-estimates and are just rough observations.
+#
+# These were tested on a 10-core M1 Max.
+# Parallel Linking Maximum = an estimate of how much ram will be consumed at peak
+# while the build is linking multiple binaries in parallel
+# Single Largest Binary Maximum = an estimate of how much ram is consumed by the
+# single largest binary during linking. This would reflect a single threaded
+# build ram maximum.
+# Time = estimate on how long the build may take.
+
+# cargo build --release
+#
+# Parallel Linking Maximum: 6GB
+# Single Largest Binary Maximum: 5GB
+# Time: ~6 minutes
+[profile.release]
+debug = true
+strip = "none"
+lto = "thin"
+opt-level = 3
+codegen-units = 32
+
+[profile.release.build-override]
+debug = false
+opt-level = 0
+codegen-units = 256
+
+# cargo build --profile release-lto
+#
+# Parallel Linking Maximum: 24GB
+# Single Largest Binary Maximum: 16GB
+# Time: ~11 minutes
+[profile.release-lto]
+inherits = "release"
+lto = "fat"
+codegen-units = 1
+
+# cargo build
+#
+# Parallel Linking Maximum: 4GB
+# Single Largest Binary Maximum: 3GB
+# Time: ~2 minutes
+#
+# cargo test [-- --test-threads=1]
+#
+# Parallel Maximum: 1GB
+# Single Maximum: 0.2GB
+[profile.dev]
+debug = true
+lto = false
+opt-level = 0
+codegen-units = 256
+
+[profile.dev.build-override]
+debug = true
+opt-level = 0
+codegen-units = 256
 
 [patch.crates-io]
 ## As Kanidm maintains a number of libraries, sometimes during development we need to override them
@@ -115,6 +169,7 @@ crossbeam = "0.8.4"
 criterion = "^0.5.1"
 csv = "1.3.0"
 dialoguer = "0.10.4"
+dhat = "0.3.3"
 dyn-clone = "^1.0.17"
 fernet = "^0.2.1"
 filetime = "^0.2.23"
@@ -12,8 +12,8 @@ tls_client_ca = "/tmp/kanidm/client_ca"
 # NOTE: this is overridden by KANIDM_LOG_LEVEL environment variable
 # Defaults to "info"
 #
-# log_level = "info"
-log_level = "debug"
+log_level = "info"
+# log_level = "debug"
 # log_level = "trace"
 
 # otel_grpc_url = "http://localhost:4317"
@@ -122,6 +122,7 @@ pub struct KanidmClientBuilder {
     verify_hostnames: bool,
     ca: Option<reqwest::Certificate>,
     connect_timeout: Option<u64>,
+    request_timeout: Option<u64>,
     use_system_proxies: bool,
     /// Where to store auth tokens, only use in testing!
     token_cache_path: Option<String>,
@@ -143,6 +144,10 @@ impl Display for KanidmClientBuilder {
             Some(value) => writeln!(f, "connect_timeout: {}", value)?,
             None => writeln!(f, "connect_timeout: unset")?,
         }
+        match self.request_timeout {
+            Some(value) => writeln!(f, "request_timeout: {}", value)?,
+            None => writeln!(f, "request_timeout: unset")?,
+        }
         writeln!(f, "use_system_proxies: {}", self.use_system_proxies)?;
         writeln!(
             f,
@@ -166,6 +171,7 @@ fn test_kanidmclientbuilder_display() {
         verify_hostnames: true,
         ca: None,
        connect_timeout: Some(420),
+        request_timeout: Some(69),
         use_system_proxies: true,
         token_cache_path: Some(CLIENT_TOKEN_CACHE.to_string()),
     };
@@ -211,6 +217,7 @@ impl KanidmClientBuilder {
            verify_hostnames: true,
            ca: None,
            connect_timeout: None,
+           request_timeout: None,
            use_system_proxies: true,
            token_cache_path: None,
        }
@@ -267,6 +274,7 @@ impl KanidmClientBuilder {
            verify_hostnames,
            ca,
            connect_timeout,
+           request_timeout,
            use_system_proxies,
            token_cache_path,
        } = self;
@@ -291,6 +299,7 @@ impl KanidmClientBuilder {
            verify_hostnames,
            ca,
            connect_timeout,
+           request_timeout,
            use_system_proxies,
            token_cache_path,
        })
@@ -416,6 +425,13 @@ impl KanidmClientBuilder {
         }
     }
 
+    pub fn request_timeout(self, secs: u64) -> Self {
+        KanidmClientBuilder {
+            request_timeout: Some(secs),
+            ..self
+        }
+    }
+
     pub fn no_proxy(self) -> Self {
         KanidmClientBuilder {
             use_system_proxies: false,
@@ -501,9 +517,12 @@ impl KanidmClientBuilder {
         };
 
         let client_builder = match &self.connect_timeout {
-            Some(secs) => client_builder
-                .connect_timeout(Duration::from_secs(*secs))
-                .timeout(Duration::from_secs(*secs)),
+            Some(secs) => client_builder.connect_timeout(Duration::from_secs(*secs)),
+            None => client_builder,
+        };
+
+        let client_builder = match &self.request_timeout {
+            Some(secs) => client_builder.timeout(Duration::from_secs(*secs)),
             None => client_builder,
         };
 
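The net effect of the builder changes above, as a usage sketch. The `connect_timeout`, `request_timeout`, `address`, and `build` methods appear in this diff; the `new()` constructor and error handling are assumptions for the example:

```rust
use kanidm_client::KanidmClientBuilder;

fn main() {
    let client = KanidmClientBuilder::new()
        .address("https://idm.example.com".to_string())
        // Seconds allowed to establish the TCP/TLS connection.
        .connect_timeout(5)
        // New in this PR: seconds allowed for an entire request/response,
        // previously forced to equal the connect timeout.
        .request_timeout(30)
        .build()
        .expect("failed to build client");
    let _ = client;
}
```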
@@ -234,6 +234,19 @@ impl CryptoPolicy {
         }
     }
 
+    pub fn danger_test_minimum() -> Self {
+        CryptoPolicy {
+            pbkdf2_cost: 1000,
+            argon2id_params: Params::new(
+                Params::MIN_M_COST,
+                Params::MIN_T_COST,
+                Params::MIN_P_COST,
+                None,
+            )
+            .unwrap_or_default(),
+        }
+    }
+
     pub fn time_target(target_time: Duration) -> Self {
         const PBKDF2_BENCH_FACTOR: usize = 10;
 
@@ -20,7 +20,8 @@ pub use {tracing, tracing_forest, tracing_subscriber};
 /// Start up the logging for test mode.
 pub fn test_init() {
     let filter = EnvFilter::from_default_env()
-        .add_directive(LevelFilter::TRACE.into())
+        // Skipping trace on tests by default saves a *TON* of ram.
+        .add_directive(LevelFilter::INFO.into())
         // escargot builds cargo packages while we integration test and is SUPER noisy.
         .add_directive(
             "escargot=ERROR"
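A standalone sketch of the filter `test_init()` now builds, using the tracing-subscriber `EnvFilter` API; the exact directive set here is illustrative:

```rust
use tracing_subscriber::filter::{EnvFilter, LevelFilter};

fn test_filter() -> EnvFilter {
    EnvFilter::from_default_env()
        // INFO as the default: trace-level events capture large Debug
        // dumps, and retaining them across a test run costs real ram.
        .add_directive(LevelFilter::INFO.into())
        // Quieten one especially noisy crate below the global level.
        .add_directive("escargot=error".parse().expect("static directive"))
}
```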
@@ -873,7 +873,7 @@ impl QueryServerWriteV1 {
             e
         })?;
 
-        let target_attr = Attribute::try_from(attr)?;
+        let target_attr = Attribute::try_from(attr.as_str())?;
         let mdf = match ModifyEvent::from_target_uuid_attr_purge(
             ident,
             target_uuid,
@@ -31,7 +31,8 @@ pub struct TrustedClientIp(pub IpAddr);
 impl FromRequestParts<ServerState> for TrustedClientIp {
     type Rejection = (StatusCode, &'static str);
 
-    #[instrument(level = "debug", skip(state))]
+    // Need to skip all to prevent leaking tokens to logs.
+    #[instrument(level = "debug", skip_all)]
     async fn from_request_parts(
         parts: &mut Parts,
         state: &ServerState,
@@ -88,7 +89,8 @@ pub struct VerifiedClientInformation(pub ClientAuthInfo);
 impl FromRequestParts<ServerState> for VerifiedClientInformation {
     type Rejection = (StatusCode, &'static str);
 
-    #[instrument(level = "debug", skip(state))]
+    // Need to skip all to prevent leaking tokens to logs.
+    #[instrument(level = "debug", skip_all)]
     async fn from_request_parts(
         parts: &mut Parts,
         state: &ServerState,
@@ -2847,7 +2847,8 @@ pub async fn auth(
     auth_session_state_management(state, jar, inter)
 }
 
-#[instrument(skip(state))]
+// Disable on any level except trace to stop leaking tokens
+#[instrument(level = "trace", skip_all)]
fn auth_session_state_management(
     state: ServerState,
     mut jar: CookieJar,
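A hedged illustration of why `skip_all` is safer here (the function and its arguments are hypothetical): `skip(state)` omits only the named argument, while every other argument is still captured through its `Debug` representation, so a token-bearing argument would land in the logs.

```rust
use tracing::instrument;

// skip_all records no arguments at all; anything safe to log can be
// re-added explicitly via fields(...).
#[instrument(level = "debug", skip_all)]
async fn from_request(token: String, state: String) {
    // `token` never reaches the log output.
    let _ = (token, state);
}

fn main() {
    // Constructing the future is enough to show the expansion; a runtime
    // would be needed to actually drive it.
    let _ = from_request(String::from("secret"), String::from("state"));
}
```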
@@ -19,6 +19,10 @@ path = "src/main.rs"
 test = true
 doctest = false
 
+[features]
+dhat-heap = ["dep:dhat"]
+dhat-ad-hoc = ["dep:dhat"]
+
 [dependencies]
 kanidm_proto = { workspace = true }
 kanidmd_core = { workspace = true }
@@ -27,6 +31,7 @@ sketching = { workspace = true }
 fs2 = { workspace = true }
 futures = { workspace = true }
 
+dhat = { workspace = true, optional = true }
 clap = { workspace = true, features = ["env"] }
 mimalloc = { workspace = true }
 reqwest = { workspace = true }
@@ -10,9 +10,14 @@
 #![deny(clippy::needless_pass_by_value)]
 #![deny(clippy::trivially_copy_pass_by_ref)]
 
+#[cfg(not(feature = "dhat-heap"))]
 #[global_allocator]
 static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
 
+#[cfg(feature = "dhat-heap")]
+#[global_allocator]
+static ALLOC: dhat::Alloc = dhat::Alloc;
+
 use std::fs::{metadata, File};
 // This works on both unix and windows.
 use fs2::FileExt;
@@ -284,6 +289,9 @@ fn main() -> ExitCode {
         return ExitCode::FAILURE;
     }
 
+    #[cfg(feature = "dhat-heap")]
+    let _profiler = dhat::Profiler::new_heap();
+
     let maybe_rt = tokio::runtime::Builder::new_multi_thread()
         .enable_all()
         .thread_name("kanidmd-thread-pool")
@@ -136,10 +136,16 @@ pub(crate) fn qs_test(args: TokenStream, item: TokenStream) -> TokenStream {
         let body = async {
             let test_config = #default_config_struct;
 
+            #[cfg(feature = "dhat-heap")]
+            let _profiler = dhat::Profiler::new_heap();
+
             let test_server = crate::testkit::setup_test(test_config).await;
 
             #test_fn(&test_server).await;
 
+            #[cfg(feature = "dhat-heap")]
+            drop(_profiler);
+
             // Any needed teardown?
             // Clear the cache before we verify.
             assert!(test_server.clear_cache().await.is_ok());
@@ -224,10 +230,16 @@ pub(crate) fn qs_pair_test(args: &TokenStream, item: TokenStream) -> TokenStream
         let body = async {
             let test_config = #default_config_struct;
 
+            #[cfg(feature = "dhat-heap")]
+            let _profiler = dhat::Profiler::new_heap();
+
             let (server_a, server_b) = crate::testkit::setup_pair_test(test_config).await;
 
             #test_fn(&server_a, &server_b).await;
 
+            #[cfg(feature = "dhat-heap")]
+            drop(_profiler);
+
             // Any needed teardown?
             assert!(server_a.clear_cache().await.is_ok());
             assert!(server_b.clear_cache().await.is_ok());
@@ -323,10 +335,16 @@ pub(crate) fn idm_test(args: &TokenStream, item: TokenStream) -> TokenStream {
         let body = async {
             let test_config = #default_config_struct;
 
+            #[cfg(feature = "dhat-heap")]
+            let _profiler = dhat::Profiler::new_heap();
+
             let (test_server, mut idms_delayed, mut idms_audit) = crate::testkit::setup_idm_test(test_config).await;
 
             #test_fn(#test_fn_args).await;
 
+            #[cfg(feature = "dhat-heap")]
+            drop(_profiler);
+
             // Any needed teardown?
             // assert!(test_server.clear_cache().await.is_ok());
             // Make sure there are no errors.
@@ -25,12 +25,18 @@ harness = false
 name = "image_benches"
 harness = false
 
+[features]
+# default = [ "libsqlite3-sys/bundled", "openssl/vendored" ]
+dhat-heap = ["dep:dhat"]
+dhat-ad-hoc = ["dep:dhat"]
+
 [dependencies]
 base64 = { workspace = true }
 base64urlsafedata = { workspace = true }
 bitflags = { workspace = true }
 compact_jwt = { workspace = true, features = ["openssl", "hsm-crypto"] }
 concread = { workspace = true }
+dhat = { workspace = true, optional = true }
 dyn-clone = { workspace = true }
 enum-iterator = { workspace = true }
 fernet = { workspace = true, features = ["fernet_danger_timestamps"] }
@@ -98,9 +104,6 @@ svg = { workspace = true }
 [target.'cfg(target_family = "windows")'.dependencies]
 whoami = { workspace = true }
 
-[features]
-# default = [ "libsqlite3-sys/bundled", "openssl/vendored" ]
-
 [dev-dependencies]
 compact_jwt = { workspace = true, features = ["openssl", "hsm-crypto", "unsafe_release_without_verify"] }
 criterion = { workspace = true, features = ["html_reports"] }
server/lib/PROFILING.md (new file, 10 lines)
@@ -0,0 +1,10 @@
+
+
+```
+cargo test --features=dhat-heap test_idm_authsession_simple_password_mech
+
+cargo install cargo-flamegraph
+cargo flamegraph --root --reverse --unit-test -- 'testname'
+
+KANI_CARGO_OPTS="--features dhat-heap" ./run_insecure_dev_server.sh
+```
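As a complement to those notes, a minimal self-contained program wired the same way as kanidmd in this PR, assuming `dhat = "0.3"` and a `dhat-heap` cargo feature:

```rust
// The dhat allocator only replaces the global allocator when the
// dhat-heap feature is enabled, exactly as in the kanidmd diff above.
#[cfg(feature = "dhat-heap")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;

fn main() {
    // The profiler writes dhat-heap.json when dropped; open it in the
    // DHAT viewer (dh_view.html) to see allocation counts and peak usage.
    #[cfg(feature = "dhat-heap")]
    let _profiler = dhat::Profiler::new_heap();

    let v: Vec<u64> = (0..1_000_000).collect();
    println!("{} entries allocated", v.len());
}
```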
@@ -1755,6 +1755,9 @@ impl IdlSqlite {
         };
 
         let fs_page_size = cfg.fstype as u32;
+        // sqlite caches based on pages, so we calc based on page size to achieve our target which
+        // is 32MB (contrast the SQLite default of 2MB)
+        let cache_pages = 33554432 / fs_page_size;
         let checkpoint_pages = cfg.fstype.checkpoint_pages();
 
         // Initial setup routines.
@@ -1766,7 +1769,9 @@ impl IdlSqlite {
             .execute_batch(
                 format!(
                     "PRAGMA page_size={fs_page_size};
+                     PRAGMA cache_size={cache_pages};
                      PRAGMA journal_mode=WAL;
+                     PRAGMA synchronous=NORMAL;
                      PRAGMA wal_autocheckpoint={checkpoint_pages};
                      PRAGMA wal_checkpoint(RESTART);"
                 )
@@ -1848,6 +1853,17 @@ impl IdlSqlite {
             Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error);
         match conn {
             Ok(conn) => {
+                // We need to set the cachesize at this point as well.
+                conn
+                    .execute_batch(
+                        format!(
+                            "PRAGMA cache_size={cache_pages};"
+                        )
+                        .as_str(),
+                    )
+                    .map_err(sqlite_error)?;
+
                 // load the rusqlite vtab module to allow for virtual tables
                 rusqlite::vtab::array::load_module(&conn).map_err(|e| {
                     admin_error!(
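The sizing rule is simple enough to state as a worked example; SQLite's `cache_size` PRAGMA counts pages, not bytes, so the fixed 32MiB target is divided by the filesystem page size. Values below are arithmetic only, not measurements:

```rust
// 32MiB cache target, as in the comment above.
const CACHE_TARGET_BYTES: u32 = 33_554_432;

fn cache_pages(fs_page_size: u32) -> u32 {
    CACHE_TARGET_BYTES / fs_page_size
}

fn main() {
    assert_eq!(cache_pages(4096), 8192); // common 4KiB pages
    assert_eq!(cache_pages(16384), 2048); // larger filesystem block sizes
}
```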
@@ -588,7 +588,7 @@ pub trait BackendTransaction {
         // Unlike DS, even if we don't get the index back, we can just pass
         // to the in-memory filter test and be done.
 
-        debug!(filter_optimised = ?filt);
+        trace!(filter_optimised = ?filt);
 
         let (idl, fplan) = trace_span!("be::search -> filter2idl")
             .in_scope(|| self.filter2idl(filt.to_inner(), FILTER_SEARCH_TEST_THRESHOLD))?;
@@ -682,7 +682,7 @@ pub trait BackendTransaction {
         erl: &Limits,
         filt: &Filter<FilterValidResolved>,
     ) -> Result<bool, OperationError> {
-        debug!(filter_optimised = ?filt);
+        trace!(filter_optimised = ?filt);
 
         // Using the indexes, resolve the IdList here, or AllIds.
         // Also get if the filter was 100% resolved or not.
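Why the demotion matters: tracing only evaluates an event's field expressions when the event's level passes the active filter, so the `Debug` formatting of a large filter tree is skipped entirely in ordinary debug-level runs. A small sketch with an invented type:

```rust
use tracing::trace;

#[derive(Debug)]
struct Filter {
    terms: Vec<String>,
}

fn search(filter: &Filter) {
    // The `?filter` field is only Debug-formatted if a subscriber accepts
    // trace-level events; at debug level and above it costs nothing.
    trace!(filter_optimised = ?filter);
}

fn main() {
    // With no trace-level subscriber installed, this formats nothing.
    search(&Filter { terms: vec!["class".into(), "uuid".into()] });
}
```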
@@ -31,7 +31,7 @@ fn test_valueattribute_round_trip() {
     let the_list = all::<Attribute>().collect::<Vec<_>>();
     for attr in the_list {
         let s: &'static str = attr.into();
-        let attr2 = Attribute::try_from(s.to_string()).unwrap();
+        let attr2 = Attribute::try_from(s).unwrap();
         assert!(attr == attr2);
     }
 }
@@ -217,26 +217,18 @@ impl From<&Attribute> for &'static str {
     }
 }
 
-impl TryFrom<&str> for Attribute {
-    type Error = OperationError;
-
-    fn try_from(value: &str) -> Result<Self, Self::Error> {
-        Attribute::try_from(value.to_string())
-    }
-}
-
 impl TryFrom<&AttrString> for Attribute {
     type Error = OperationError;
 
     fn try_from(value: &AttrString) -> Result<Self, Self::Error> {
-        Attribute::try_from(value.to_string())
+        Attribute::try_from(value.as_str())
     }
 }
 
-impl TryFrom<String> for Attribute {
+impl<'a> TryFrom<&'a str> for Attribute {
     type Error = OperationError;
-    fn try_from(val: String) -> Result<Self, OperationError> {
-        let res = match val.as_str() {
+
+    fn try_from(val: &'a str) -> Result<Self, OperationError> {
+        let res = match val {
             ATTR_ACCOUNT => Attribute::Account,
             ATTR_ACCOUNT_EXPIRE => Attribute::AccountExpire,
             ATTR_ACCOUNT_VALID_FROM => Attribute::AccountValidFrom,
@@ -402,7 +394,7 @@ impl TryFrom<String> for Attribute {
             TEST_ATTR_NOTALLOWED => Attribute::TestNotAllowed,
             _ => {
                 trace!("Failed to convert {} to Attribute", val);
-                return Err(OperationError::InvalidAttributeName(val));
+                return Err(OperationError::InvalidAttributeName(val.to_string()));
             }
         };
         Ok(res)
@@ -610,7 +602,7 @@ impl<'a> serde::Deserialize<'a> for Attribute {
         D: serde::Deserializer<'a>,
     {
         let s = String::deserialize(deserializer)?;
-        Attribute::try_from(s).map_err(|e| serde::de::Error::custom(format!("{:?}", e)))
+        Attribute::try_from(s.as_str()).map_err(|e| serde::de::Error::custom(format!("{:?}", e)))
     }
 }
@@ -133,9 +133,14 @@ impl IdmServer {
         qs: QueryServer,
         origin: &str,
     ) -> Result<(IdmServer, IdmServerDelayed, IdmServerAudit), OperationError> {
-        let crypto_policy = CryptoPolicy::time_target(Duration::from_millis(10));
+        let crypto_policy = if cfg!(test) {
+            CryptoPolicy::danger_test_minimum()
+        } else {
+            // This is calculated back from:
+            //   100 password auths / thread -> 0.010 sec per op
+            CryptoPolicy::time_target(Duration::from_millis(10))
+        };
 
         let (async_tx, async_rx) = unbounded();
         let (audit_tx, audit_rx) = unbounded();
@@ -21,10 +21,14 @@
 #![deny(clippy::manual_let_else)]
 #![allow(clippy::unreachable)]
 
-#[cfg(test)]
+#[cfg(all(test, not(feature = "dhat-heap")))]
 #[global_allocator]
 static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;
 
+#[cfg(all(test, feature = "dhat-heap"))]
+#[global_allocator]
+static ALLOC: dhat::Alloc = dhat::Alloc;
+
 #[macro_use]
 extern crate rusqlite;
@@ -141,7 +141,7 @@ impl ModifyList<ModifyInvalid> {
 
         pe.attrs.iter().try_for_each(|(attr, vals)| {
             // Issue a purge to the attr.
-            let attr: Attribute = (attr.clone()).try_into()?;
+            let attr: Attribute = attr.as_str().try_into()?;
             mods.push(m_purge(attr));
             // Now if there are vals, push those too.
             // For each value we want to now be present.
@@ -53,7 +53,7 @@ impl NameHistory {
                 // as of now we're interested just in the name so we use Iname
                 match post_name {
                     Value::Iname(n) => post.add_ava_if_not_exist(
-                        ava_name.try_into()?,
+                        ava_name.as_str().try_into()?,
                         Value::AuditLogString(cid.clone(), n),
                     ),
                     _ => return Err(OperationError::InvalidValueState),
@@ -77,7 +77,7 @@ impl NameHistory {
                 let ava_name = Self::get_ava_name(history_attr);
                 match name {
                     Value::Iname(n) => cand.add_ava_if_not_exist(
-                        ava_name.try_into()?,
+                        ava_name.as_str().try_into()?,
                         Value::AuditLogString(cid.clone(), n),
                     ),
                     _ => return Err(OperationError::InvalidValueState),
@@ -104,7 +104,7 @@ impl ReferentialIntegrity {
             .flat_map(|u| ref_types.values().filter_map(move |r_type| {
                 let value_attribute = r_type.name.to_string();
                 // For everything that references the uuid's in the deleted set.
-                let val: Result<Attribute, OperationError> = value_attribute.try_into();
+                let val: Result<Attribute, OperationError> = value_attribute.as_str().try_into();
                 // error!("{:?}", val);
                 let res = match val {
                     Ok(val) => {
@@ -132,7 +132,8 @@ fn create_filter_entry<'a>(
 
     // -- Conditions pass -- now verify the attributes.
 
-    security_access!(?entry, acs = ?accr.acp, "entry matches acs");
+    let entry_name = entry.get_display_id();
+    security_access!(%entry_name, acs = ?accr.acp.acp.name, "entry matches acs");
     // It matches, so now we have to check attrs and classes.
     // Remember, we have to match ALL requested attrs
     // and classes to pass!
@@ -127,13 +127,13 @@ fn delete_filter_entry<'a>(
         }
     };
 
+    let entry_name = entry.get_display_id();
     security_access!(
-        entry_uuid = ?entry.get_uuid(),
+        %entry_name,
         acs = %acd.acp.acp.name,
         "entry matches acs"
     );
     // It matches, so we can delete this!
     trace!("passed");
 
     true
 }); // any related_acp
@@ -2022,7 +2022,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         // If the server is in a late phase of start up or is
         // operational, then a reindex may be required. After the reindex, the schema
         // must also be reloaded so that slope optimisation indexes are loaded correctly.
-        if *self.phase >= ServerPhase::DomainInfoReady {
+        if *self.phase >= ServerPhase::Running {
             self.reindex()?;
             self.reload_schema()?;
         }
@@ -20,6 +20,7 @@ impl KanidmOrcaClient {
             .address(profile.control_uri().to_string())
             .danger_accept_invalid_hostnames(true)
             .danger_accept_invalid_certs(true)
+            .request_timeout(1200)
             .build()
             .map_err(|err| {
                 error!(?err, "Unable to create kanidm client");
@@ -21,6 +21,7 @@ async fn preflight_person(
 
     if client.person_exists(&person.username).await? {
         // Do nothing? Do we need to reset them later?
+        return Ok(());
     } else {
         client
             .person_create(&person.username, &person.display_name)
@@ -74,36 +75,63 @@ pub async fn preflight(state: State) -> Result<(), Error> {
     // Apply any flags if they exist.
     apply_flags(client.clone(), state.preflight_flags.as_slice()).await?;
 
-    let mut tasks = Vec::with_capacity(state.persons.len());
+    let state_persons_len = state.persons.len();
+
+    let mut tasks = Vec::with_capacity(state_persons_len);
 
     // Create persons.
     for person in state.persons.into_iter() {
         let c = client.clone();
-        tasks.push(tokio::spawn(preflight_person(c, person)))
+        // Write operations are single threaded in Kanidm, so we don't need to attempt
+        // to parallelise that here.
+        // tasks.push(tokio::spawn(preflight_person(c, person)))
+        tasks.push(preflight_person(c, person))
     }
 
-    for task in tasks {
-        task.await.map_err(|tokio_err| {
-            error!(?tokio_err, "Failed to join task");
-            Error::Tokio
-        })??;
-        // The double ? isn't a mistake, it's because this is Result<Result<T, E>, E>
-        // and flatten is nightly.
-    }
+    let tasks_par = tasks.split_off(state_persons_len / 2);
+
+    let left = tokio::spawn(async move {
+        for (i, task) in tasks.into_iter().enumerate() {
+            let _ = task.await;
+            if i % 500 == 0 {
+                eprint!(".");
+            }
+        }
+    });
+    let right = tokio::spawn(async move {
+        for (i, task) in tasks_par.into_iter().enumerate() {
+            let _ = task.await;
+            if i % 500 == 0 {
+                eprint!(".");
+            }
+        }
+    });
+
+    left.await.map_err(|tokio_err| {
+        error!(?tokio_err, "Failed to join task");
+        Error::Tokio
+    })?;
+    right.await.map_err(|tokio_err| {
+        error!(?tokio_err, "Failed to join task");
+        Error::Tokio
+    })?;
 
     // Create groups.
     let mut tasks = Vec::with_capacity(state.groups.len());
 
     for group in state.groups.into_iter() {
         let c = client.clone();
-        tasks.push(tokio::spawn(preflight_group(c, group)))
+        // Write operations are single threaded in Kanidm, so we don't need to attempt
+        // to parallelise that here.
+        // tasks.push(tokio::spawn(preflight_group(c, group)))
+        tasks.push(preflight_group(c, group))
    }
 
     for task in tasks {
-        task.await.map_err(|tokio_err| {
-            error!(?tokio_err, "Failed to join task");
-            Error::Tokio
-        })??;
+        task.await?;
+        /*
+        task.await
+        */
     }
 
     // Create integrations.
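The restructuring above can be reduced to this standalone sketch (workload and counts invented): build the work as plain futures instead of spawned tasks, split the list in half, and drain each half sequentially from its own tokio task, so at most two requests are ever in flight against the single-threaded write path.

```rust
use tokio::task::JoinHandle;

async fn work(i: usize) -> usize {
    i
}

#[tokio::main]
async fn main() {
    // Futures are queued without spawning, so nothing runs yet.
    let mut tasks: Vec<_> = (0..1000usize).map(work).collect();
    let tasks_par = tasks.split_off(tasks.len() / 2);

    // Each spawned task awaits its half in order: two lanes of work.
    let left: JoinHandle<()> = tokio::spawn(async move {
        for task in tasks {
            let _ = task.await;
        }
    });
    let right = tokio::spawn(async move {
        for task in tasks_par {
            let _ = task.await;
        }
    });

    left.await.expect("join failed");
    right.await.expect("join failed");
}
```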
@@ -803,6 +803,7 @@ async fn main() -> ExitCode {
     }
 
     let cb = cb.connect_timeout(cfg.conn_timeout);
+    let cb = cb.request_timeout(cfg.request_timeout);
 
     let rsclient = match cb.build() {
         Ok(rsc) => rsc,
@@ -18,6 +18,7 @@ struct ConfigInt {
     sock_path: Option<String>,
     task_sock_path: Option<String>,
     conn_timeout: Option<u64>,
+    request_timeout: Option<u64>,
     cache_timeout: Option<u64>,
     pam_allowed_login_groups: Option<Vec<String>>,
     default_shell: Option<String>,
@@ -101,6 +102,7 @@ pub struct KanidmUnixdConfig {
     pub sock_path: String,
     pub task_sock_path: String,
     pub conn_timeout: u64,
+    pub request_timeout: u64,
     pub cache_timeout: u64,
     pub unix_sock_timeout: u64,
     pub pam_allowed_login_groups: Vec<String>,
@@ -130,6 +132,7 @@ impl Display for KanidmUnixdConfig {
         writeln!(f, "sock_path: {}", self.sock_path)?;
         writeln!(f, "task_sock_path: {}", self.task_sock_path)?;
         writeln!(f, "conn_timeout: {}", self.conn_timeout)?;
+        writeln!(f, "request_timeout: {}", self.request_timeout)?;
         writeln!(f, "unix_sock_timeout: {}", self.unix_sock_timeout)?;
         writeln!(f, "cache_timeout: {}", self.cache_timeout)?;
         writeln!(
@@ -176,6 +179,7 @@ impl KanidmUnixdConfig {
             sock_path: DEFAULT_SOCK_PATH.to_string(),
             task_sock_path: DEFAULT_TASK_SOCK_PATH.to_string(),
             conn_timeout: DEFAULT_CONN_TIMEOUT,
+            request_timeout: DEFAULT_CONN_TIMEOUT * 2,
             unix_sock_timeout: DEFAULT_CONN_TIMEOUT * 2,
             cache_timeout: DEFAULT_CACHE_TIMEOUT,
             pam_allowed_login_groups: Vec::new(),
@@ -240,13 +244,16 @@ impl KanidmUnixdConfig {
             UnixIntegrationError
         })?;
 
+        let conn_timeout = config.conn_timeout.unwrap_or(self.conn_timeout);
+
         // Now map the values into our config.
         Ok(KanidmUnixdConfig {
             db_path: config.db_path.unwrap_or(self.db_path),
             sock_path: config.sock_path.unwrap_or(self.sock_path),
             task_sock_path: config.task_sock_path.unwrap_or(self.task_sock_path),
-            conn_timeout: config.conn_timeout.unwrap_or(self.conn_timeout),
-            unix_sock_timeout: config.conn_timeout.unwrap_or(self.conn_timeout) * 2,
+            conn_timeout,
+            request_timeout: config.request_timeout.unwrap_or(conn_timeout * 2),
+            unix_sock_timeout: conn_timeout * 2,
             cache_timeout: config.cache_timeout.unwrap_or(self.cache_timeout),
             pam_allowed_login_groups: config
                 .pam_allowed_login_groups
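The defaulting rule for the new field, restated as a tiny sketch (the function name and tuple return are for illustration only): an explicit `request_timeout` wins, otherwise it tracks the connection timeout at twice its value, and the unix socket timeout is always derived as `conn * 2`.

```rust
fn resolve_timeouts(
    conn_cfg: Option<u64>,
    req_cfg: Option<u64>,
    default_conn: u64,
) -> (u64, u64, u64) {
    let conn_timeout = conn_cfg.unwrap_or(default_conn);
    let request_timeout = req_cfg.unwrap_or(conn_timeout * 2);
    let unix_sock_timeout = conn_timeout * 2;
    (conn_timeout, request_timeout, unix_sock_timeout)
}

fn main() {
    // No overrides: conn = 2 -> request = 4, unix sock = 4.
    assert_eq!(resolve_timeouts(None, None, 2), (2, 4, 4));
    // Explicit request timeout wins; the others still track conn.
    assert_eq!(resolve_timeouts(Some(5), Some(60), 2), (5, 60, 10));
}
```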