diff --git a/libs/profiles/src/lib.rs b/libs/profiles/src/lib.rs
index d45d167ab..b720e220f 100644
--- a/libs/profiles/src/lib.rs
+++ b/libs/profiles/src/lib.rs
@@ -92,6 +92,11 @@ pub fn apply_profile() {
         println!("cargo:rustc-env=KANIDM_PKG_VERSION={}", version);
     };

+    let version_pre = env!("CARGO_PKG_VERSION_PRE");
+    if version_pre == "dev" {
+        println!("cargo:rustc-env=KANIDM_PRE_RELEASE=1");
+    }
+
     match profile_cfg.cpu_flags {
         CpuOptLevel::apple_m1 => println!("cargo:rustc-env=RUSTFLAGS=-Ctarget-cpu=apple_m1"),
         CpuOptLevel::none => {}
diff --git a/server/core/src/config.rs b/server/core/src/config.rs
index 6d5c28f72..1c4443042 100644
--- a/server/core/src/config.rs
+++ b/server/core/src/config.rs
@@ -251,6 +251,7 @@ impl ServerConfig {
             "KANIDM_DEFAULT_CONFIG_PATH",
             "KANIDM_DEFAULT_UNIX_SHELL_PATH",
             "KANIDM_PKG_VERSION",
+            "KANIDM_PRE_RELEASE",
             "KANIDM_PROFILE_NAME",
             "KANIDM_WEB_UI_PKG_PATH",
         ];
diff --git a/server/core/src/lib.rs b/server/core/src/lib.rs
index c501b77b0..528d207a5 100644
--- a/server/core/src/lib.rs
+++ b/server/core/src/lib.rs
@@ -108,7 +108,9 @@ async fn setup_qs_idms(
     // Now search for the schema itself, and validate that the system
     // in memory matches the BE on disk, and that it's syntactically correct.
     // Write it out if changes are needed.
-    query_server.initialise_helper(curtime).await?;
+    query_server
+        .initialise_helper(curtime, DOMAIN_TGT_LEVEL)
+        .await?;

     // We generate a SINGLE idms only!

@@ -135,7 +137,9 @@ async fn setup_qs(
     // Now search for the schema itself, and validate that the system
     // in memory matches the BE on disk, and that it's syntactically correct.
     // Write it out if changes are needed.
-    query_server.initialise_helper(curtime).await?;
+    query_server
+        .initialise_helper(curtime, DOMAIN_TGT_LEVEL)
+        .await?;

     Ok(query_server)
 }
diff --git a/server/lib-macros/src/entry.rs b/server/lib-macros/src/entry.rs
index 935886cbf..78516d9ff 100644
--- a/server/lib-macros/src/entry.rs
+++ b/server/lib-macros/src/entry.rs
@@ -1,14 +1,80 @@
 use proc_macro::TokenStream;
 use proc_macro2::{Ident, Span};
 use quote::{quote, quote_spanned, ToTokens};
-use syn::spanned::Spanned;
+use syn::{parse::Parser, punctuated::Punctuated, spanned::Spanned, ExprAssign, Token};

 fn token_stream_with_error(mut tokens: TokenStream, error: syn::Error) -> TokenStream {
     tokens.extend(TokenStream::from(error.into_compile_error()));
     tokens
 }

-pub(crate) fn qs_test(_args: &TokenStream, item: TokenStream, with_init: bool) -> TokenStream {
+const ALLOWED_ATTRIBUTES: &[&str] = &["audit", "domain_level"];
+
+#[derive(Default)]
+struct Flags {
+    audit: bool,
+}
+
+fn parse_attributes(
+    args: &TokenStream,
+    input: &syn::ItemFn,
+) -> Result<(proc_macro2::TokenStream, Flags), syn::Error> {
+    let args: Punctuated<ExprAssign, Token![,]> =
+        match Punctuated::<ExprAssign, Token![,]>::parse_terminated.parse(args.clone()) {
+            Ok(it) => it,
+            Err(e) => return Err(e),
+        };
+
+    let args_are_allowed = args.pairs().all(|p| {
+        ALLOWED_ATTRIBUTES.to_vec().contains(
+            &p.value()
+                .left
+                .span()
+                .source_text()
+                .unwrap_or_default()
+                .as_str(),
+        )
+    });
+
+    if !args_are_allowed {
+        let msg = "Invalid test config attribute. The following are allowed";
+        return Err(syn::Error::new_spanned(
+            input.sig.fn_token,
+            format!("{}: {}", msg, ALLOWED_ATTRIBUTES.join(", ")),
+        ));
+    }
+
+    let mut flags = Flags::default();
+    let mut field_modifications = quote!
{}; + + args.pairs().for_each(|p| { + match p + .value() + .left + .span() + .source_text() + .unwrap_or_default() + .as_str() + { + "audit" => flags.audit = true, + _ => { + let field_name = p.value().left.to_token_stream(); // here we can use to_token_stream as we know we're iterating over ExprAssigns + let field_value = p.value().right.to_token_stream(); + field_modifications.extend(quote! { + #field_name: #field_value,}) + } + } + }); + + let ts = quote!(crate::testkit::TestConfiguration { + #field_modifications + ..crate::testkit::TestConfiguration::default() + }); + + Ok((ts, flags)) +} + +pub(crate) fn qs_test(args: TokenStream, item: TokenStream) -> TokenStream { let input: syn::ItemFn = match syn::parse(item.clone()) { Ok(it) => it, Err(e) => return token_stream_with_error(item, e), @@ -42,6 +108,12 @@ pub(crate) fn qs_test(_args: &TokenStream, item: TokenStream, with_init: bool) - (start, end) }; + // Setup the config filling the remaining fields with the default values + let (default_config_struct, _flags) = match parse_attributes(&args, &input) { + Ok(dc) => dc, + Err(e) => return token_stream_with_error(args, e), + }; + let rt = quote_spanned! {last_stmt_start_span=> tokio::runtime::Builder::new_current_thread() }; @@ -50,16 +122,6 @@ pub(crate) fn qs_test(_args: &TokenStream, item: TokenStream, with_init: bool) - #[::core::prelude::v1::test] }; - let init = if with_init { - quote! { - test_server.initialise_helper(duration_from_epoch_now()) - .await - .expect("init failed!"); - } - } else { - quote! {} - }; - let test_fn = &input.sig.ident; let test_driver = Ident::new(&format!("qs_{}", test_fn), input.sig.span()); @@ -72,9 +134,9 @@ pub(crate) fn qs_test(_args: &TokenStream, item: TokenStream, with_init: bool) - #header fn #test_driver() { let body = async { - let test_server = crate::testkit::setup_test().await; + let test_config = #default_config_struct; - #init + let test_server = crate::testkit::setup_test(test_config).await; #test_fn(&test_server).await; @@ -100,7 +162,7 @@ pub(crate) fn qs_test(_args: &TokenStream, item: TokenStream, with_init: bool) - result.into() } -pub(crate) fn qs_pair_test(_args: &TokenStream, item: TokenStream, with_init: bool) -> TokenStream { +pub(crate) fn qs_pair_test(args: &TokenStream, item: TokenStream) -> TokenStream { let input: syn::ItemFn = match syn::parse(item.clone()) { Ok(it) => it, Err(e) => return token_stream_with_error(item, e), @@ -142,17 +204,10 @@ pub(crate) fn qs_pair_test(_args: &TokenStream, item: TokenStream, with_init: bo #[::core::prelude::v1::test] }; - let init = if with_init { - quote! { - server_a.initialise_helper(duration_from_epoch_now()) - .await - .expect("init_a failed!"); - server_b.initialise_helper(duration_from_epoch_now()) - .await - .expect("init_b failed!"); - } - } else { - quote! 
{} + // Setup the config filling the remaining fields with the default values + let (default_config_struct, _flags) = match parse_attributes(&args, &input) { + Ok(dc) => dc, + Err(e) => return token_stream_with_error(args.clone(), e), }; let test_fn = &input.sig.ident; @@ -167,9 +222,9 @@ pub(crate) fn qs_pair_test(_args: &TokenStream, item: TokenStream, with_init: bo #header fn #test_driver() { let body = async { - let (server_a, server_b) = crate::testkit::setup_pair_test().await; + let test_config = #default_config_struct; - #init + let (server_a, server_b) = crate::testkit::setup_pair_test(test_config).await; #test_fn(&server_a, &server_b).await; @@ -197,8 +252,6 @@ pub(crate) fn qs_pair_test(_args: &TokenStream, item: TokenStream, with_init: bo } pub(crate) fn idm_test(args: &TokenStream, item: TokenStream) -> TokenStream { - let audit = args.to_string() == "audit"; - let input: syn::ItemFn = match syn::parse(item.clone()) { Ok(it) => it, Err(e) => return token_stream_with_error(item, e), @@ -232,6 +285,12 @@ pub(crate) fn idm_test(args: &TokenStream, item: TokenStream) -> TokenStream { (start, end) }; + // Setup the config filling the remaining fields with the default values + let (default_config_struct, flags) = match parse_attributes(&args, &input) { + Ok(dc) => dc, + Err(e) => return token_stream_with_error(args.clone(), e), + }; + let rt = quote_spanned! {last_stmt_start_span=> tokio::runtime::Builder::new_current_thread() }; @@ -243,7 +302,7 @@ pub(crate) fn idm_test(args: &TokenStream, item: TokenStream) -> TokenStream { let test_fn = &input.sig.ident; let test_driver = Ident::new(&format!("idm_{}", test_fn), input.sig.span()); - let test_fn_args = if audit { + let test_fn_args = if flags.audit { quote! { &test_server, &mut idms_delayed, &mut idms_audit } @@ -262,7 +321,9 @@ pub(crate) fn idm_test(args: &TokenStream, item: TokenStream) -> TokenStream { #header fn #test_driver() { let body = async { - let (test_server, mut idms_delayed, mut idms_audit) = crate::testkit::setup_idm_test().await; + let test_config = #default_config_struct; + + let (test_server, mut idms_delayed, mut idms_audit) = crate::testkit::setup_idm_test(test_config).await; #test_fn(#test_fn_args).await; diff --git a/server/lib-macros/src/lib.rs b/server/lib-macros/src/lib.rs index c17fa2c2c..67d5f07ac 100644 --- a/server/lib-macros/src/lib.rs +++ b/server/lib-macros/src/lib.rs @@ -19,17 +19,12 @@ use proc_macro::TokenStream; #[proc_macro_attribute] pub fn qs_test(args: TokenStream, item: TokenStream) -> TokenStream { - entry::qs_test(&args, item, true) + entry::qs_test(args, item) } #[proc_macro_attribute] pub fn qs_pair_test(args: TokenStream, item: TokenStream) -> TokenStream { - entry::qs_pair_test(&args, item, true) -} - -#[proc_macro_attribute] -pub fn qs_test_no_init(args: TokenStream, item: TokenStream) -> TokenStream { - entry::qs_test(&args, item, false) + entry::qs_pair_test(&args, item) } #[proc_macro_attribute] diff --git a/server/lib/src/be/mod.rs b/server/lib/src/be/mod.rs index 4a1eb5afa..f3e00b07a 100644 --- a/server/lib/src/be/mod.rs +++ b/server/lib/src/be/mod.rs @@ -1628,18 +1628,16 @@ impl<'a> BackendWriteTransaction<'a> { let dbv = self.get_db_index_version()?; admin_debug!(?dbv, ?v, "upgrade_reindex"); if dbv < v { - limmediate_warning!( - "NOTICE: A system reindex is required. 
This may take a long time ...\n" - ); self.reindex()?; - limmediate_warning!("NOTICE: System reindex complete\n"); self.set_db_index_version(v) } else { Ok(()) } } + #[instrument(level = "info", skip_all)] pub fn reindex(&mut self) -> Result<(), OperationError> { + limmediate_warning!("NOTICE: System reindex started\n"); // Purge the idxs self.idlayer.danger_purge_idxs()?; @@ -1672,7 +1670,7 @@ impl<'a> BackendWriteTransaction<'a> { admin_error!("reindex failed -> {:?}", e); e })?; - limmediate_warning!(" reindexed {} entries ✅\n", count); + limmediate_warning!("done ✅: reindexed {} entries\n", count); limmediate_warning!("Optimising Indexes ... "); self.idlayer.optimise_dirty_idls(); limmediate_warning!("done ✅\n"); @@ -1682,6 +1680,7 @@ impl<'a> BackendWriteTransaction<'a> { e })?; limmediate_warning!("done ✅\n"); + limmediate_warning!("NOTICE: System reindex complete\n"); Ok(()) } diff --git a/server/lib/src/constants/acp.rs b/server/lib/src/constants/acp.rs index 153a06e57..8aaa48377 100644 --- a/server/lib/src/constants/acp.rs +++ b/server/lib/src/constants/acp.rs @@ -1412,6 +1412,62 @@ lazy_static! { }; } +lazy_static! { + pub static ref IDM_ACP_GROUP_MANAGE_DL6: BuiltinAcp = BuiltinAcp{ + classes: vec![ + EntryClass::Object, + EntryClass::AccessControlProfile, + EntryClass::AccessControlCreate, + EntryClass::AccessControlDelete, + EntryClass::AccessControlModify, + EntryClass::AccessControlSearch + ], + name: "idm_acp_group_manage", + uuid: UUID_IDM_ACP_GROUP_MANAGE_V1, + description: "Builtin IDM Control for creating and deleting groups in the directory", + receiver: BuiltinAcpReceiver::Group ( vec![UUID_IDM_GROUP_ADMINS] ), + // group which is not in HP, Recycled, Tombstone + target: BuiltinAcpTarget::Filter( ProtoFilter::And(vec![ + match_class_filter!(EntryClass::Group), + FILTER_ANDNOT_HP_OR_RECYCLED_OR_TOMBSTONE.clone(), + ])), + search_attrs: vec![ + Attribute::Class, + Attribute::Name, + Attribute::Uuid, + Attribute::Spn, + Attribute::Uuid, + Attribute::Description, + Attribute::Member, + Attribute::DynMember, + Attribute::EntryManagedBy, + ], + create_attrs: vec![ + Attribute::Class, + Attribute::Name, + Attribute::Uuid, + Attribute::Description, + Attribute::Member, + Attribute::EntryManagedBy, + ], + create_classes: vec![ + EntryClass::Object, + EntryClass::Group, + ], + modify_present_attrs: vec![ + Attribute::Name, + Attribute::Description, + Attribute::Member, + ], + modify_removed_attrs: vec![ + Attribute::Name, + Attribute::Description, + Attribute::Member, + ], + ..Default::default() + }; +} + lazy_static! { pub static ref IDM_ACP_GROUP_UNIX_MANAGE_V1: BuiltinAcp = BuiltinAcp { classes: vec![ @@ -1582,6 +1638,39 @@ lazy_static! { }; } +lazy_static! 
{ + pub static ref IDM_ACP_PEOPLE_CREATE_DL6: BuiltinAcp = BuiltinAcp { + classes: vec![ + EntryClass::Object, + EntryClass::AccessControlProfile, + EntryClass::AccessControlCreate, + ], + name: "idm_acp_people_create", + uuid: UUID_IDM_ACP_PEOPLE_CREATE_V1, + description: "Builtin IDM Control for creating new persons.", + receiver: BuiltinAcpReceiver::Group(vec![ + UUID_IDM_PEOPLE_ADMINS, + UUID_IDM_PEOPLE_ON_BOARDING + ]), + target: BuiltinAcpTarget::Filter(ProtoFilter::And(vec![ + match_class_filter!(EntryClass::Person).clone(), + match_class_filter!(EntryClass::Account).clone(), + FILTER_ANDNOT_TOMBSTONE_OR_RECYCLED.clone(), + ])), + create_attrs: vec![ + Attribute::Class, + Attribute::Uuid, + Attribute::Name, + Attribute::DisplayName, + Attribute::Mail, + Attribute::AccountExpire, + Attribute::AccountValidFrom, + ], + create_classes: vec![EntryClass::Object, EntryClass::Account, EntryClass::Person,], + ..Default::default() + }; +} + lazy_static! { pub static ref IDM_ACP_PEOPLE_MANAGE_V1: BuiltinAcp = BuiltinAcp { classes: vec![ diff --git a/server/lib/src/constants/mod.rs b/server/lib/src/constants/mod.rs index 43148f35b..ded001fbe 100644 --- a/server/lib/src/constants/mod.rs +++ b/server/lib/src/constants/mod.rs @@ -18,7 +18,9 @@ pub use crate::constants::values::*; use std::time::Duration; -// Increment this as we add new schema types and values!!! +// This value no longer requires incrementing during releases. It only +// serves as a "once off" marker so that we know when the initial db +// index is performed on first-run. pub const SYSTEM_INDEX_VERSION: i64 = 31; /* @@ -54,6 +56,8 @@ pub const DOMAIN_LEVEL_6: DomainVersion = 6; pub const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_LEVEL_2; // The minimum supported domain functional level pub const DOMAIN_MIN_LEVEL: DomainVersion = DOMAIN_TGT_LEVEL; +// The previous releases domain functional level +pub const DOMAIN_PREVIOUS_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_5; // The target supported domain functional level pub const DOMAIN_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_6; // The maximum supported domain functional level diff --git a/server/lib/src/idm/credupdatesession.rs b/server/lib/src/idm/credupdatesession.rs index d0ba1d6e9..650994366 100644 --- a/server/lib/src/idm/credupdatesession.rs +++ b/server/lib/src/idm/credupdatesession.rs @@ -4080,7 +4080,7 @@ mod tests { ); } - #[idm_test(audit)] + #[idm_test(audit = 1)] async fn test_idm_credential_update_account_policy_attested_passkey_changed( idms: &IdmServer, idms_delayed: &mut IdmServerDelayed, diff --git a/server/lib/src/idm/reauth.rs b/server/lib/src/idm/reauth.rs index e85db06f4..49d0f6a8b 100644 --- a/server/lib/src/idm/reauth.rs +++ b/server/lib/src/idm/reauth.rs @@ -657,7 +657,7 @@ mod tests { assert!(matches!(ident.access_scope(), AccessScope::ReadWrite)); } - #[idm_test(audit)] + #[idm_test(audit = 1)] async fn test_idm_reauth_softlocked_pw( idms: &IdmServer, idms_delayed: &mut IdmServerDelayed, diff --git a/server/lib/src/idm/server.rs b/server/lib/src/idm/server.rs index 4ee5141e7..9ff7c258b 100644 --- a/server/lib/src/idm/server.rs +++ b/server/lib/src/idm/server.rs @@ -2535,7 +2535,7 @@ mod tests { idms_auth.commit().expect("Must not fail"); } - #[idm_test(audit)] + #[idm_test(audit = 1)] async fn test_idm_simple_password_invalid( idms: &IdmServer, _idms_delayed: &IdmServerDelayed, @@ -3150,7 +3150,7 @@ mod tests { } } - #[idm_test(audit)] + #[idm_test(audit = 1)] async fn test_idm_account_softlocking( idms: &IdmServer, idms_delayed: &mut 
IdmServerDelayed,
@@ -3312,7 +3312,7 @@ mod tests {
         // Tested in the softlock state machine.
     }

-    #[idm_test(audit)]
+    #[idm_test(audit = 1)]
     async fn test_idm_account_softlocking_interleaved(
         idms: &IdmServer,
         _idms_delayed: &mut IdmServerDelayed,
diff --git a/server/lib/src/macros.rs b/server/lib/src/macros.rs
index 7ccd408d2..10e37ef10 100644
--- a/server/lib/src/macros.rs
+++ b/server/lib/src/macros.rs
@@ -18,7 +18,7 @@ macro_rules! setup_test {
             .enable_all()
             .build()
             .unwrap()
-            .block_on(qs.initialise_helper(duration_from_epoch_now()))
+            .block_on(qs.initialise_helper(duration_from_epoch_now(), DOMAIN_TGT_LEVEL))
             .expect("init failed!");
         qs
     }};
@@ -44,7 +44,7 @@ macro_rules! setup_test {
             .enable_all()
             .build()
             .unwrap()
-            .block_on(qs.initialise_helper(duration_from_epoch_now()))
+            .block_on(qs.initialise_helper(duration_from_epoch_now(), DOMAIN_TGT_LEVEL))
             .expect("init failed!");

         if !$preload_entries.is_empty() {
diff --git a/server/lib/src/plugins/domain.rs b/server/lib/src/plugins/domain.rs
index 94744287b..1f9d77a53 100644
--- a/server/lib/src/plugins/domain.rs
+++ b/server/lib/src/plugins/domain.rs
@@ -107,7 +107,7 @@ impl Domain {
                 e.set_ava(Attribute::Version, once(n));
                 warn!("plugin_domain: Applying domain version transform");
             } else {
-                warn!("plugin_domain: NOT Applying domain version transform");
+                debug!("plugin_domain: NOT Applying domain version transform");
             };

             // create the domain_display_name if it's missing
diff --git a/server/lib/src/server/migrations.rs b/server/lib/src/server/migrations.rs
index 39dc1cdc5..8be254e0c 100644
--- a/server/lib/src/server/migrations.rs
+++ b/server/lib/src/server/migrations.rs
@@ -7,21 +7,18 @@ use super::ServerPhase;

 impl QueryServer {
     #[instrument(level = "info", name = "system_initialisation", skip_all)]
-    pub async fn initialise_helper(&self, ts: Duration) -> Result<(), OperationError> {
+    pub async fn initialise_helper(
+        &self,
+        ts: Duration,
+        domain_target_level: DomainVersion,
+    ) -> Result<(), OperationError> {
         // We need to perform this in a single transaction pass to prevent tainting
         // databases during upgrades.
         let mut write_txn = self.write(ts).await;

         // Check our database version - attempt to do an initial indexing
-        // based on the in memory configuration
-        //
-        // If we ever change the core in memory schema, or the schema that we ship
-        // in fixtures, we have to bump these values. This is how we manage the
-        // first-run and upgrade reindexings.
-        //
-        // A major reason here to split to multiple transactions is to allow schema
-        // reloading to occur, which causes the idxmeta to update, and allows validation
-        // of the schema in the subsequent steps as we proceed.
+        // based on the in memory configuration. This ONLY triggers ONCE on
+        // the very first run of the instance when the DB is newly created.
         write_txn.upgrade_reindex(SYSTEM_INDEX_VERSION)?;

         // Because we init the schema here, and commit, this reloads meaning
@@ -123,14 +120,15 @@ impl QueryServer {
         // Reload if anything in the (older) system migrations requires it.
         write_txn.reload()?;

-        // This is what tells us if the domain entry existed before or not.
+        // This is what tells us if the domain entry existed before or not. This
+        // is now the primary method of migrations and version detection.
         let db_domain_version = match write_txn.internal_search_uuid(UUID_DOMAIN_INFO) {
             Ok(e) => Ok(e.get_ava_single_uint32(Attribute::Version).unwrap_or(0)),
             Err(OperationError::NoMatchingEntries) => Ok(0),
             Err(r) => Err(r),
         }?;

-        info!(?db_domain_version, "Before setting internal domain info");
+        debug!(?db_domain_version, "Before setting internal domain info");

         // No domain info was present, so neither was the rest of the IDM. We need to bootstrap
         // the base-schema here.
@@ -138,9 +136,16 @@
             write_txn.initialise_schema_idm()?;

             write_txn.reload()?;
+
+            // Since we just loaded in a ton of schema, let's reindex it to make
+            // sure that some base IDM operations are fast. Since this is still
+            // very early in the bootstrap process, and very few entries exist,
+            // reindexing is very fast here.
+            write_txn.reindex()?;
         }

-        // Indicate the schema is now ready, which allows dyngroups to work.
+        // Indicate the schema is now ready, which allows dyngroups to work when they
+        // are created in the next phase of migrations.
         write_txn.set_phase(ServerPhase::SchemaReady);

         // Init idm will now set the system config version and minimum domain
@@ -153,8 +158,7 @@
             write_txn.initialise_idm()?;
         }

-        // Now force everything to reload, init idm can touch a lot.
-        write_txn.force_all_reload();
+        // Reload as init idm affects access controls.
         write_txn.reload()?;

         // Domain info is now ready and reloaded, we can proceed.
@@ -167,50 +171,47 @@
         // The reloads will have populated this structure now.
         let domain_info_version = write_txn.get_domain_version();
-        info!(?db_domain_version, "After setting internal domain info");
+        debug!(?db_domain_version, "After setting internal domain info");

-        if domain_info_version < DOMAIN_TGT_LEVEL {
+        if domain_info_version < domain_target_level {
             write_txn
                 .internal_modify_uuid(
                     UUID_DOMAIN_INFO,
                     &ModifyList::new_purge_and_set(
                         Attribute::Version,
-                        Value::new_uint32(DOMAIN_TGT_LEVEL),
+                        Value::new_uint32(domain_target_level),
                     ),
                 )
                 .map(|()| {
-                    warn!("Domain level has been raised to {}", DOMAIN_TGT_LEVEL);
+                    warn!("Domain level has been raised to {}", domain_target_level);
                 })?;
+        } else {
+            // This forces pre-release versions to re-migrate on each start up. This solves
+            // the domain-version-sprawl issue so that during a development cycle we can
+            // do a single domain version bump, and continue to extend the migrations
+            // within that release cycle to contain what we require.
+            //
+            // If this is a pre-release build
+            // AND
+            // we are NOT in a test environment
+            // AND
+            // We did not already need a version migration as above
+            if option_env!("KANIDM_PRE_RELEASE").is_some() && !cfg!(test) {
+                write_txn.domain_remigrate(DOMAIN_PREVIOUS_TGT_LEVEL)?;
+            }
         }

-        // Reload if anything in migrations requires it - this triggers the domain migrations.
+        // Reload if anything in migrations requires it - this triggers the domain migrations
+        // which in turn can trigger schema reloads etc.
         write_txn.reload()?;

-        // reindex and set to version + 1, this way when we bump the version
-        // we are essetially pushing this version id back up to step write_1
-        write_txn
-            .upgrade_reindex(SYSTEM_INDEX_VERSION + 1)
-            .and_then(|_| write_txn.reload())?;
-
-        // Force the schema to reload - this is so that any changes to index slope
-        // analysis that was performed during the reindex are now reflected correctly
-        // in the in-memory schema cache.
- // - // A side effect of these reloads is that other plugins or elements that reload - // on schema change are now setup. - write_txn.force_schema_reload(); - // We are ready to run write_txn.set_phase(ServerPhase::Running); // Commit all changes, this also triggers the reload. write_txn.commit()?; - // Here is where in the future we will need to apply domain version increments. - // The actually migrations are done in a transaction though, this just needs to - // bump the version in it's own transaction. - - admin_debug!("Database version check and migrations success! ☀️ "); + debug!("Database version check and migrations success! ☀️ "); Ok(()) } } @@ -721,7 +722,7 @@ impl<'a> QueryServerWriteTransaction<'a> { #[instrument(level = "info", skip_all)] /// This migration will /// * Trigger a "once off" mfa account policy rule on all persons. - pub fn migrate_domain_2_to_3(&mut self) -> Result<(), OperationError> { + pub(crate) fn migrate_domain_2_to_3(&mut self) -> Result<(), OperationError> { let idm_all_persons = match self.internal_search_uuid(UUID_IDM_ALL_PERSONS) { Ok(entry) => entry, Err(OperationError::NoMatchingEntries) => return Ok(()), @@ -750,7 +751,7 @@ impl<'a> QueryServerWriteTransaction<'a> { #[instrument(level = "info", skip_all)] /// Migrations for Oauth to support multiple origins, and custom claims. - pub fn migrate_domain_3_to_4(&mut self) -> Result<(), OperationError> { + pub(crate) fn migrate_domain_3_to_4(&mut self) -> Result<(), OperationError> { let idm_schema_attrs = [ SCHEMA_ATTR_OAUTH2_RS_CLAIM_MAP_DL4.clone().into(), SCHEMA_ATTR_OAUTH2_ALLOW_LOCALHOST_REDIRECT_DL4 @@ -786,7 +787,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// and to allow oauth2 sessions on resource servers for client credentials /// grants. Accounts, persons and service accounts have some attributes /// relocated to allow oauth2 rs to become accounts. - pub fn migrate_domain_4_to_5(&mut self) -> Result<(), OperationError> { + pub(crate) fn migrate_domain_4_to_5(&mut self) -> Result<(), OperationError> { let idm_schema_classes = [ SCHEMA_CLASS_PERSON_DL5.clone().into(), SCHEMA_CLASS_ACCOUNT_DL5.clone().into(), @@ -860,7 +861,8 @@ impl<'a> QueryServerWriteTransaction<'a> { } /// Migration domain level 5 to 6 - support query limits in account policy. - pub fn migrate_domain_5_to_6(&mut self) -> Result<(), OperationError> { + #[instrument(level = "info", skip_all)] + pub(crate) fn migrate_domain_5_to_6(&mut self) -> Result<(), OperationError> { let idm_schema_classes = [ SCHEMA_ATTR_LIMIT_SEARCH_MAX_RESULTS_DL6.clone().into(), SCHEMA_ATTR_LIMIT_SEARCH_MAX_FILTER_TEST_DL6.clone().into(), @@ -878,11 +880,21 @@ impl<'a> QueryServerWriteTransaction<'a> { self.reload()?; // Update access controls. 
- self.internal_migrate_or_create(IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL6.clone().into()) + let idm_access_controls = [ + IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL6.clone().into(), + IDM_ACP_PEOPLE_CREATE_DL6.clone().into(), + IDM_ACP_GROUP_MANAGE_DL6.clone().into(), + ]; + + idm_access_controls + .into_iter() + .try_for_each(|entry| self.internal_migrate_or_create(entry)) .map_err(|err| { error!(?err, "migrate_domain_5_to_6 -> Error"); err - }) + })?; + + Ok(()) } #[instrument(level = "info", skip_all)] @@ -1176,121 +1188,43 @@ mod tests { } } - /* - #[qs_test_no_init] - async fn test_qs_upgrade_entry_attrs(server: &QueryServer) { - let mut server_txn = server.write(duration_from_epoch_now()).await; - assert!(server_txn.upgrade_reindex(SYSTEM_INDEX_VERSION).is_ok()); - assert!(server_txn.commit().is_ok()); + #[qs_test(domain_level=DOMAIN_LEVEL_5)] + async fn test_migrations_dl5_dl6(server: &QueryServer) { + // Assert our instance was setup to version 5 + let mut write_txn = server.write(duration_from_epoch_now()).await; - let mut server_txn = server.write(duration_from_epoch_now()).await; - server_txn.initialise_schema_core().unwrap(); - server_txn.initialise_schema_idm().unwrap(); - assert!(server_txn.commit().is_ok()); - - let mut server_txn = server.write(duration_from_epoch_now()).await; - assert!(server_txn.upgrade_reindex(SYSTEM_INDEX_VERSION + 1).is_ok()); - assert!(server_txn.commit().is_ok()); - - let mut server_txn = server.write(duration_from_epoch_now()).await; - assert!(server_txn - .internal_migrate_or_create_str(JSON_SYSTEM_INFO_V1) - .is_ok()); - assert!(server_txn - .internal_migrate_or_create_str(JSON_DOMAIN_INFO_V1) - .is_ok()); - assert!(server_txn - .internal_migrate_or_create_str(JSON_SYSTEM_CONFIG_V1) - .is_ok()); - assert!(server_txn.commit().is_ok()); - - let mut server_txn = server.write(duration_from_epoch_now()).await; - // ++ Mod the schema to set name to the old string type - let me_syn = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_or!([ - f_eq(Attribute::AttributeName, Attribute::Name.to_partialvalue()), - f_eq(Attribute::AttributeName, Attribute::DomainName.into()), - ])), - ModifyList::new_purge_and_set( - "syntax", - Value::new_syntaxs("UTF8STRING_INSENSITIVE").unwrap(), - ), - ) - }; - assert!(server_txn.modify(&me_syn).is_ok()); - assert!(server_txn.commit().is_ok()); - - let mut server_txn = server.write(duration_from_epoch_now()).await; - // ++ Mod domain name and name to be the old type. - let me_dn = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_eq(Attribute::Uuid, PartialValue::Uuid(UUID_DOMAIN_INFO))), - ModifyList::new_list(vec![ - Modify::Purged(Attribute::Name.into()), - Modify::Purged(Attribute::DomainName.into()), - Modify::Present(Attribute::Name.into(), Value::new_iutf8("domain_local")), - Modify::Present( - Attribute::DomainName.into(), - Value::new_iutf8("example.com"), - ), - ]), - ) - }; - assert!(server_txn.modify(&me_dn).is_ok()); - - // Now, both the types are invalid. - - // WARNING! We can't commit here because this triggers domain_reload which will fail - // due to incorrect syntax of the domain name! Run the migration in the same txn! - // Trigger a schema reload. - assert!(server_txn.reload_schema().is_ok()); - - // We can't just re-run the migrate here because name takes it's definition from - // in memory, and we can't re-run the initial memory gen. So we just fix it to match - // what the migrate "would do". 
-        let me_syn = unsafe {
-            ModifyEvent::new_internal_invalid(
-                filter!(f_or!([
-                    f_eq(Attribute::AttributeName, Attribute::Name.to_partialvalue()),
-                    f_eq(Attribute::AttributeName, Attribute::DomainName.into()),
-                ])),
-                ModifyList::new_purge_and_set(
-                    "syntax",
-                    Value::new_syntaxs("UTF8STRING_INAME").unwrap(),
-                ),
-            )
-        };
-        assert!(server_txn.modify(&me_syn).is_ok());
-
-        // WARNING! We can't commit here because this triggers domain_reload which will fail
-        // due to incorrect syntax of the domain name! Run the migration in the same txn!
-        // Trigger a schema reload.
-        assert!(server_txn.reload_schema().is_ok());
-
-        // ++ Run the upgrade for X to Y
-        assert!(server_txn.migrate_2_to_3().is_ok());
-
-        assert!(server_txn.commit().is_ok());
-
-        // Assert that it migrated and worked as expected.
-        let mut server_txn = server.write(duration_from_epoch_now()).await;
-        let domain = server_txn
+        let db_domain_version = write_txn
             .internal_search_uuid(UUID_DOMAIN_INFO)
-            .expect("failed");
-        // ++ assert all names are iname
-        assert!(
-            domain.get_ava_set(Attribute::Name).expect("no name?").syntax() == SyntaxType::Utf8StringIname
-        );
-        // ++ assert all domain/domain_name are iname
-        assert!(
-            domain
-                .get_ava_set(Attribute::DomainName.as_ref())
-                .expect("no domain_name?")
-                .syntax()
-                == SyntaxType::Utf8StringIname
-        );
-        assert!(server_txn.commit().is_ok());
+            .expect("unable to access domain entry")
+            .get_ava_single_uint32(Attribute::Version)
+            .expect("Attribute Version not present");
+
+        assert_eq!(db_domain_version, DOMAIN_LEVEL_5);
+
+        // Entry doesn't exist yet.
+        let _entry_not_found = write_txn
+            .internal_search_uuid(UUID_SCHEMA_ATTR_LIMIT_SEARCH_MAX_RESULTS)
+            .expect_err("newly migrated schema entry must not exist yet");
+
+        // Set the version to 6.
+        write_txn
+            .internal_modify_uuid(
+                UUID_DOMAIN_INFO,
+                &ModifyList::new_purge_and_set(
+                    Attribute::Version,
+                    Value::new_uint32(DOMAIN_LEVEL_6),
+                ),
+            )
+            .expect("Unable to set domain level to version 6");
+
+        // Re-load - this applies the migrations.
+        write_txn.reload().expect("Unable to reload transaction");
+
+        // It now exists as the migrations were run.
+        let _entry = write_txn
+            .internal_search_uuid(UUID_SCHEMA_ATTR_LIMIT_SEARCH_MAX_RESULTS)
+            .expect("unable to find newly migrated schema entry");
+
+        write_txn.commit().expect("Unable to commit");
     }
-    */
 }
diff --git a/server/lib/src/server/mod.rs b/server/lib/src/server/mod.rs
index 53067b381..8a0d1b164 100644
--- a/server/lib/src/server/mod.rs
+++ b/server/lib/src/server/mod.rs
@@ -1438,7 +1438,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
             return Err(OperationError::MG0001InvalidReMigrationLevel);
         };

-        debug!(
+        info!(
             "Prepare to re-migrate from {} -> {}",
             level, mut_d_info.d_vers
         );
@@ -1722,7 +1722,6 @@ impl<'a> QueryServerWriteTransaction<'a> {
         debug!(domain_previous_version = ?previous_version, domain_target_version = ?domain_info_version);

         if previous_version <= DOMAIN_LEVEL_2 && domain_info_version >= DOMAIN_LEVEL_3 {
-            // 2 -> 3 Migration
             self.migrate_domain_2_to_3()?;
         }

@@ -1734,6 +1733,10 @@
             self.migrate_domain_4_to_5()?;
         }

+        if previous_version <= DOMAIN_LEVEL_5 && domain_info_version >= DOMAIN_LEVEL_6 {
+            self.migrate_domain_5_to_6()?;
+        }
+
         Ok(())
     }

@@ -1815,20 +1818,10 @@
         self.be_txn.reindex()
     }

-    fn force_all_reload(&mut self) {
-        self.changed_schema = true;
-        self.changed_acp = true;
-        self.changed_oauth2 = true;
-        self.changed_domain = true;
-        self.changed_sync_agreement = true;
-        self.changed_system_config = true;
-    }
-
     fn force_schema_reload(&mut self) {
         self.changed_schema = true;
     }

-    #[instrument(level = "info", skip_all)]
     pub(crate) fn upgrade_reindex(&mut self, v: i64) -> Result<(), OperationError> {
         self.be_txn.upgrade_reindex(v)
     }
@@ -1850,7 +1843,8 @@
     }

     pub(crate) fn reload(&mut self) -> Result<(), OperationError> {
-        // First, check if the domain version has changed.
+        // First, check if the domain version has changed. This can trigger
+        // changes to schema, access controls and more.
         if self.changed_domain {
             self.reload_domain_info_version()?;
         }
@@ -1861,7 +1855,16 @@
         // Reload the schema from qs.
         if self.changed_schema {
             self.reload_schema()?;
+
+            // If the server is in a late phase of start up or is
+            // operational, then a reindex may be required. After the reindex, the schema
+            // must also be reloaded so that slope optimisation indexes are loaded correctly.
+            if *self.phase >= ServerPhase::DomainInfoReady {
+                self.reindex()?;
+                self.reload_schema()?;
+            }
         }
+
         // Determine if we need to update access control profiles
         // based on any modifications that have occurred.
         // IF SCHEMA CHANGED WE MUST ALSO RELOAD!!!
IE if schema had an attr removed @@ -1886,6 +1889,13 @@ impl<'a> QueryServerWriteTransaction<'a> { self.reload_domain_info()?; } + // Clear flags + self.changed_domain = false; + self.changed_schema = false; + self.changed_system_config = false; + self.changed_acp = false; + self.changed_sync_agreement = false; + Ok(()) } diff --git a/server/lib/src/testkit.rs b/server/lib/src/testkit.rs index 65eee5442..239009e70 100644 --- a/server/lib/src/testkit.rs +++ b/server/lib/src/testkit.rs @@ -2,8 +2,20 @@ use crate::be::{Backend, BackendConfig}; use crate::prelude::*; use crate::schema::Schema; +pub struct TestConfiguration { + pub domain_level: DomainVersion, +} + +impl Default for TestConfiguration { + fn default() -> Self { + TestConfiguration { + domain_level: DOMAIN_TGT_LEVEL, + } + } +} + #[allow(clippy::expect_used)] -pub async fn setup_test() -> QueryServer { +pub async fn setup_test(config: TestConfiguration) -> QueryServer { sketching::test_init(); // Create an in memory BE @@ -15,13 +27,19 @@ pub async fn setup_test() -> QueryServer { let be = Backend::new(BackendConfig::new_test("main"), idxmeta, false).expect("Failed to init BE"); - // Init is called via the proc macro - QueryServer::new(be, schema_outer, "example.com".to_string(), Duration::ZERO) - .expect("Failed to setup Query Server") + let test_server = QueryServer::new(be, schema_outer, "example.com".to_string(), Duration::ZERO) + .expect("Failed to setup Query Server"); + + test_server + .initialise_helper(duration_from_epoch_now(), config.domain_level) + .await + .expect("init failed!"); + + test_server } #[allow(clippy::expect_used)] -pub async fn setup_pair_test() -> (QueryServer, QueryServer) { +pub async fn setup_pair_test(config: TestConfiguration) -> (QueryServer, QueryServer) { sketching::test_init(); let qs_a = { @@ -54,16 +72,23 @@ pub async fn setup_pair_test() -> (QueryServer, QueryServer) { .expect("Failed to setup Query Server") }; + qs_a.initialise_helper(duration_from_epoch_now(), config.domain_level) + .await + .expect("init failed!"); + + qs_b.initialise_helper(duration_from_epoch_now(), config.domain_level) + .await + .expect("init failed!"); + (qs_a, qs_b) } #[allow(clippy::expect_used)] -pub async fn setup_idm_test() -> (IdmServer, IdmServerDelayed, IdmServerAudit) { - let qs = setup_test().await; +pub async fn setup_idm_test( + config: TestConfiguration, +) -> (IdmServer, IdmServerDelayed, IdmServerAudit) { + let qs = setup_test(config).await; - qs.initialise_helper(duration_from_epoch_now()) - .await - .expect("init failed!"); IdmServer::new(qs, "https://idm.example.com") .await .expect("Failed to setup idms")
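Usage sketch (illustrative, not part of the patch): the reworked test macros parse their arguments as `ExprAssign` pairs, so every argument must take the form `name = value`. Anything other than the `audit` flag is splatted verbatim into a `crate::testkit::TestConfiguration` literal, with `..Default::default()` filling the remaining fields. The shapes below mirror the tests touched above; the bodies are placeholders.

    // Runs at the release target level: TestConfiguration::default()
    // sets domain_level to DOMAIN_TGT_LEVEL.
    #[qs_test]
    async fn check_at_target_level(server: &QueryServer) {
        // ...
    }

    // Boots the instance at DOMAIN_LEVEL_5 so the test can drive the
    // 5 -> 6 migration itself, as test_migrations_dl5_dl6 does above.
    #[qs_test(domain_level = DOMAIN_LEVEL_5)]
    async fn check_previous_level(server: &QueryServer) {
        // ...
    }

    // `audit` is consumed by the macro itself as a flag (written as
    // `audit = 1` because arguments must parse as ExprAssign); it adds
    // the idms_audit argument rather than setting a TestConfiguration field.
    #[idm_test(audit = 1)]
    async fn check_with_audit(
        idms: &IdmServer,
        idms_delayed: &mut IdmServerDelayed,
        idms_audit: &mut IdmServerAudit,
    ) {
        // ...
    }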
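Because the macro forwards each `#field_name: #field_value` pair verbatim into the struct literal, adding a new test knob only requires a new `TestConfiguration` field; the macros themselves need no changes. A hypothetical sketch (the `start_time` field below does not exist in this patch):

    pub struct TestConfiguration {
        pub domain_level: DomainVersion,
        // Hypothetical extra knob, for illustration only.
        pub start_time: Duration,
    }

    // #[qs_test(start_time = Duration::from_secs(60))] would then expand to:
    //
    //     crate::testkit::TestConfiguration {
    //         start_time: Duration::from_secs(60),
    //         ..crate::testkit::TestConfiguration::default()
    //     }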