diff --git a/server/lib/src/constants/groups.rs b/server/lib/src/constants/groups.rs
index d90de0744..29b402414 100644
--- a/server/lib/src/constants/groups.rs
+++ b/server/lib/src/constants/groups.rs
@@ -289,15 +289,6 @@ lazy_static! {
         ..Default::default()
     };
 
-    /// Self-write of mail
-    pub static ref IDM_PEOPLE_SELF_WRITE_MAIL_V1: BuiltinGroup = BuiltinGroup {
-        name: "idm_people_self_write_mail",
-        description: "Builtin IDM Group for people accounts to update their own mail.",
-        uuid: UUID_IDM_PEOPLE_SELF_MAIL_WRITE,
-        members: Vec::with_capacity(0),
-        ..Default::default()
-    };
-
     /// Self-write of mail
     pub static ref IDM_PEOPLE_SELF_MAIL_WRITE_DL7: BuiltinGroup = BuiltinGroup {
         name: "idm_people_self_mail_write",
@@ -373,36 +364,7 @@ lazy_static! {
     };
 
     /// This must be the last group to init to include the UUID of the other high priv groups.
-    pub static ref IDM_HIGH_PRIVILEGE_V1: BuiltinGroup = BuiltinGroup {
-        name: "idm_high_privilege",
-        uuid: UUID_IDM_HIGH_PRIVILEGE,
-        entry_managed_by: Some(UUID_IDM_ACCESS_CONTROL_ADMINS),
-        description: "Builtin IDM provided groups with high levels of access that should be audited and limited in modification.",
-        members: vec![
-            UUID_SYSTEM_ADMINS,
-            UUID_IDM_ADMINS,
-            UUID_DOMAIN_ADMINS,
-            UUID_IDM_SERVICE_DESK,
-            UUID_IDM_RECYCLE_BIN_ADMINS,
-            UUID_IDM_SCHEMA_ADMINS,
-            UUID_IDM_ACCESS_CONTROL_ADMINS,
-            UUID_IDM_OAUTH2_ADMINS,
-            UUID_IDM_RADIUS_ADMINS,
-            UUID_IDM_ACCOUNT_POLICY_ADMINS,
-            UUID_IDM_RADIUS_SERVERS,
-            UUID_IDM_GROUP_ADMINS,
-            UUID_IDM_UNIX_ADMINS,
-            UUID_IDM_PEOPLE_PII_READ,
-            UUID_IDM_PEOPLE_ADMINS,
-            UUID_IDM_PEOPLE_ON_BOARDING,
-            UUID_IDM_SERVICE_ACCOUNT_ADMINS,
-            UUID_IDM_HIGH_PRIVILEGE,
-        ],
-        ..Default::default()
-    };
-
-    /// This must be the last group to init to include the UUID of the other high priv groups.
-    pub static ref IDM_HIGH_PRIVILEGE_DL7: BuiltinGroup = BuiltinGroup {
+    pub static ref IDM_HIGH_PRIVILEGE_DL8: BuiltinGroup = BuiltinGroup {
         name: "idm_high_privilege",
         uuid: UUID_IDM_HIGH_PRIVILEGE,
         entry_managed_by: Some(UUID_IDM_ACCESS_CONTROL_ADMINS),
@@ -426,12 +388,14 @@ lazy_static! {
             UUID_IDM_PEOPLE_ON_BOARDING,
             UUID_IDM_SERVICE_ACCOUNT_ADMINS,
             UUID_IDM_CLIENT_CERTIFICATE_ADMINS,
+            UUID_IDM_APPLICATION_ADMINS,
+            UUID_IDM_MAIL_ADMINS,
             UUID_IDM_HIGH_PRIVILEGE,
         ],
         ..Default::default()
     };
 
-    pub static ref BUILTIN_GROUP_APPLICATION_ADMINS: BuiltinGroup = BuiltinGroup {
+    pub static ref BUILTIN_GROUP_APPLICATION_ADMINS_DL8: BuiltinGroup = BuiltinGroup {
         name: "idm_application_admins",
         uuid: UUID_IDM_APPLICATION_ADMINS,
         description: "Builtin Application Administration Group.",
@@ -458,17 +422,19 @@ pub fn idm_builtin_non_admin_groups() -> Vec<&'static BuiltinGroup> {
         &BUILTIN_GROUP_PEOPLE_PII_READ,
         &BUILTIN_GROUP_PEOPLE_ON_BOARDING,
         &BUILTIN_GROUP_SERVICE_ACCOUNT_ADMINS,
-        &BUILTIN_GROUP_APPLICATION_ADMINS,
         &BUILTIN_GROUP_MAIL_SERVICE_ADMINS_DL8,
         &IDM_GROUP_ADMINS_V1,
         &IDM_ALL_PERSONS,
         &IDM_ALL_ACCOUNTS,
         &BUILTIN_IDM_RADIUS_SERVERS_V1,
         &BUILTIN_IDM_MAIL_SERVERS_DL8,
-        &IDM_PEOPLE_SELF_WRITE_MAIL_V1,
+        &BUILTIN_GROUP_PEOPLE_SELF_NAME_WRITE_DL7,
+        &IDM_PEOPLE_SELF_MAIL_WRITE_DL7,
+        &BUILTIN_GROUP_CLIENT_CERTIFICATE_ADMINS_DL7,
+        &BUILTIN_GROUP_APPLICATION_ADMINS_DL8,
         // Write deps on read, so write must be added first.
         // All members must exist before we write HP
-        &IDM_HIGH_PRIVILEGE_V1,
+        &IDM_HIGH_PRIVILEGE_DL8,
         // other things
         &IDM_UI_ENABLE_EXPERIMENTAL_FEATURES,
         &IDM_ACCOUNT_MAIL_READ,
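A note on the ordering in `idm_builtin_non_admin_groups()` above: `idm_high_privilege` lists the other groups' UUIDs as members, so the new DL8 entries are spliced in before `IDM_HIGH_PRIVILEGE_DL8` and the high-privilege group stays last. A minimal, self-contained sketch of that invariant (the struct is cut down to two fields and the UUIDs are stand-in integers, not the real constants):

```rust
use std::collections::HashSet;

// Cut-down stand-in for BuiltinGroup: just enough to model the ordering rule.
struct BuiltinGroup {
    uuid: u32,
    members: Vec<u32>,
}

/// Every member UUID must be defined by the same or an earlier entry in the
/// init list (self-membership, as with UUID_IDM_HIGH_PRIVILEGE, is fine).
fn init_order_is_valid(groups: &[BuiltinGroup]) -> bool {
    let mut seen = HashSet::new();
    groups.iter().all(|g| {
        seen.insert(g.uuid);
        g.members.iter().all(|m| seen.contains(m))
    })
}

fn main() {
    let ok = vec![
        BuiltinGroup { uuid: 1, members: vec![] },          // e.g. idm_application_admins
        BuiltinGroup { uuid: 2, members: vec![] },          // e.g. idm_mail_admins
        BuiltinGroup { uuid: 99, members: vec![1, 2, 99] }, // idm_high_privilege, last
    ];
    assert!(init_order_is_valid(&ok));

    // Moving the high-privilege group to the front breaks the invariant.
    let bad = vec![
        BuiltinGroup { uuid: 99, members: vec![1, 2, 99] },
        BuiltinGroup { uuid: 1, members: vec![] },
        BuiltinGroup { uuid: 2, members: vec![] },
    ];
    assert!(!init_order_is_valid(&bad));
}
```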
diff --git a/server/lib/src/constants/mod.rs b/server/lib/src/constants/mod.rs
index 18dba211c..f9ebb387b 100644
--- a/server/lib/src/constants/mod.rs
+++ b/server/lib/src/constants/mod.rs
@@ -54,14 +54,6 @@ pub type DomainVersion = u32;
 /// previously.
 pub const DOMAIN_LEVEL_0: DomainVersion = 0;
 
-/// Deprecated as of 1.3.0
-pub const DOMAIN_LEVEL_5: DomainVersion = 5;
-
-/// Domain Level introduced with 1.2.0.
-/// Deprecated as of 1.4.0
-pub const DOMAIN_LEVEL_6: DomainVersion = 6;
-pub const PATCH_LEVEL_1: u32 = 1;
-
 /// Domain Level introduced with 1.3.0.
 /// Deprecated as of 1.5.0
 pub const DOMAIN_LEVEL_7: DomainVersion = 7;
@@ -85,7 +77,7 @@ pub const DOMAIN_LEVEL_11: DomainVersion = 11;
 
 // The minimum level that we can re-migrate from.
 // This should be DOMAIN_TGT_LEVEL minus 2
-pub const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_TGT_LEVEL - 2;
+pub const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_LEVEL_8;
 // The minimum supported domain functional level (for replication)
 pub const DOMAIN_MIN_LEVEL: DomainVersion = DOMAIN_TGT_LEVEL;
 // The previous releases domain functional level
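Pinning `DOMAIN_MIN_REMIGRATION_LEVEL` to `DOMAIN_LEVEL_8` keeps the value the old `DOMAIN_TGT_LEVEL - 2` expression produced while making the floor explicit. A small sanity check, assuming `DOMAIN_TGT_LEVEL` is `DOMAIN_LEVEL_10` in this release (that value is an assumption; it does not appear in this hunk):

```rust
type DomainVersion = u32;

const DOMAIN_LEVEL_8: DomainVersion = 8;
// Assumed for illustration; the real constant lives elsewhere in mod.rs.
const DOMAIN_TGT_LEVEL: DomainVersion = 10;
const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_LEVEL_8;

fn main() {
    // The "should be DOMAIN_TGT_LEVEL minus 2" comment still holds after
    // the pinning; it just has to be maintained by hand now.
    assert_eq!(DOMAIN_MIN_REMIGRATION_LEVEL, DOMAIN_TGT_LEVEL - 2);
}
```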
diff --git a/server/lib/src/constants/schema.rs b/server/lib/src/constants/schema.rs
index a5d943e34..552f76d99 100644
--- a/server/lib/src/constants/schema.rs
+++ b/server/lib/src/constants/schema.rs
@@ -1209,6 +1209,30 @@ pub static ref SCHEMA_CLASS_DOMAIN_INFO_DL9: SchemaClass = SchemaClass {
     ..Default::default()
 };
 
+pub static ref SCHEMA_CLASS_DOMAIN_INFO_DL10: SchemaClass = SchemaClass {
+    uuid: UUID_SCHEMA_CLASS_DOMAIN_INFO,
+    name: EntryClass::DomainInfo.into(),
+    description: "Local domain information and configuration".to_string(),
+
+    systemmay: vec![
+        Attribute::DomainSsid,
+        Attribute::DomainLdapBasedn,
+        Attribute::LdapAllowUnixPwBind,
+        Attribute::Image,
+        Attribute::PatchLevel,
+        Attribute::DomainDevelopmentTaint,
+        Attribute::DomainAllowEasterEggs,
+        Attribute::DomainDisplayName,
+    ],
+    systemmust: vec![
+        Attribute::Name,
+        Attribute::DomainUuid,
+        Attribute::DomainName,
+        Attribute::Version,
+    ],
+    ..Default::default()
+};
+
 pub static ref SCHEMA_CLASS_POSIXGROUP: SchemaClass = SchemaClass {
     uuid: UUID_SCHEMA_CLASS_POSIXGROUP,
     name: EntryClass::PosixGroup.into(),
diff --git a/server/lib/src/idm/scim.rs b/server/lib/src/idm/scim.rs
index f65f8dd16..27313d10a 100644
--- a/server/lib/src/idm/scim.rs
+++ b/server/lib/src/idm/scim.rs
@@ -5,7 +5,7 @@ use base64::{
     Engine as _,
 };
 
-use compact_jwt::{Jws, JwsCompact, JwsEs256Signer, JwsSigner};
+use compact_jwt::{Jws, JwsCompact};
 use kanidm_proto::internal::{ApiTokenPurpose, ScimSyncToken};
 use kanidm_proto::scim_v1::*;
 use std::collections::{BTreeMap, BTreeSet};
@@ -25,7 +25,6 @@ pub(crate) struct SyncAccount {
     pub name: String,
     pub uuid: Uuid,
     pub sync_tokens: BTreeMap<Uuid, ApiToken>,
-    pub jws_key: Option<JwsEs256Signer>,
 }
 
 macro_rules! try_from_entry {
@@ -40,15 +39,6 @@ macro_rules! try_from_entry {
             .map(|s| s.to_string())
             .ok_or(OperationError::MissingAttribute(Attribute::Name))?;
 
-        let jws_key = $value
-            .get_ava_single_jws_key_es256(Attribute::JwsEs256PrivateKey)
-            .cloned()
-            .map(|jws_key| {
-                jws_key
-                    .set_sign_option_embed_jwk(true)
-                    .set_sign_option_legacy_kid(true)
-            });
-
         let sync_tokens = $value
             .get_ava_as_apitoken_map(Attribute::SyncTokenSession)
            .cloned()
@@ -60,7 +50,6 @@ macro_rules! try_from_entry {
             name,
             uuid,
             sync_tokens,
-            jws_key,
         })
    }};
 }
@@ -123,16 +112,6 @@ impl IdmServerProxyWriteTransaction<'_> {
         gte: &GenerateScimSyncTokenEvent,
         ct: Duration,
     ) -> Result<JwsCompact, OperationError> {
-        // Get the target signing key.
-        let sync_account = self
-            .qs_write
-            .internal_search_uuid(gte.target)
-            .and_then(|entry| SyncAccount::try_from_entry_rw(&entry))
-            .map_err(|e| {
-                admin_error!(?e, "Failed to search service account");
-                e
-            })?;
-
         let session_id = Uuid::new_v4();
 
         let issued_at = time::OffsetDateTime::UNIX_EPOCH + ct;
@@ -185,25 +164,9 @@ impl IdmServerProxyWriteTransaction<'_> {
             })?;
 
         // The modify succeeded and was allowed, now sign the token for return.
-        if self.qs_write.get_domain_version() < DOMAIN_LEVEL_6 {
-            sync_account
-                .jws_key
-                .as_ref()
-                .ok_or_else(|| {
-                    admin_error!("Unable to sign sync token, no sync keys available");
-                    OperationError::CryptographyError
-                })
-                .and_then(|jws_key| {
-                    jws_key.sign(&token).map_err(|err| {
-                        admin_error!(?err, "Unable to sign sync token");
-                        OperationError::CryptographyError
-                    })
-                })
-        } else {
-            self.qs_write
-                .get_domain_key_object_handle()?
-                .jws_es256_sign(&token, ct)
-        }
+        self.qs_write
+            .get_domain_key_object_handle()?
+            .jws_es256_sign(&token, ct)
 
         // Done!
     }
diff --git a/server/lib/src/idm/serviceaccount.rs b/server/lib/src/idm/serviceaccount.rs
index 19275346f..c33a4e81f 100644
--- a/server/lib/src/idm/serviceaccount.rs
+++ b/server/lib/src/idm/serviceaccount.rs
@@ -1,7 +1,7 @@
 use std::collections::BTreeMap;
 use std::time::Duration;
 
-use compact_jwt::{Jws, JwsCompact, JwsEs256Signer, JwsSigner};
+use compact_jwt::{Jws, JwsCompact};
 use kanidm_proto::internal::ApiToken as ProtoApiToken;
 use time::OffsetDateTime;
 
@@ -23,15 +23,6 @@ macro_rules! try_from_entry {
             ));
         }
 
-        let jws_key = $value
-            .get_ava_single_jws_key_es256(Attribute::JwsEs256PrivateKey)
-            .cloned()
-            .map(|jws_key| {
-                jws_key
-                    .set_sign_option_embed_jwk(true)
-                    .set_sign_option_legacy_kid(true)
-            });
-
         let api_tokens = $value
             .get_ava_as_apitoken_map(Attribute::ApiTokenSession)
             .cloned()
@@ -48,7 +39,6 @@ macro_rules! try_from_entry {
             valid_from,
             expire,
             api_tokens,
-            jws_key,
         })
     }};
 }
@@ -60,8 +50,6 @@ pub struct ServiceAccount {
     pub expire: Option<OffsetDateTime>,
 
     pub api_tokens: BTreeMap<Uuid, ApiToken>,
-
-    pub jws_key: Option<JwsEs256Signer>,
 }
 
 impl ServiceAccount {
@@ -253,25 +241,9 @@ impl IdmServerProxyWriteTransaction<'_> {
                 err
             })?;
 
-        if self.qs_write.get_domain_version() < DOMAIN_LEVEL_6 {
-            service_account
-                .jws_key
-                .as_ref()
-                .ok_or_else(|| {
-                    admin_error!("Unable to sign sync token, no sync keys available");
-                    OperationError::CryptographyError
-                })
-                .and_then(|jws_key| {
-                    jws_key.sign(&token).map_err(|err| {
-                        admin_error!(?err, "Unable to sign sync token");
-                        OperationError::CryptographyError
-                    })
-                })
-        } else {
-            self.qs_write
-                .get_domain_key_object_handle()?
-                .jws_es256_sign(&token, ct)
-        }
+        self.qs_write
+            .get_domain_key_object_handle()?
+            .jws_es256_sign(&token, ct)
     }
 
     pub fn service_account_destroy_api_token(
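This hunk and the matching one in scim.rs remove the last version-gated signing fallback: tokens are now always signed through the domain key object. A sketch of the surviving call shape (all types below are stand-ins; only the method names `get_domain_key_object_handle` and `jws_es256_sign` come from the diff):

```rust
use std::time::Duration;

struct Jws(String);        // stand-in for compact_jwt::Jws
struct JwsCompact(String); // stand-in for compact_jwt::JwsCompact

#[derive(Debug)]
struct OperationError; // stand-in; the real type is an enum

// Stand-in for the handle returned by get_domain_key_object_handle().
trait DomainKeyObject {
    fn jws_es256_sign(&self, token: &Jws, ct: Duration) -> Result<JwsCompact, OperationError>;
}

struct StubKeyObject;

impl DomainKeyObject for StubKeyObject {
    fn jws_es256_sign(&self, token: &Jws, _ct: Duration) -> Result<JwsCompact, OperationError> {
        // The real key object signs with the domain's ES256 key; we just tag the payload.
        Ok(JwsCompact(format!("es256({})", token.0)))
    }
}

// Before this change the caller branched on get_domain_version() and fell
// back to a per-account JwsEs256Signer below DOMAIN_LEVEL_6. Now there is
// exactly one signing path.
fn sign_token(
    domain_key: &dyn DomainKeyObject,
    token: Jws,
    ct: Duration,
) -> Result<JwsCompact, OperationError> {
    domain_key.jws_es256_sign(&token, ct)
}

fn main() {
    let signed = sign_token(&StubKeyObject, Jws("api-token".into()), Duration::ZERO);
    assert!(signed.is_ok());
}
```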
diff --git a/server/lib/src/plugins/domain.rs b/server/lib/src/plugins/domain.rs
index 0ef28ab74..c85ce76de 100644
--- a/server/lib/src/plugins/domain.rs
+++ b/server/lib/src/plugins/domain.rs
@@ -7,8 +7,6 @@
 use std::iter::once;
 use std::sync::Arc;
 
-use compact_jwt::JwsEs256Signer;
-use rand::prelude::*;
 use regex::Regex;
 use tracing::trace;
 
@@ -61,13 +59,6 @@ impl Plugin for Domain {
     }
 }
 
-fn generate_domain_cookie_key() -> Value {
-    let mut key = [0; 64];
-    let mut rng = StdRng::from_entropy();
-    rng.fill(&mut key);
-    Value::new_privatebinary(&key)
-}
-
 impl Domain {
     /// Generates the cookie key for the domain.
     fn modify_inner(
@@ -79,11 +70,14 @@ impl Domain {
             && e.attribute_equality(Attribute::Uuid, &PVUUID_DOMAIN_INFO)
         {
             // Validate the domain ldap basedn syntax.
-            if let Some(basedn) = e
-                .get_ava_single_iutf8(Attribute::DomainLdapBasedn) {
-
+            if let Some(basedn) = e.get_ava_single_iutf8(Attribute::DomainLdapBasedn) {
                 if !DOMAIN_LDAP_BASEDN_RE.is_match(basedn) {
-                    error!("Invalid {} '{}'. Must pass regex \"{}\"", Attribute::DomainLdapBasedn,basedn, *DOMAIN_LDAP_BASEDN_RE);
+                    error!(
+                        "Invalid {} '{}'. Must pass regex \"{}\"",
+                        Attribute::DomainLdapBasedn,
+                        basedn,
+                        *DOMAIN_LDAP_BASEDN_RE
+                    );
                     return Err(OperationError::InvalidState);
                 }
             }
@@ -109,39 +103,26 @@ impl Domain {
                 debug!("plugin_domain: NOT Applying domain version transform");
             };
 
-            // create the domain_display_name if it's missing
-            if !e.attribute_pres(Attribute::DomainDisplayName) {
-                let domain_display_name = Value::new_utf8(format!("Kanidm {}", qs.get_domain_name()));
-                security_info!("plugin_domain: setting default domain_display_name to {:?}", domain_display_name);
+            // create the domain_display_name if it's missing. This was the behaviour in versions
+            // prior to DL10. Rather than checking the domain version itself, the issue is we
+            // have to check the min remigration level. This is because during a server setup
+            // we start from the MIN remigration level and work up, and the domain version == 0.
+            //
+            // So effectively we only skip setting this value after we know that we are at DL12
+            // since we could never go back to anything lower than 10 at that point.
+            if DOMAIN_MIN_REMIGRATION_LEVEL < DOMAIN_LEVEL_10
+                && !e.attribute_pres(Attribute::DomainDisplayName)
+            {
+                let domain_display_name =
+                    Value::new_utf8(format!("Kanidm {}", qs.get_domain_name()));
+                security_info!(
+                    "plugin_domain: setting default domain_display_name to {:?}",
+                    domain_display_name
+                );
                 e.set_ava(&Attribute::DomainDisplayName, once(domain_display_name));
             }
 
-            if qs.get_domain_version() < DOMAIN_LEVEL_6 && !e.attribute_pres(Attribute::FernetPrivateKeyStr) {
-                security_info!("regenerating domain token encryption key");
-                let k = fernet::Fernet::generate_key();
-                let v = Value::new_secret_str(&k);
-                e.add_ava(Attribute::FernetPrivateKeyStr, v);
-            }
-
-            if qs.get_domain_version() < DOMAIN_LEVEL_6 && !e.attribute_pres(Attribute::Es256PrivateKeyDer) {
-                security_info!("regenerating domain es256 private key");
-                let der = JwsEs256Signer::generate_es256()
-                    .and_then(|jws| jws.private_key_to_der())
-                    .map_err(|e| {
-                        admin_error!(err = ?e, "Unable to generate ES256 JwsSigner private key");
-                        OperationError::CryptographyError
-                    })?;
-                let v = Value::new_privatebinary(&der);
-                e.add_ava(Attribute::Es256PrivateKeyDer, v);
-            }
-
-            if qs.get_domain_version() < DOMAIN_LEVEL_6 && !e.attribute_pres(Attribute::PrivateCookieKey) {
-                security_info!("regenerating domain cookie key");
-                e.add_ava(Attribute::PrivateCookieKey, generate_domain_cookie_key());
-            }
-
-            trace!(?e);
 
             Ok(())
         } else {
             Ok(())
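The long comment in the hunk above carries the key reasoning: at first setup the domain entry has version 0 and is migrated up from `DOMAIN_MIN_REMIGRATION_LEVEL`, so the gate must depend on the build's remigration floor rather than the runtime domain version. A condensed model of the decision (constant values are illustrative):

```rust
type DomainVersion = u32;

// Illustrative values; the real constants live in constants/mod.rs.
const DOMAIN_LEVEL_8: DomainVersion = 8;
const DOMAIN_LEVEL_10: DomainVersion = 10;
const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_LEVEL_8;

/// Mirrors the new gate in Domain::modify_inner: default the display name
/// only while this build can still host domains that pre-date DL10, and
/// never clobber an existing value.
fn should_default_display_name(display_name_present: bool) -> bool {
    DOMAIN_MIN_REMIGRATION_LEVEL < DOMAIN_LEVEL_10 && !display_name_present
}

fn main() {
    // Fresh setup (domain version is 0 at this point, which is exactly why
    // the runtime version cannot be consulted here): the default is applied.
    assert!(should_default_display_name(false));

    // An existing value is always left alone.
    assert!(!should_default_display_name(true));

    // Once DOMAIN_MIN_REMIGRATION_LEVEL reaches DL10 (i.e. the target level
    // reaches DL12), the condition is false for every input and the whole
    // branch becomes dead code that can be deleted.
}
```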
diff --git a/server/lib/src/plugins/gidnumber.rs b/server/lib/src/plugins/gidnumber.rs
index 3d1c5d31d..6f11e9986 100644
--- a/server/lib/src/plugins/gidnumber.rs
+++ b/server/lib/src/plugins/gidnumber.rs
@@ -62,10 +62,7 @@ pub const GID_UNUSED_D_MAX: u32 = 0x7fff_ffff;
 
 pub struct GidNumber {}
 
-fn apply_gidnumber<T: Clone>(
-    e: &mut Entry<EntryInvalid, T>,
-    domain_version: DomainVersion,
-) -> Result<(), OperationError> {
+fn apply_gidnumber<T: Clone>(e: &mut Entry<EntryInvalid, T>) -> Result<(), OperationError> {
     if (e.attribute_equality(Attribute::Class, &EntryClass::PosixGroup.into())
         || e.attribute_equality(Attribute::Class, &EntryClass::PosixAccount.into()))
         && !e.attribute_pres(Attribute::GidNumber)
@@ -89,48 +86,33 @@ fn apply_gidnumber<T: Clone>(
         e.set_ava(&Attribute::GidNumber, once(gid_v));
         Ok(())
     } else if let Some(gid) = e.get_ava_single_uint32(Attribute::GidNumber) {
-        if domain_version <= DOMAIN_LEVEL_6 {
-            if gid < GID_REGULAR_USER_MIN {
-                error!(
-                    "Requested GID ({}) overlaps a system range. Allowed ranges are {} to {}, {} to {} and {} to {}",
-                    gid,
-                    GID_REGULAR_USER_MIN, GID_REGULAR_USER_MAX,
-                    GID_UNUSED_C_MIN, GID_UNUSED_C_MAX,
-                    GID_UNUSED_D_MIN, GID_UNUSED_D_MAX
-                );
-                Err(OperationError::PL0001GidOverlapsSystemRange)
-            } else {
-                Ok(())
-            }
+        // If they provided us with a gid number, ensure it's in a safe range.
+        if (GID_REGULAR_USER_MIN..=GID_REGULAR_USER_MAX).contains(&gid)
+            || (GID_UNUSED_A_MIN..=GID_UNUSED_A_MAX).contains(&gid)
+            || (GID_UNUSED_B_MIN..= GID_UNUSED_B_MAX).contains(&gid)
+            || (GID_UNUSED_C_MIN..=GID_UNUSED_C_MAX).contains(&gid)
+            // We won't ever generate an id in the nspawn range, but we do secretly allow
+            // it to be set for compatibility with services like freeipa or openldap. TBH
+            // most people don't even use systemd nspawn anyway ...
+            //
+            // I made this design choice to avoid a tunable that may confuse people to
+            // its purpose. This way things "just work" for imports and existing systems
+            // but we do the right thing in the future.
+            || (GID_NSPAWN_MIN..=GID_NSPAWN_MAX).contains(&gid)
+            || (GID_UNUSED_D_MIN..=GID_UNUSED_D_MAX).contains(&gid)
+        {
+            Ok(())
         } else {
-            // If they provided us with a gid number, ensure it's in a safe range.
-            if (GID_REGULAR_USER_MIN..=GID_REGULAR_USER_MAX).contains(&gid)
-                || (GID_UNUSED_A_MIN..=GID_UNUSED_A_MAX).contains(&gid)
-                || (GID_UNUSED_B_MIN..= GID_UNUSED_B_MAX).contains(&gid)
-                || (GID_UNUSED_C_MIN..=GID_UNUSED_C_MAX).contains(&gid)
-                // We won't ever generate an id in the nspawn range, but we do secretly allow
-                // it to be set for compatibility with services like freeipa or openldap. TBH
-                // most people don't even use systemd nspawn anyway ...
-                //
-                // I made this design choice to avoid a tunable that may confuse people to
-                // its purpose. This way things "just work" for imports and existing systems
-                // but we do the right thing in the future.
-                || (GID_NSPAWN_MIN..=GID_NSPAWN_MAX).contains(&gid)
-                || (GID_UNUSED_D_MIN..=GID_UNUSED_D_MAX).contains(&gid)
-            {
-                Ok(())
-            } else {
-                // Note that here we don't advertise that we allow the nspawn range to be set, even
-                // though we do allow it.
-                error!(
-                    "Requested GID ({}) overlaps a system range. Allowed ranges are {} to {}, {} to {} and {} to {}",
-                    gid,
-                    GID_REGULAR_USER_MIN, GID_REGULAR_USER_MAX,
-                    GID_UNUSED_C_MIN, GID_UNUSED_C_MAX,
-                    GID_UNUSED_D_MIN, GID_UNUSED_D_MAX
-                );
-                Err(OperationError::PL0001GidOverlapsSystemRange)
-            }
+            // Note that here we don't advertise that we allow the nspawn range to be set, even
+            // though we do allow it.
+            error!(
+                "Requested GID ({}) overlaps a system range. Allowed ranges are {} to {}, {} to {} and {} to {}",
+                gid,
+                GID_REGULAR_USER_MIN, GID_REGULAR_USER_MAX,
+                GID_UNUSED_C_MIN, GID_UNUSED_C_MAX,
+                GID_UNUSED_D_MIN, GID_UNUSED_D_MAX
+            );
+            Err(OperationError::PL0001GidOverlapsSystemRange)
         }
     } else {
         Ok(())
@@ -144,37 +126,31 @@ impl Plugin for GidNumber {
 
     #[instrument(level = "debug", name = "gidnumber_pre_create_transform", skip_all)]
     fn pre_create_transform(
-        qs: &mut QueryServerWriteTransaction,
+        _qs: &mut QueryServerWriteTransaction,
         cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
         _ce: &CreateEvent,
     ) -> Result<(), OperationError> {
-        let dv = qs.get_domain_version();
-        cand.iter_mut()
-            .try_for_each(|cand| apply_gidnumber(cand, dv))
+        cand.iter_mut().try_for_each(apply_gidnumber)
     }
 
     #[instrument(level = "debug", name = "gidnumber_pre_modify", skip_all)]
     fn pre_modify(
-        qs: &mut QueryServerWriteTransaction,
+        _qs: &mut QueryServerWriteTransaction,
         _pre_cand: &[Arc<EntrySealedCommitted>],
         cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
         _me: &ModifyEvent,
     ) -> Result<(), OperationError> {
-        let dv = qs.get_domain_version();
-        cand.iter_mut()
-            .try_for_each(|cand| apply_gidnumber(cand, dv))
+        cand.iter_mut().try_for_each(apply_gidnumber)
     }
 
     #[instrument(level = "debug", name = "gidnumber_pre_batch_modify", skip_all)]
     fn pre_batch_modify(
-        qs: &mut QueryServerWriteTransaction,
+        _qs: &mut QueryServerWriteTransaction,
         _pre_cand: &[Arc<EntrySealedCommitted>],
         cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
         _me: &BatchModifyEvent,
     ) -> Result<(), OperationError> {
-        let dv = qs.get_domain_version();
-        cand.iter_mut()
-            .try_for_each(|cand| apply_gidnumber(cand, dv))
+        cand.iter_mut().try_for_each(apply_gidnumber)
     }
 }
@@ -186,9 +162,7 @@ mod tests {
     };
     use crate::prelude::*;
 
-    use kanidm_proto::internal::DomainUpgradeCheckStatus as ProtoDomainUpgradeCheckStatus;
-
-    #[qs_test(domain_level=DOMAIN_LEVEL_7)]
+    #[qs_test]
     async fn test_gidnumber_generate(server: &QueryServer) {
         let mut server_txn = server.write(duration_from_epoch_now()).await.expect("txn");
 
@@ -423,85 +397,4 @@ mod tests {
 
         assert!(server_txn.commit().is_ok());
     }
-
-    #[qs_test(domain_level=DOMAIN_LEVEL_6)]
-    async fn test_gidnumber_domain_level_6(server: &QueryServer) {
-        let mut server_txn = server.write(duration_from_epoch_now()).await.expect("txn");
-
-        // This will be INVALID in DL 7 but it's allowed for DL6
-        let user_a_uuid = uuid!("d90fb0cb-6785-4f36-94cb-e364d9c13255");
-        {
-            let op_result = server_txn.internal_create(vec![entry_init!(
-                (Attribute::Class, EntryClass::Account.to_value()),
-                (Attribute::Class, EntryClass::PosixAccount.to_value()),
-                (Attribute::Name, Value::new_iname("testperson_2")),
-                (Attribute::Uuid, Value::Uuid(user_a_uuid)),
-                // NOTE HERE: We do GID_UNUSED_A_MIN minus 1 which isn't accepted
-                // on DL7
-                (Attribute::GidNumber, Value::Uint32(GID_UNUSED_A_MIN - 1)),
-                (Attribute::Description, Value::new_utf8s("testperson")),
-                (Attribute::DisplayName, Value::new_utf8s("testperson"))
-            )]);
-
-            assert!(op_result.is_ok());
-
-            let user_a = server_txn
-                .internal_search_uuid(user_a_uuid)
-                .expect("Unable to access user");
-
-            let user_a_uid = user_a
-                .get_ava_single_uint32(Attribute::GidNumber)
-                .expect("gidnumber not present on account");
-
-            assert_eq!(user_a_uid, GID_UNUSED_A_MIN - 1);
-        }
-
-        assert!(server_txn.commit().is_ok());
-
-        // Now, do the DL6 upgrade check - will FAIL because the above user has an invalid ID.
-        let mut server_txn = server.read().await.unwrap();
-
-        let check_item = server_txn
-            .domain_upgrade_check_6_to_7_gidnumber()
-            .expect("Failed to perform migration check.");
-
-        assert_eq!(
-            check_item.status,
-            ProtoDomainUpgradeCheckStatus::Fail6To7Gidnumber
-        );
-
-        drop(server_txn);
-
-        let mut server_txn = server.write(duration_from_epoch_now()).await.expect("txn");
-
-        // Test rejection of important gid values.
-        let user_b_uuid = uuid!("33afc396-2434-47e5-b143-05176148b50e");
-        // Test that an entry when modified to have posix attributes, if a gidnumber
-        // is provided then it is respected.
-        {
-            let op_result = server_txn.internal_create(vec![entry_init!(
-                (Attribute::Class, EntryClass::Account.to_value()),
-                (Attribute::Class, EntryClass::Person.to_value()),
-                (Attribute::Name, Value::new_iname("testperson_6")),
-                (Attribute::Uuid, Value::Uuid(user_b_uuid)),
-                (Attribute::Description, Value::new_utf8s("testperson")),
-                (Attribute::DisplayName, Value::new_utf8s("testperson"))
-            )]);
-
-            assert!(op_result.is_ok());
-
-            for id in [0, 500, GID_REGULAR_USER_MIN - 1] {
-                let modlist = modlist!([
-                    m_pres(Attribute::Class, &EntryClass::PosixAccount.to_value()),
-                    m_pres(Attribute::GidNumber, &Value::Uint32(id))
-                ]);
-                let op_result = server_txn.internal_modify_uuid(user_b_uuid, &modlist);
-
-                trace!(?id);
-                assert_eq!(op_result, Err(OperationError::PL0001GidOverlapsSystemRange));
-            }
-        }
-
-        assert!(server_txn.commit().is_ok());
-    }
 }
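With the DL6 branch and its test removed, `apply_gidnumber` enforces a single version-independent rule for caller-supplied gids. A self-contained restatement for reference (the range constants here are placeholders except `GID_UNUSED_D_MAX`, which appears verbatim above; the real values sit at the top of gidnumber.rs):

```rust
// Placeholder range constants; only GID_UNUSED_D_MAX is taken from the diff.
const GID_REGULAR_USER_MIN: u32 = 1000;
const GID_REGULAR_USER_MAX: u32 = 60_000;
const GID_UNUSED_A_MIN: u32 = 61_184;
const GID_UNUSED_A_MAX: u32 = 65_519;
const GID_UNUSED_B_MIN: u32 = 65_536;
const GID_UNUSED_B_MAX: u32 = 131_071;
const GID_UNUSED_C_MIN: u32 = 262_144;
const GID_UNUSED_C_MAX: u32 = 524_287;
const GID_NSPAWN_MIN: u32 = 524_288;
const GID_NSPAWN_MAX: u32 = 1_879_048_191;
const GID_UNUSED_D_MIN: u32 = 0x7000_0000;
const GID_UNUSED_D_MAX: u32 = 0x7fff_ffff;

/// The post-DL7 rule: a caller-supplied gid is accepted only inside one of
/// the known-safe ranges. The nspawn range is accepted for import
/// compatibility but deliberately not advertised in the error message.
fn gid_is_allowed(gid: u32) -> bool {
    (GID_REGULAR_USER_MIN..=GID_REGULAR_USER_MAX).contains(&gid)
        || (GID_UNUSED_A_MIN..=GID_UNUSED_A_MAX).contains(&gid)
        || (GID_UNUSED_B_MIN..=GID_UNUSED_B_MAX).contains(&gid)
        || (GID_UNUSED_C_MIN..=GID_UNUSED_C_MAX).contains(&gid)
        || (GID_NSPAWN_MIN..=GID_NSPAWN_MAX).contains(&gid)
        || (GID_UNUSED_D_MIN..=GID_UNUSED_D_MAX).contains(&gid)
}

fn main() {
    // The values the deleted DL6 test probed are now always rejected:
    for gid in [0, 500, GID_REGULAR_USER_MIN - 1] {
        assert!(!gid_is_allowed(gid));
    }
    // Ordinary user/group ids remain fine.
    assert!(gid_is_allowed(1000));
    assert!(gid_is_allowed(GID_UNUSED_D_MAX));
}
```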
diff --git a/server/lib/src/plugins/jwskeygen.rs b/server/lib/src/plugins/jwskeygen.rs
index 629375fe1..2221eef35 100644
--- a/server/lib/src/plugins/jwskeygen.rs
+++ b/server/lib/src/plugins/jwskeygen.rs
@@ -45,7 +45,7 @@ impl Plugin for JwsKeygen {
 
 impl JwsKeygen {
     fn modify_inner(
-        qs: &mut QueryServerWriteTransaction,
+        _qs: &mut QueryServerWriteTransaction,
         cand: &mut [Entry<EntryInvalid, EntryNew>],
     ) -> Result<(), OperationError> {
         cand.iter_mut().try_for_each(|e| {
@@ -88,20 +88,6 @@ impl JwsKeygen {
             }
         }
 
-        if qs.get_domain_version() < DOMAIN_LEVEL_6 &&
-            (e.attribute_equality(Attribute::Class, &EntryClass::ServiceAccount.into()) ||
-             e.attribute_equality(Attribute::Class, &EntryClass::SyncAccount.into())) &&
-            !e.attribute_pres(Attribute::JwsEs256PrivateKey) {
-            security_info!("regenerating jws es256 private key");
-            let jwssigner = JwsEs256Signer::generate_es256()
-                .map_err(|e| {
-                    admin_error!(err = ?e, "Unable to generate ES256 JwsSigner private key");
-                    OperationError::CryptographyError
-                })?;
-            let v = Value::JwsKeyEs256(jwssigner);
-            e.add_ava(Attribute::JwsEs256PrivateKey, v);
-        }
-
         Ok(())
         })
     }
diff --git a/server/lib/src/server/migrations.rs b/server/lib/src/server/migrations.rs
index 10bd56d91..1d2232654 100644
--- a/server/lib/src/server/migrations.rs
+++ b/server/lib/src/server/migrations.rs
@@ -158,7 +158,7 @@ impl QueryServer {
 
         // If we are new enough to support patches, and we are lower than the target patch level
         // then a reload will be applied after we raise the patch level.
-        if domain_target_level >= DOMAIN_LEVEL_7 && domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
+        if domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
             write_txn
                 .internal_modify_uuid(
                     UUID_DOMAIN_INFO,
@@ -294,346 +294,6 @@ impl QueryServerWriteTransaction<'_> {
         }
     }
 
-    /// Migration domain level 6 to 7
-    #[instrument(level = "info", skip_all)]
-    pub(crate) fn migrate_domain_6_to_7(&mut self) -> Result<(), OperationError> {
-        if !cfg!(test) && DOMAIN_MAX_LEVEL < DOMAIN_LEVEL_7 {
-            error!("Unable to raise domain level from 6 to 7.");
-            return Err(OperationError::MG0004DomainLevelInDevelopment);
-        }
-
-        // ============== Apply constraints ===============
-
-        // Due to changes in gidnumber allocation, in the *extremely* unlikely
-        // case that a user's ID was generated outside the valid range, we re-request
-        // the creation of their gid number to proceed.
-        let filter = filter!(f_and!([
-            f_or!([
-                f_eq(Attribute::Class, EntryClass::PosixAccount.into()),
-                f_eq(Attribute::Class, EntryClass::PosixGroup.into())
-            ]),
-            // This logic gets a bit messy but it would be:
-            // If ! (
-            //    (GID_REGULAR_USER_MIN < value < GID_REGULAR_USER_MAX) ||
-            //    (GID_UNUSED_A_MIN < value < GID_UNUSED_A_MAX) ||
-            //    (GID_UNUSED_B_MIN < value < GID_UNUSED_B_MAX) ||
-            //    (GID_UNUSED_C_MIN < value < GID_UNUSED_D_MAX)
-            // )
-            f_andnot(f_or!([
-                f_and!([
-                    // The gid value must be less than GID_REGULAR_USER_MAX
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_REGULAR_USER_MAX)
-                    ),
-                    // This bit of mental gymnastics is "greater than".
-                    // The gid value must not be less than USER_MIN
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_REGULAR_USER_MIN)
-                    ))
-                ]),
-                f_and!([
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_A_MAX)
-                    ),
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_A_MIN)
-                    ))
-                ]),
-                f_and!([
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_B_MAX)
-                    ),
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_B_MIN)
-                    ))
-                ]),
-                // If both of these conditions are true we get:
-                // C_MIN < value < D_MAX, which the outer and-not inverts.
-                f_and!([
-                    // The gid value must be less than GID_UNUSED_D_MAX
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_D_MAX)
-                    ),
-                    // This bit of mental gymnastics is "greater than".
-                    // The gid value must not be less than C_MIN
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_C_MIN)
-                    ))
-                ]),
-            ]))
-        ]));
-
-        let results = self.internal_search(filter).map_err(|err| {
-            error!(?err, "migrate_domain_6_to_7 -> Error");
-            err
-        })?;
-
-        if !results.is_empty() {
-            error!("Unable to proceed. Not all entries meet gid/uid constraints.");
-            for entry in results {
-                error!(gid_invalid = ?entry.get_display_id());
-            }
-            return Err(OperationError::MG0005GidConstraintsNotMet);
-        }
-
-        // =========== Apply changes ==============
-
-        // For each oauth2 client, if it is missing a landing page then we clone the origin
-        // into landing. This is because previously we implied the landing to be origin if
-        // unset, but now landing is the primary url and implies an origin.
-        let filter = filter!(f_and!([
-            f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
-            f_pres(Attribute::OAuth2RsOrigin),
-            f_andnot(f_pres(Attribute::OAuth2RsOriginLanding)),
-        ]));
-
-        let pre_candidates = self.internal_search(filter).map_err(|err| {
-            error!(?err, "migrate_domain_6_to_7 internal search failure");
-            err
-        })?;
-
-        let modset: Vec<_> = pre_candidates
-            .into_iter()
-            .filter_map(|ent| {
-                ent.get_ava_single_url(Attribute::OAuth2RsOrigin)
-                    .map(|origin_url| {
-                        // Copy the origin url to the landing.
-                        let modlist = vec![Modify::Present(
-                            Attribute::OAuth2RsOriginLanding,
-                            Value::Url(origin_url.clone()),
-                        )];
-
-                        (ent.get_uuid(), ModifyList::new_list(modlist))
-                    })
-            })
-            .collect();
-
-        // If there is nothing, we don't need to do anything.
-        if !modset.is_empty() {
-            self.internal_batch_modify(modset.into_iter())?;
-        }
-
-        // Do this before schema change since domain info has cookie key
-        // as may at this point.
-        //
-        // Domain info should have the attribute private cookie key removed.
-        let modlist = ModifyList::new_list(vec![
-            Modify::Purged(Attribute::PrivateCookieKey),
-            Modify::Purged(Attribute::Es256PrivateKeyDer),
-            Modify::Purged(Attribute::FernetPrivateKeyStr),
-        ]);
-
-        self.internal_modify_uuid(UUID_DOMAIN_INFO, &modlist)?;
-
-        let filter = filter!(f_or!([
-            f_eq(Attribute::Class, EntryClass::ServiceAccount.into()),
-            f_eq(Attribute::Class, EntryClass::SyncAccount.into())
-        ]));
-
-        let modlist = ModifyList::new_list(vec![Modify::Purged(Attribute::JwsEs256PrivateKey)]);
-
-        self.internal_modify(&filter, &modlist)?;
-
-        // Now update schema
-        let idm_schema_classes = [
-            SCHEMA_ATTR_PATCH_LEVEL_DL7.clone().into(),
-            SCHEMA_ATTR_DOMAIN_DEVELOPMENT_TAINT_DL7.clone().into(),
-            SCHEMA_ATTR_REFERS_DL7.clone().into(),
-            SCHEMA_ATTR_CERTIFICATE_DL7.clone().into(),
-            SCHEMA_ATTR_OAUTH2_RS_ORIGIN_DL7.clone().into(),
-            SCHEMA_ATTR_OAUTH2_STRICT_REDIRECT_URI_DL7.clone().into(),
-            SCHEMA_ATTR_MAIL_DL7.clone().into(),
-            SCHEMA_ATTR_LEGALNAME_DL7.clone().into(),
-            SCHEMA_ATTR_DISPLAYNAME_DL7.clone().into(),
-            SCHEMA_CLASS_DOMAIN_INFO_DL7.clone().into(),
-            SCHEMA_CLASS_SERVICE_ACCOUNT_DL7.clone().into(),
-            SCHEMA_CLASS_SYNC_ACCOUNT_DL7.clone().into(),
-            SCHEMA_CLASS_CLIENT_CERTIFICATE_DL7.clone().into(),
-            SCHEMA_CLASS_OAUTH2_RS_DL7.clone().into(),
-        ];
-
-        idm_schema_classes
-            .into_iter()
-            .try_for_each(|entry| self.internal_migrate_or_create(entry))
-            .map_err(|err| {
-                error!(?err, "migrate_domain_6_to_7 -> Error");
-                err
-            })?;
-
-        self.reload()?;
-
-        // Update access controls
-        let idm_data = [
-            BUILTIN_GROUP_PEOPLE_SELF_NAME_WRITE_DL7
-                .clone()
-                .try_into()?,
-            IDM_PEOPLE_SELF_MAIL_WRITE_DL7.clone().try_into()?,
-            BUILTIN_GROUP_CLIENT_CERTIFICATE_ADMINS_DL7
-                .clone()
-                .try_into()?,
-            IDM_HIGH_PRIVILEGE_DL7.clone().try_into()?,
-        ];
-
-        idm_data
-            .into_iter()
-            .try_for_each(|entry| {
-                self.internal_migrate_or_create_ignore_attrs(entry, &[Attribute::Member])
-            })
-            .map_err(|err| {
-                error!(?err, "migrate_domain_6_to_7 -> Error");
-                err
-            })?;
-
-        let idm_data = [
-            IDM_ACP_SELF_WRITE_DL7.clone().into(),
-            IDM_ACP_SELF_NAME_WRITE_DL7.clone().into(),
-            IDM_ACP_HP_CLIENT_CERTIFICATE_MANAGER_DL7.clone().into(),
-            IDM_ACP_OAUTH2_MANAGE_DL7.clone().into(),
-        ];
-
-        idm_data
-            .into_iter()
-            .try_for_each(|entry| self.internal_migrate_or_create(entry))
-            .map_err(|err| {
-                error!(?err, "migrate_domain_6_to_7 -> Error");
-                err
-            })?;
-
-        Ok(())
-    }
-
-    /// Patch Application - This triggers a one-shot fixup task for issue #2756
-    /// to correct the content of dyngroups after the dyngroups are now loaded.
-    #[instrument(level = "info", skip_all)]
-    pub(crate) fn migrate_domain_patch_level_1(&mut self) -> Result<(), OperationError> {
-        admin_warn!("applying domain patch 1.");
-
-        debug_assert!(*self.phase >= ServerPhase::SchemaReady);
-
-        let filter = filter!(f_eq(Attribute::Class, EntryClass::DynGroup.into()));
-        let modlist = modlist!([m_pres(Attribute::Class, &EntryClass::DynGroup.into())]);
-
-        self.internal_modify(&filter, &modlist).map(|()| {
-            info!("forced dyngroups to re-calculate memberships");
-        })
-    }
-
-    /// Migration domain level 7 to 8
-    #[instrument(level = "info", skip_all)]
-    pub(crate) fn migrate_domain_7_to_8(&mut self) -> Result<(), OperationError> {
-        if !cfg!(test) && DOMAIN_MAX_LEVEL < DOMAIN_LEVEL_8 {
-            error!("Unable to raise domain level from 7 to 8.");
-            return Err(OperationError::MG0004DomainLevelInDevelopment);
-        }
-
-        // ============== Apply constraints ===============
-        let filter = filter!(f_and!([
-            f_eq(Attribute::Class, EntryClass::Account.into()),
-            f_pres(Attribute::PrimaryCredential),
-        ]));
-
-        let results = self.internal_search(filter)?;
-
-        let affected_entries = results
-            .into_iter()
-            .filter_map(|entry| {
-                if entry
-                    .get_ava_single_credential(Attribute::PrimaryCredential)
-                    .map(|cred| cred.has_securitykey())
-                    .unwrap_or_default()
-                {
-                    Some(entry.get_display_id())
-                } else {
-                    None
-                }
-            })
-            .collect::<Vec<_>>();
-
-        if !affected_entries.is_empty() {
-            error!("Unable to proceed. Some accounts still use legacy security keys, which need to be removed.");
-            for sk_present in affected_entries {
-                error!(%sk_present);
-            }
-            return Err(OperationError::MG0006SKConstraintsNotMet);
-        }
-
-        // Check oauth2 strict uri
-        let filter = filter!(f_and!([
-            f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
-            f_andnot(f_pres(Attribute::OAuth2StrictRedirectUri)),
-        ]));
-
-        let results = self.internal_search(filter)?;
-
-        let affected_entries = results
-            .into_iter()
-            .map(|entry| entry.get_display_id())
-            .collect::<Vec<_>>();
-
-        if !affected_entries.is_empty() {
-            error!("Unable to proceed. Not all oauth2 clients have strict redirect verification enabled.");
-            for missing_oauth2_strict_redirect_uri in affected_entries {
-                error!(%missing_oauth2_strict_redirect_uri);
-            }
-            return Err(OperationError::MG0007Oauth2StrictConstraintsNotMet);
-        }
-
-        // =========== Apply changes ==============
-
-        let idm_schema_classes = [
-            SCHEMA_ATTR_LINKED_GROUP_DL8.clone().into(),
-            SCHEMA_ATTR_APPLICATION_PASSWORD_DL8.clone().into(),
-            SCHEMA_CLASS_APPLICATION_DL8.clone().into(),
-            SCHEMA_CLASS_PERSON_DL8.clone().into(),
-            SCHEMA_CLASS_DOMAIN_INFO_DL8.clone().into(),
-            SCHEMA_ATTR_ALLOW_PRIMARY_CRED_FALLBACK_DL8.clone().into(),
-            SCHEMA_CLASS_ACCOUNT_POLICY_DL8.clone().into(),
-        ];
-
-        idm_schema_classes
-            .into_iter()
-            .try_for_each(|entry| self.internal_migrate_or_create(entry))
-            .map_err(|err| {
-                error!(?err, "migrate_domain_6_to_7 -> Error");
-                err
-            })?;
-
-        self.reload()?;
-
-        // Update access controls.
-        let idm_data = [
-            BUILTIN_GROUP_APPLICATION_ADMINS.clone().try_into()?,
-            IDM_ACP_SELF_READ_DL8.clone().into(),
-            IDM_ACP_SELF_WRITE_DL8.clone().into(),
-            IDM_ACP_APPLICATION_MANAGE_DL8.clone().into(),
-            IDM_ACP_APPLICATION_ENTRY_MANAGER_DL8.clone().into(),
-            // Add the new types for mail server
-            BUILTIN_GROUP_MAIL_SERVICE_ADMINS_DL8.clone().try_into()?,
-            BUILTIN_IDM_MAIL_SERVERS_DL8.clone().try_into()?,
-            IDM_ACP_MAIL_SERVERS_DL8.clone().into(),
-            IDM_ACP_DOMAIN_ADMIN_DL8.clone().into(),
-            IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL8.clone().into(),
-        ];
-
-        idm_data
-            .into_iter()
-            .try_for_each(|entry| self.internal_migrate_or_create(entry))
-            .map_err(|err| {
-                error!(?err, "migrate_domain_7_to_8 -> Error");
-                err
-            })?;
-
-        Ok(())
-    }
-
     /// Migration domain level 8 to 9 (1.5.0)
     #[instrument(level = "info", skip_all)]
     pub(crate) fn migrate_domain_8_to_9(&mut self) -> Result<(), OperationError> {
@@ -764,6 +424,21 @@ impl QueryServerWriteTransaction<'_> {
             return Err(OperationError::MG0004DomainLevelInDevelopment);
         }
 
+        // =========== Apply changes ==============
+
+        // Now update schema
+        let idm_schema_changes = [SCHEMA_CLASS_DOMAIN_INFO_DL10.clone().into()];
+
+        idm_schema_changes
+            .into_iter()
+            .try_for_each(|entry| self.internal_migrate_or_create(entry))
+            .map_err(|err| {
+                error!(?err, "migrate_domain_9_to_10 -> Error");
+                err
+            })?;
+
+        self.reload()?;
+
         Ok(())
     }
 
@@ -828,7 +503,7 @@ impl QueryServerWriteTransaction<'_> {
         //
         // DO NOT MODIFY THIS DEFINITION
         let idm_schema: Vec<EntryInitNew> = vec![
-            SCHEMA_ATTR_MAIL.clone().into(),
+            // SCHEMA_ATTR_MAIL.clone().into(),
             SCHEMA_ATTR_ACCOUNT_EXPIRE.clone().into(),
             SCHEMA_ATTR_ACCOUNT_VALID_FROM.clone().into(),
             SCHEMA_ATTR_API_TOKEN_SESSION.clone().into(),
@@ -838,7 +513,7 @@ impl QueryServerWriteTransaction<'_> {
             SCHEMA_ATTR_BADLIST_PASSWORD.clone().into(),
             SCHEMA_ATTR_CREDENTIAL_UPDATE_INTENT_TOKEN.clone().into(),
             SCHEMA_ATTR_ATTESTED_PASSKEYS.clone().into(),
-            SCHEMA_ATTR_DISPLAYNAME.clone().into(),
+            // SCHEMA_ATTR_DISPLAYNAME.clone().into(),
             SCHEMA_ATTR_DOMAIN_DISPLAY_NAME.clone().into(),
             SCHEMA_ATTR_DOMAIN_LDAP_BASEDN.clone().into(),
             SCHEMA_ATTR_DOMAIN_NAME.clone().into(),
@@ -853,7 +528,7 @@ impl QueryServerWriteTransaction<'_> {
             SCHEMA_ATTR_GIDNUMBER.clone().into(),
             SCHEMA_ATTR_GRANT_UI_HINT.clone().into(),
             SCHEMA_ATTR_JWS_ES256_PRIVATE_KEY.clone().into(),
-            SCHEMA_ATTR_LEGALNAME.clone().into(),
+            // SCHEMA_ATTR_LEGALNAME.clone().into(),
             SCHEMA_ATTR_LOGINSHELL.clone().into(),
             SCHEMA_ATTR_NAME_HISTORY.clone().into(),
             SCHEMA_ATTR_NSUNIQUEID.clone().into(),
@@ -867,7 +542,7 @@ impl QueryServerWriteTransaction<'_> {
             SCHEMA_ATTR_OAUTH2_RS_IMPLICIT_SCOPES.clone().into(),
             SCHEMA_ATTR_OAUTH2_RS_NAME.clone().into(),
             SCHEMA_ATTR_OAUTH2_RS_ORIGIN_LANDING.clone().into(),
-            SCHEMA_ATTR_OAUTH2_RS_ORIGIN.clone().into(),
+            // SCHEMA_ATTR_OAUTH2_RS_ORIGIN.clone().into(),
             SCHEMA_ATTR_OAUTH2_RS_SCOPE_MAP.clone().into(),
             SCHEMA_ATTR_OAUTH2_RS_SUP_SCOPE_MAP.clone().into(),
             SCHEMA_ATTR_OAUTH2_RS_TOKEN_KEY.clone().into(),
@@ -902,6 +577,17 @@ impl QueryServerWriteTransaction<'_> {
             // DL7
             SCHEMA_ATTR_PATCH_LEVEL_DL7.clone().into(),
             SCHEMA_ATTR_DOMAIN_DEVELOPMENT_TAINT_DL7.clone().into(),
+            SCHEMA_ATTR_REFERS_DL7.clone().into(),
+            SCHEMA_ATTR_CERTIFICATE_DL7.clone().into(),
+            SCHEMA_ATTR_OAUTH2_RS_ORIGIN_DL7.clone().into(),
+            SCHEMA_ATTR_OAUTH2_STRICT_REDIRECT_URI_DL7.clone().into(),
+            SCHEMA_ATTR_MAIL_DL7.clone().into(),
+            SCHEMA_ATTR_LEGALNAME_DL7.clone().into(),
+            SCHEMA_ATTR_DISPLAYNAME_DL7.clone().into(),
+            // DL8
+            SCHEMA_ATTR_LINKED_GROUP_DL8.clone().into(),
+            SCHEMA_ATTR_APPLICATION_PASSWORD_DL8.clone().into(),
+            SCHEMA_ATTR_ALLOW_PRIMARY_CRED_FALLBACK_DL8.clone().into(),
         ];
 
         let r = idm_schema
@@ -928,14 +614,14 @@ impl QueryServerWriteTransaction<'_> {
             // DL4
             SCHEMA_CLASS_OAUTH2_RS_PUBLIC_DL4.clone().into(),
             // DL5
-            SCHEMA_CLASS_PERSON_DL5.clone().into(),
+            // SCHEMA_CLASS_PERSON_DL5.clone().into(),
             SCHEMA_CLASS_ACCOUNT_DL5.clone().into(),
-            SCHEMA_CLASS_OAUTH2_RS_DL5.clone().into(),
+            // SCHEMA_CLASS_OAUTH2_RS_DL5.clone().into(),
             SCHEMA_CLASS_OAUTH2_RS_BASIC_DL5.clone().into(),
             // DL6
-            SCHEMA_CLASS_ACCOUNT_POLICY_DL6.clone().into(),
-            SCHEMA_CLASS_SERVICE_ACCOUNT_DL6.clone().into(),
-            SCHEMA_CLASS_SYNC_ACCOUNT_DL6.clone().into(),
+            // SCHEMA_CLASS_ACCOUNT_POLICY_DL6.clone().into(),
+            // SCHEMA_CLASS_SERVICE_ACCOUNT_DL6.clone().into(),
+            // SCHEMA_CLASS_SYNC_ACCOUNT_DL6.clone().into(),
             SCHEMA_CLASS_GROUP_DL6.clone().into(),
             SCHEMA_CLASS_KEY_PROVIDER_DL6.clone().into(),
             SCHEMA_CLASS_KEY_PROVIDER_INTERNAL_DL6.clone().into(),
@@ -943,7 +629,18 @@ impl QueryServerWriteTransaction<'_> {
             SCHEMA_CLASS_KEY_OBJECT_JWT_ES256_DL6.clone().into(),
             SCHEMA_CLASS_KEY_OBJECT_JWE_A128GCM_DL6.clone().into(),
             SCHEMA_CLASS_KEY_OBJECT_INTERNAL_DL6.clone().into(),
-            SCHEMA_CLASS_DOMAIN_INFO_DL6.clone().into(),
+            // SCHEMA_CLASS_DOMAIN_INFO_DL6.clone().into(),
+            // DL7
+            // SCHEMA_CLASS_DOMAIN_INFO_DL7.clone().into(),
+            SCHEMA_CLASS_SERVICE_ACCOUNT_DL7.clone().into(),
+            SCHEMA_CLASS_SYNC_ACCOUNT_DL7.clone().into(),
+            SCHEMA_CLASS_CLIENT_CERTIFICATE_DL7.clone().into(),
+            SCHEMA_CLASS_OAUTH2_RS_DL7.clone().into(),
+            // DL8
+            SCHEMA_CLASS_ACCOUNT_POLICY_DL8.clone().into(),
+            SCHEMA_CLASS_APPLICATION_DL8.clone().into(),
+            SCHEMA_CLASS_PERSON_DL8.clone().into(),
+            SCHEMA_CLASS_DOMAIN_INFO_DL8.clone().into(),
         ];
 
         let r: Result<(), _> = idm_schema_classes_dl1
@@ -1034,10 +731,10 @@ impl QueryServerWriteTransaction<'_> {
             IDM_ACP_RADIUS_SERVERS_V1.clone(),
             IDM_ACP_RADIUS_SECRET_MANAGE_V1.clone(),
             IDM_ACP_PEOPLE_SELF_WRITE_MAIL_V1.clone(),
-            IDM_ACP_SELF_READ_V1.clone(),
-            IDM_ACP_SELF_WRITE_V1.clone(),
+            // IDM_ACP_SELF_READ_V1.clone(),
+            // IDM_ACP_SELF_WRITE_V1.clone(),
             IDM_ACP_ACCOUNT_SELF_WRITE_V1.clone(),
-            IDM_ACP_SELF_NAME_WRITE_V1.clone(),
+            // IDM_ACP_SELF_NAME_WRITE_V1.clone(),
             IDM_ACP_ALL_ACCOUNTS_POSIX_READ_V1.clone(),
             IDM_ACP_SYSTEM_CONFIG_ACCOUNT_POLICY_MANAGE_V1.clone(),
             IDM_ACP_GROUP_UNIX_MANAGE_V1.clone(),
@@ -1059,13 +756,26 @@ impl QueryServerWriteTransaction<'_> {
             IDM_ACP_SERVICE_ACCOUNT_MANAGE_V1.clone(),
             // DL4
             // DL5
-            IDM_ACP_OAUTH2_MANAGE_DL5.clone(),
+            // IDM_ACP_OAUTH2_MANAGE_DL5.clone(),
             // DL6
-            IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL6.clone(),
+            // IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL6.clone(),
             IDM_ACP_PEOPLE_CREATE_DL6.clone(),
             IDM_ACP_GROUP_MANAGE_DL6.clone(),
             IDM_ACP_ACCOUNT_MAIL_READ_DL6.clone(),
-            IDM_ACP_DOMAIN_ADMIN_DL6.clone(),
+            // IDM_ACP_DOMAIN_ADMIN_DL6.clone(),
+            // DL7
+            // IDM_ACP_SELF_WRITE_DL7.clone(),
+            IDM_ACP_SELF_NAME_WRITE_DL7.clone(),
+            IDM_ACP_HP_CLIENT_CERTIFICATE_MANAGER_DL7.clone(),
+            IDM_ACP_OAUTH2_MANAGE_DL7.clone(),
+            // DL8
+            IDM_ACP_SELF_READ_DL8.clone(),
+            IDM_ACP_SELF_WRITE_DL8.clone(),
+            IDM_ACP_APPLICATION_MANAGE_DL8.clone(),
+            IDM_ACP_APPLICATION_ENTRY_MANAGER_DL8.clone(),
+            IDM_ACP_MAIL_SERVERS_DL8.clone(),
+            IDM_ACP_DOMAIN_ADMIN_DL8.clone(),
+            IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL8.clone(),
         ];
 
         let res: Result<(), _> = idm_entries
@@ -1095,19 +805,6 @@ impl QueryServerReadTransaction<'_> {
 
         let mut report_items = Vec::with_capacity(1);
 
-        if current_level <= DOMAIN_LEVEL_6 && upgrade_level >= DOMAIN_LEVEL_7 {
-            let item = self
-                .domain_upgrade_check_6_to_7_gidnumber()
-                .map_err(|err| {
-                    error!(
-                        ?err,
-                        "Failed to perform domain upgrade check 6 to 7 - gidnumber"
-                    );
-                    err
-                })?;
-            report_items.push(item);
-        }
-
         if current_level <= DOMAIN_LEVEL_7 && upgrade_level >= DOMAIN_LEVEL_8 {
             let item = self
                 .domain_upgrade_check_7_to_8_security_keys()
@@ -1141,94 +838,6 @@ impl QueryServerReadTransaction<'_> {
         })
     }
 
-    pub(crate) fn domain_upgrade_check_6_to_7_gidnumber(
-        &mut self,
-    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
-        let filter = filter!(f_and!([
-            f_or!([
-                f_eq(Attribute::Class, EntryClass::PosixAccount.into()),
-                f_eq(Attribute::Class, EntryClass::PosixGroup.into())
-            ]),
-            // This logic gets a bit messy but it would be:
-            // If ! (
-            //    (GID_REGULAR_USER_MIN < value < GID_REGULAR_USER_MAX) ||
-            //    (GID_UNUSED_A_MIN < value < GID_UNUSED_A_MAX) ||
-            //    (GID_UNUSED_B_MIN < value < GID_UNUSED_B_MAX) ||
-            //    (GID_UNUSED_C_MIN < value < GID_UNUSED_D_MAX)
-            // )
-            f_andnot(f_or!([
-                f_and!([
-                    // The gid value must be less than GID_REGULAR_USER_MAX
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_REGULAR_USER_MAX)
-                    ),
-                    // This bit of mental gymnastics is "greater than".
-                    // The gid value must not be less than USER_MIN
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_REGULAR_USER_MIN)
-                    ))
-                ]),
-                f_and!([
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_A_MAX)
-                    ),
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_A_MIN)
-                    ))
-                ]),
-                f_and!([
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_B_MAX)
-                    ),
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_B_MIN)
-                    ))
-                ]),
-                // If both of these conditions are true we get:
-                // C_MIN < value < D_MAX, which the outer and-not inverts.
-                f_and!([
-                    // The gid value must be less than GID_UNUSED_D_MAX
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_D_MAX)
-                    ),
-                    // This bit of mental gymnastics is "greater than".
-                    // The gid value must not be less than C_MIN
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_C_MIN)
-                    ))
-                ]),
-            ]))
-        ]));
-
-        let results = self.internal_search(filter)?;
-
-        let affected_entries = results
-            .into_iter()
-            .map(|entry| entry.get_display_id())
-            .collect::<Vec<_>>();
-
-        let status = if affected_entries.is_empty() {
-            ProtoDomainUpgradeCheckStatus::Pass6To7Gidnumber
-        } else {
-            ProtoDomainUpgradeCheckStatus::Fail6To7Gidnumber
-        };
-
-        Ok(ProtoDomainUpgradeCheckItem {
-            status,
-            from_level: DOMAIN_LEVEL_6,
-            to_level: DOMAIN_LEVEL_7,
-            affected_entries,
-        })
-    }
-
     pub(crate) fn domain_upgrade_check_7_to_8_security_keys(
         &mut self,
     ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
@@ -1300,7 +909,7 @@ impl QueryServerReadTransaction<'_> {
 
 #[cfg(test)]
 mod tests {
-    use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
+    // use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
     use crate::prelude::*;
 
     #[qs_test]
@@ -1329,9 +938,8 @@ mod tests {
         }
     }
 
-    #[qs_test(domain_level=DOMAIN_LEVEL_6)]
-    async fn test_migrations_dl6_dl7(server: &QueryServer) {
-        // Assert our instance was setup to version 6
+    #[qs_test(domain_level=DOMAIN_LEVEL_8)]
+    async fn test_migrations_dl8_dl9(server: &QueryServer) {
         let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
 
         let db_domain_version = write_txn
@@ -1340,167 +948,95 @@ mod tests {
             .get_ava_single_uint32(Attribute::Version)
             .expect("Attribute Version not present");
 
-        assert_eq!(db_domain_version, DOMAIN_LEVEL_6);
-
-        // Create an oauth2 client that doesn't have a landing url set.
-        let oauth2_client_uuid = Uuid::new_v4();
-
-        let ea: Entry<EntryInit, EntryNew> = entry_init!(
-            (Attribute::Class, EntryClass::Object.to_value()),
-            (Attribute::Class, EntryClass::Account.to_value()),
-            (Attribute::Uuid, Value::Uuid(oauth2_client_uuid)),
-            (
-                Attribute::Class,
-                EntryClass::OAuth2ResourceServer.to_value()
-            ),
-            (
-                Attribute::Class,
-                EntryClass::OAuth2ResourceServerPublic.to_value()
-            ),
-            (Attribute::Name, Value::new_iname("test_resource_server")),
-            (
-                Attribute::DisplayName,
-                Value::new_utf8s("test_resource_server")
-            ),
-            (
-                Attribute::OAuth2RsOrigin,
-                Value::new_url_s("https://demo.example.com").unwrap()
-            )
-        );
-
-        write_txn
-            .internal_create(vec![ea])
-            .expect("Unable to create oauth2 client");
-
-        // Set the version to 7.
-        write_txn
-            .internal_apply_domain_migration(DOMAIN_LEVEL_7)
-            .expect("Unable to set domain level to version 7");
-
-        // post migration verification.
-        let domain_entry = write_txn
-            .internal_search_uuid(UUID_DOMAIN_INFO)
-            .expect("Unable to access domain entry");
-
-        assert!(!domain_entry.attribute_pres(Attribute::PrivateCookieKey));
-
-        let oauth2_entry = write_txn
-            .internal_search_uuid(oauth2_client_uuid)
-            .expect("Unable to access oauth2 client entry");
-
-        let origin = oauth2_entry
-            .get_ava_single_url(Attribute::OAuth2RsOrigin)
-            .expect("Unable to access oauth2 client origin");
-
-        // The origin should have been cloned to the landing.
-        let landing = oauth2_entry
-            .get_ava_single_url(Attribute::OAuth2RsOriginLanding)
-            .expect("Unable to access oauth2 client landing");
-
-        assert_eq!(origin, landing);
-
-        write_txn.commit().expect("Unable to commit");
-    }
-
-    #[qs_test(domain_level=DOMAIN_LEVEL_7)]
-    async fn test_migrations_dl7_dl8(server: &QueryServer) {
-        // Assert our instance was setup to version 7
-        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
-
-        let db_domain_version = write_txn
-            .internal_search_uuid(UUID_DOMAIN_INFO)
-            .expect("unable to access domain entry")
-            .get_ava_single_uint32(Attribute::Version)
-            .expect("Attribute Version not present");
-
-        assert_eq!(db_domain_version, DOMAIN_LEVEL_7);
-
-        // Create an oauth2 client that doesn't have a landing url set.
-        let oauth2_client_uuid = Uuid::new_v4();
-
-        let ea: Entry<EntryInit, EntryNew> = entry_init!(
-            (Attribute::Class, EntryClass::Object.to_value()),
-            (Attribute::Class, EntryClass::Account.to_value()),
-            (Attribute::Uuid, Value::Uuid(oauth2_client_uuid)),
-            (
-                Attribute::Class,
-                EntryClass::OAuth2ResourceServer.to_value()
-            ),
-            (
-                Attribute::Class,
-                EntryClass::OAuth2ResourceServerPublic.to_value()
-            ),
-            (Attribute::Name, Value::new_iname("test_resource_server")),
-            (
-                Attribute::DisplayName,
-                Value::new_utf8s("test_resource_server")
-            ),
-            (
-                Attribute::OAuth2RsOriginLanding,
-                Value::new_url_s("https://demo.example.com/oauth2").unwrap()
-            ),
-            (
-                Attribute::OAuth2RsOrigin,
-                Value::new_url_s("https://demo.example.com").unwrap()
-            )
-        );
-
-        write_txn
-            .internal_create(vec![ea])
-            .expect("Unable to create oauth2 client");
+        assert_eq!(db_domain_version, DOMAIN_LEVEL_8);
 
         write_txn.commit().expect("Unable to commit");
 
-        // pre migration verification.
+        // == pre migration verification. ==
         // check we currently would fail a migration.
 
-        let mut read_txn = server.read().await.unwrap();
-
-        match read_txn.domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri() {
-            Ok(ProtoDomainUpgradeCheckItem {
-                status: ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri,
-                ..
-            }) => {
-                trace!("Failed as expected, very good.");
-            }
-            other => {
-                error!(?other);
-                unreachable!();
-            }
-        };
-
-        drop(read_txn);
-
-        // Okay, fix the problem.
+        // let mut read_txn = server.read().await.unwrap();
+        // drop(read_txn);
 
         let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
 
-        write_txn
-            .internal_modify_uuid(
-                oauth2_client_uuid,
-                &ModifyList::new_purge_and_set(
-                    Attribute::OAuth2StrictRedirectUri,
-                    Value::Bool(true),
-                ),
-            )
-            .expect("Unable to enforce strict mode.");
+        // Fix any issues
 
-        // Set the version to 8.
+        // == Increase the version ==
         write_txn
-            .internal_apply_domain_migration(DOMAIN_LEVEL_8)
-            .expect("Unable to set domain level to version 8");
+            .internal_apply_domain_migration(DOMAIN_LEVEL_9)
+            .expect("Unable to set domain level to version 9");
 
         // post migration verification.
 
         write_txn.commit().expect("Unable to commit");
     }
 
-    #[qs_test(domain_level=DOMAIN_LEVEL_8)]
-    async fn test_migrations_dl8_dl9(_server: &QueryServer) {}
-
     #[qs_test(domain_level=DOMAIN_LEVEL_9)]
-    async fn test_migrations_dl9_dl10(_server: &QueryServer) {}
+    async fn test_migrations_dl9_dl10(server: &QueryServer) {
+        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        let db_domain_version = write_txn
+            .internal_search_uuid(UUID_DOMAIN_INFO)
+            .expect("unable to access domain entry")
+            .get_ava_single_uint32(Attribute::Version)
+            .expect("Attribute Version not present");
+
+        assert_eq!(db_domain_version, DOMAIN_LEVEL_9);
+
+        write_txn.commit().expect("Unable to commit");
+
+        // == pre migration verification. ==
+        // check we currently would fail a migration.
+
+        // let mut read_txn = server.read().await.unwrap();
+        // drop(read_txn);
+
+        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        // Fix any issues
+
+        // == Increase the version ==
+        write_txn
+            .internal_apply_domain_migration(DOMAIN_LEVEL_10)
+            .expect("Unable to set domain level to version 10");
+
+        // post migration verification.
+
+        write_txn.commit().expect("Unable to commit");
+    }
 
     #[qs_test(domain_level=DOMAIN_LEVEL_10)]
-    async fn test_migrations_dl10_dl11(_server: &QueryServer) {}
+    async fn test_migrations_dl10_dl11(server: &QueryServer) {
+        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        let db_domain_version = write_txn
+            .internal_search_uuid(UUID_DOMAIN_INFO)
+            .expect("unable to access domain entry")
+            .get_ava_single_uint32(Attribute::Version)
+            .expect("Attribute Version not present");
+
+        assert_eq!(db_domain_version, DOMAIN_LEVEL_10);
+
+        write_txn.commit().expect("Unable to commit");
+
+        // == pre migration verification. ==
+        // check we currently would fail a migration.
+
+        // let mut read_txn = server.read().await.unwrap();
+        // drop(read_txn);
+
+        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        // Fix any issues
+
+        // == Increase the version ==
+        write_txn
+            .internal_apply_domain_migration(DOMAIN_LEVEL_11)
+            .expect("Unable to set domain level to version 11");
+
+        // post migration verification.
+
+        write_txn.commit().expect("Unable to commit");
+    }
 }
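The three surviving migration tests share one scaffold: assert the starting level, fix anything that would block the upgrade, raise the level, verify, commit. A self-contained model of that flow (`FakeServer` and everything in it is hypothetical; only the step names mirror the test comments above):

```rust
// Model of the shared test scaffold; FakeServer stands in for QueryServer
// and only tracks the domain version.
#[derive(Debug, PartialEq)]
enum MigrationError {
    WrongStartLevel { expected: u32, found: u32 },
}

struct FakeServer {
    domain_version: u32,
}

impl FakeServer {
    fn apply_domain_migration(&mut self, target: u32) {
        // The real internal_apply_domain_migration runs the migrate_*
        // functions between the two levels; here we just record the bump.
        self.domain_version = target;
    }
}

/// assert start level -> fix any issues -> increase the version -> verify.
fn run_migration_test(server: &mut FakeServer, from: u32, to: u32) -> Result<(), MigrationError> {
    if server.domain_version != from {
        return Err(MigrationError::WrongStartLevel {
            expected: from,
            found: server.domain_version,
        });
    }
    // "Fix any issues" would go here (none are needed for 8->9, 9->10, 10->11).
    server.apply_domain_migration(to);
    assert_eq!(server.domain_version, to);
    Ok(())
}

fn main() {
    let mut s = FakeServer { domain_version: 8 };
    run_migration_test(&mut s, 8, 9).unwrap();
    run_migration_test(&mut s, 9, 10).unwrap();
    run_migration_test(&mut s, 10, 11).unwrap();
    assert_eq!(s.domain_version, 11);
}
```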
diff --git a/server/lib/src/server/mod.rs b/server/lib/src/server/mod.rs
index 51ad616d1..cdafef5b1 100644
--- a/server/lib/src/server/mod.rs
+++ b/server/lib/src/server/mod.rs
@@ -1238,13 +1238,6 @@ pub trait QueryServerTransaction<'a> {
     }
 
     fn get_domain_key_object_handle(&self) -> Result<Arc<KeyObject>, OperationError> {
-        #[cfg(test)]
-        if self.get_domain_version() < DOMAIN_LEVEL_6 {
-            // We must be in tests, and this is a DL5 to 6 test. For this we'll just make
-            // an ephemeral provider.
-            return Ok(crate::server::keys::KeyObjectInternal::new_test());
-        };
-
         self.get_key_providers()
             .get_key_object_handle(UUID_DOMAIN_INFO)
             .ok_or(OperationError::KP0031KeyObjectNotFound)
@@ -2335,7 +2328,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         debug!(domain_previous_patch_level = ?previous_patch_level, domain_target_patch_level = ?domain_info_patch_level);
 
         // We have to check for DL0 since that's the initialisation level.
-        if previous_version <= DOMAIN_LEVEL_5 && previous_version != DOMAIN_LEVEL_0 {
+        if previous_version < DOMAIN_MIN_REMIGRATION_LEVEL && previous_version != DOMAIN_LEVEL_0 {
             error!("UNABLE TO PROCEED. You are attempting a Skip update which is NOT SUPPORTED. You must upgrade one-version of Kanidm at a time.");
             error!("For more see: https://kanidm.github.io/kanidm/stable/support.html#upgrade-policy and https://kanidm.github.io/kanidm/stable/server_updates.html");
             error!(domain_previous_version = ?previous_version, domain_target_version = ?domain_info_version);
@@ -2343,21 +2336,8 @@ impl<'a> QueryServerWriteTransaction<'a> {
             return Err(OperationError::MG0008SkipUpgradeAttempted);
         }
 
-        if previous_version <= DOMAIN_LEVEL_6 && domain_info_version >= DOMAIN_LEVEL_7 {
-            self.migrate_domain_6_to_7()?;
-        }
-
-        // Similar to the older system info migration handler, these allow "one shot" fixes
-        // to be issued and run by bumping the patch level.
-        if previous_patch_level < PATCH_LEVEL_1 && domain_info_patch_level >= PATCH_LEVEL_1 {
-            self.migrate_domain_patch_level_1()?;
-        }
-
-        if previous_version <= DOMAIN_LEVEL_7 && domain_info_version >= DOMAIN_LEVEL_8 {
-            self.migrate_domain_7_to_8()?;
-        }
-
         if previous_version <= DOMAIN_LEVEL_8 && domain_info_version >= DOMAIN_LEVEL_9 {
+            // 1.4 -> 1.5
             self.migrate_domain_8_to_9()?;
         }
 
@@ -2366,10 +2346,12 @@ impl<'a> QueryServerWriteTransaction<'a> {
         }
 
         if previous_version <= DOMAIN_LEVEL_9 && domain_info_version >= DOMAIN_LEVEL_10 {
+            // 1.5 -> 1.6
             self.migrate_domain_9_to_10()?;
         }
 
         if previous_version <= DOMAIN_LEVEL_10 && domain_info_version >= DOMAIN_LEVEL_11 {
+            // 1.6 -> 1.7
             self.migrate_domain_10_to_11()?;
         }
 
@@ -2394,7 +2376,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         let display_name = domain_entry
             .get_ava_single_utf8(Attribute::DomainDisplayName)
             .map(str::to_string)
-            .ok_or(OperationError::InvalidEntryState)?;
+            .unwrap_or_else(|| format!("Kanidm {}", domain_name));
 
         let domain_ldap_allow_unix_pw_bind = domain_entry
             .get_ava_single_bool(Attribute::LdapAllowUnixPwBind)
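To close out the server/mod.rs hunks: the skip-update guard and the display-name fallback are the two behavioural changes, and both restate cleanly as pure functions. A self-contained sketch (constants are illustrative; the error text is paraphrased):

```rust
type DomainVersion = u32;

// Illustrative values; the real constants are in constants/mod.rs.
const DOMAIN_LEVEL_0: DomainVersion = 0;
const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = 8;

/// The new skip-update guard: anything below the remigration floor (other
/// than a brand-new DL0 instance) is rejected, replacing the old hard-coded
/// `<= DOMAIN_LEVEL_5` comparison.
fn upgrade_is_allowed(previous_version: DomainVersion) -> bool {
    !(previous_version < DOMAIN_MIN_REMIGRATION_LEVEL && previous_version != DOMAIN_LEVEL_0)
}

/// domain_display_name no longer hard-errors when absent: it falls back to
/// "Kanidm {domain_name}", matching the default the domain plugin sets.
fn display_name(stored: Option<&str>, domain_name: &str) -> String {
    stored
        .map(str::to_string)
        .unwrap_or_else(|| format!("Kanidm {}", domain_name))
}

fn main() {
    assert!(upgrade_is_allowed(0)); // fresh install, initialisation level
    assert!(upgrade_is_allowed(8)); // oldest supported upgrade source
    assert!(!upgrade_is_allowed(7)); // would be a skip update now
    assert_eq!(display_name(None, "example.com"), "Kanidm example.com");
    assert_eq!(display_name(Some("Prod IDM"), "example.com"), "Prod IDM");
}
```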