diff --git a/server/lib/src/constants/acp.rs b/server/lib/src/constants/acp.rs index be1836345..eae453e92 100644 --- a/server/lib/src/constants/acp.rs +++ b/server/lib/src/constants/acp.rs @@ -470,7 +470,7 @@ lazy_static! { EntryClass::AccessControlModify, EntryClass::AccessControlSearch ], - name: "idm_acp_group_entry_managed_by", + name: "idm_acp_group_entry_managed_by_modify", uuid: UUID_IDM_ACP_GROUP_ENTRY_MANAGED_BY_MODIFY, description: "Builtin IDM Control for allowing entry_managed_by to be set on group entries", receiver: BuiltinAcpReceiver::Group(vec![UUID_IDM_ACCESS_CONTROL_ADMINS]), @@ -918,7 +918,7 @@ lazy_static! { EntryClass::AccessControlModify, EntryClass::AccessControlSearch ], - name: "idm_acp_hp_oauth2_manage_priv", + name: "idm_acp_oauth2_manage", uuid: UUID_IDM_ACP_OAUTH2_MANAGE_V1, description: "Builtin IDM Control for managing OAuth2 resource server integrations.", receiver: BuiltinAcpReceiver::Group(vec![UUID_IDM_OAUTH2_ADMINS]), @@ -1315,7 +1315,7 @@ lazy_static! { EntryClass::AccessControlProfile, EntryClass::AccessControlModify, ], - name: "idm_people_self_acp_write_mail", + name: "idm_acp_people_self_write_mail", uuid: UUID_IDM_ACP_PEOPLE_SELF_WRITE_MAIL, description: "Builtin IDM Control for self write of mail for people accounts.", receiver: BuiltinAcpReceiver::Group(vec![UUID_IDM_PEOPLE_SELF_MAIL_WRITE]), @@ -1570,7 +1570,7 @@ lazy_static! { lazy_static! { pub static ref IDM_ACP_ACCOUNT_SELF_WRITE_V1: BuiltinAcp = BuiltinAcp { - name: "idm_acp_self_account_write", + name: "idm_acp_account_self_write", uuid: UUID_IDM_ACP_ACCOUNT_SELF_WRITE_V1, description: "Builtin IDM Control for self write - required for accounts to update their own session state.", classes: vec![ @@ -1974,7 +1974,7 @@ lazy_static! 
{ EntryClass::AccessControlProfile, EntryClass::AccessControlModify, ], - name: "idm_acp_people_account_policy_manage", + name: "idm_acp_people_manage", uuid: UUID_IDM_ACP_PEOPLE_MANAGE_V1, description: "Builtin IDM Control for management of peoples non sensitive attributes.", receiver: BuiltinAcpReceiver::Group(vec![UUID_IDM_PEOPLE_ADMINS]), @@ -2301,7 +2301,7 @@ lazy_static! { EntryClass::AccessControlModify, EntryClass::AccessControlSearch ], - name: "idm_acp_service_account_entry_managed_by", + name: "idm_acp_service_account_entry_managed_by_modify", uuid: UUID_IDM_ACP_SERVICE_ACCOUNT_ENTRY_MANAGED_BY_MODIFY, description: "Builtin IDM Control for allowing entry_managed_by to be set on service account entries", diff --git a/server/lib/src/constants/mod.rs b/server/lib/src/constants/mod.rs index 559b70baa..95e1346d4 100644 --- a/server/lib/src/constants/mod.rs +++ b/server/lib/src/constants/mod.rs @@ -73,6 +73,7 @@ pub const DOMAIN_LEVEL_8: DomainVersion = 8; /// Domain Level introduced with 1.5.0. /// Deprecated as of 1.7.0 pub const DOMAIN_LEVEL_9: DomainVersion = 9; +pub const PATCH_LEVEL_2: u32 = 2; /// Domain Level introduced with 1.6.0. /// Deprecated as of 1.8.0 @@ -89,7 +90,7 @@ pub const DOMAIN_PREVIOUS_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_8; // the NEXT level that users will upgrade too. pub const DOMAIN_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_9; // The current patch level if any out of band fixes are required. -pub const DOMAIN_TGT_PATCH_LEVEL: u32 = PATCH_LEVEL_1; +pub const DOMAIN_TGT_PATCH_LEVEL: u32 = PATCH_LEVEL_2; // The target domain functional level for the SUBSEQUENT release/dev cycle. 
pub const DOMAIN_TGT_NEXT_LEVEL: DomainVersion = DOMAIN_LEVEL_10; // The maximum supported domain functional level diff --git a/server/lib/src/constants/schema.rs b/server/lib/src/constants/schema.rs index 2ad8b5560..19a9d9907 100644 --- a/server/lib/src/constants/schema.rs +++ b/server/lib/src/constants/schema.rs @@ -1119,6 +1119,8 @@ pub static ref SCHEMA_CLASS_DOMAIN_INFO_DL6: SchemaClass = SchemaClass { Attribute::PrivateCookieKey, Attribute::FernetPrivateKeyStr, Attribute::Es256PrivateKeyDer, + Attribute::PatchLevel, + Attribute::DomainDevelopmentTaint, ], systemmust: vec![ Attribute::Name, diff --git a/server/lib/src/server/migrations.rs b/server/lib/src/server/migrations.rs index 4e4842549..9297751f2 100644 --- a/server/lib/src/server/migrations.rs +++ b/server/lib/src/server/migrations.rs @@ -123,6 +123,9 @@ impl QueryServer { "After setting internal domain info" ); + let mut reload_required = false; + + // If the database domain info is a lower version than our target level, we reload. if domain_info_version < domain_target_level { write_txn .internal_modify_uuid( @@ -138,10 +141,7 @@ impl QueryServer { // Reload if anything in migrations requires it - this triggers the domain migrations // which in turn can trigger schema reloads etc. - write_txn.reload()?; - // Force a reindex here since schema probably changed and we aren't at the - // runtime phase where it will trigger on its own yet. - write_txn.reindex()?; + reload_required = true; } else if domain_development_taint { // This forces pre-release versions to re-migrate each start up. This solves // the domain-version-sprawl issue so that during a development cycle we can @@ -154,12 +154,12 @@ impl QueryServer { // AND // We did not already need a version migration as above write_txn.domain_remigrate(DOMAIN_PREVIOUS_TGT_LEVEL)?; - write_txn.reload()?; - // Force a reindex here since schema probably changed and we aren't at the - // runtime phase where it will trigger on its own yet. 
- write_txn.reindex()?; + + reload_required = true; } + // If we are new enough to support patches, and we are lower than the target patch level + // then a reload will be applied after we raise the patch level. if domain_target_level >= DOMAIN_LEVEL_7 && domain_patch_level < DOMAIN_TGT_PATCH_LEVEL { write_txn .internal_modify_uuid( @@ -170,13 +170,25 @@ impl QueryServer { ), ) .map(|()| { - warn!("Domain level has been raised to {}", domain_target_level); + warn!( + "Domain patch level has been raised to {}", + DOMAIN_TGT_PATCH_LEVEL + ); })?; - // Run the patch migrations if any. - write_txn.reload()?; + reload_required = true; }; + // Execute whatever operations we have batched up and ready to go. This is needed + // to preserve ordering of the operations - if we reloaded after a remigrate then + // we would have skipped the patch level fix which needs to have occurred *first*. + if reload_required { + write_txn.reload()?; + // We are not yet at the schema phase where reindexes will auto-trigger + // so if one was required, do it now. + write_txn.reindex()?; + } + // Now set the db/domain devel taint flag to match our current release status // if it changes. This is what breaks the cycle of db taint from dev -> stable let current_devel_flag = option_env!("KANIDM_PRE_RELEASE").is_some(); @@ -665,6 +677,81 @@ impl<'a> QueryServerWriteTransaction<'a> { Ok(()) } + /// Patch Application - This triggers a one-shot fixup task for issue #3178 + /// to force access controls to re-migrate in existing databases so that their + /// content matches expected values.
+ #[instrument(level = "info", skip_all)] + pub(crate) fn migrate_domain_patch_level_2(&mut self) -> Result<(), OperationError> { + admin_warn!("applying domain patch 2."); + + debug_assert!(*self.phase >= ServerPhase::SchemaReady); + + let idm_data = [ + IDM_ACP_ACCOUNT_MAIL_READ_DL6.clone().into(), + IDM_ACP_ACCOUNT_SELF_WRITE_V1.clone().into(), + IDM_ACP_ACCOUNT_UNIX_EXTEND_V1.clone().into(), + IDM_ACP_ACP_MANAGE_V1.clone().into(), + IDM_ACP_ALL_ACCOUNTS_POSIX_READ_V1.clone().into(), + IDM_ACP_APPLICATION_ENTRY_MANAGER_DL8.clone().into(), + IDM_ACP_APPLICATION_MANAGE_DL8.clone().into(), + IDM_ACP_DOMAIN_ADMIN_DL8.clone().into(), + IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL8.clone().into(), + IDM_ACP_GROUP_ENTRY_MANAGED_BY_MODIFY_V1.clone().into(), + IDM_ACP_GROUP_ENTRY_MANAGER_V1.clone().into(), + IDM_ACP_GROUP_MANAGE_DL6.clone().into(), + IDM_ACP_GROUP_READ_V1.clone().into(), + IDM_ACP_GROUP_UNIX_MANAGE_V1.clone().into(), + IDM_ACP_HP_CLIENT_CERTIFICATE_MANAGER_DL7.clone().into(), + IDM_ACP_HP_GROUP_UNIX_MANAGE_V1.clone().into(), + IDM_ACP_HP_PEOPLE_CREDENTIAL_RESET_V1.clone().into(), + IDM_ACP_HP_SERVICE_ACCOUNT_ENTRY_MANAGED_BY_MODIFY_V1 + .clone() + .into(), + IDM_ACP_MAIL_SERVERS_DL8.clone().into(), + IDM_ACP_OAUTH2_MANAGE_DL9.clone().into(), + IDM_ACP_PEOPLE_CREATE_DL6.clone().into(), + IDM_ACP_PEOPLE_CREDENTIAL_RESET_V1.clone().into(), + IDM_ACP_PEOPLE_DELETE_V1.clone().into(), + IDM_ACP_PEOPLE_MANAGE_V1.clone().into(), + IDM_ACP_PEOPLE_PII_MANAGE_V1.clone().into(), + IDM_ACP_PEOPLE_PII_READ_V1.clone().into(), + IDM_ACP_PEOPLE_READ_V1.clone().into(), + IDM_ACP_PEOPLE_SELF_WRITE_MAIL_V1.clone().into(), + IDM_ACP_RADIUS_SECRET_MANAGE_V1.clone().into(), + IDM_ACP_RADIUS_SERVERS_V1.clone().into(), + IDM_ACP_RECYCLE_BIN_REVIVE_V1.clone().into(), + IDM_ACP_RECYCLE_BIN_SEARCH_V1.clone().into(), + IDM_ACP_SCHEMA_WRITE_ATTRS_V1.clone().into(), + IDM_ACP_SCHEMA_WRITE_CLASSES_V1.clone().into(), + IDM_ACP_SELF_NAME_WRITE_DL7.clone().into(), + 
IDM_ACP_SELF_READ_DL8.clone().into(), + IDM_ACP_SELF_WRITE_DL8.clone().into(), + IDM_ACP_SERVICE_ACCOUNT_CREATE_V1.clone().into(), + IDM_ACP_SERVICE_ACCOUNT_DELETE_V1.clone().into(), + IDM_ACP_SERVICE_ACCOUNT_ENTRY_MANAGED_BY_MODIFY_V1 + .clone() + .into(), + IDM_ACP_SERVICE_ACCOUNT_ENTRY_MANAGER_V1.clone().into(), + IDM_ACP_SERVICE_ACCOUNT_MANAGE_V1.clone().into(), + IDM_ACP_SYNC_ACCOUNT_MANAGE_V1.clone().into(), + IDM_ACP_SYSTEM_CONFIG_ACCOUNT_POLICY_MANAGE_V1 + .clone() + .into(), + ]; + + idm_data + .into_iter() + .try_for_each(|entry| self.internal_migrate_or_create(entry)) + .map_err(|err| { + error!(?err, "migrate_domain_patch_level_2 -> Error"); + err + })?; + + self.reload()?; + + Ok(()) + } + /// Migration domain level 9 to 10 (1.6.0) #[instrument(level = "info", skip_all)] pub(crate) fn migrate_domain_9_to_10(&mut self) -> Result<(), OperationError> { @@ -797,6 +884,9 @@ impl<'a> QueryServerWriteTransaction<'a> { SCHEMA_ATTR_KEY_ACTION_ROTATE_DL6.clone().into(), SCHEMA_ATTR_KEY_ACTION_REVOKE_DL6.clone().into(), SCHEMA_ATTR_KEY_ACTION_IMPORT_JWS_ES256_DL6.clone().into(), + // DL7 + SCHEMA_ATTR_PATCH_LEVEL_DL7.clone().into(), + SCHEMA_ATTR_DOMAIN_DEVELOPMENT_TAINT_DL7.clone().into(), ]; let r = idm_schema @@ -829,7 +919,6 @@ impl<'a> QueryServerWriteTransaction<'a> { SCHEMA_CLASS_OAUTH2_RS_BASIC_DL5.clone().into(), // DL6 SCHEMA_CLASS_ACCOUNT_POLICY_DL6.clone().into(), - SCHEMA_CLASS_DOMAIN_INFO_DL6.clone().into(), SCHEMA_CLASS_SERVICE_ACCOUNT_DL6.clone().into(), SCHEMA_CLASS_SYNC_ACCOUNT_DL6.clone().into(), SCHEMA_CLASS_GROUP_DL6.clone().into(), @@ -839,6 +928,7 @@ impl<'a> QueryServerWriteTransaction<'a> { SCHEMA_CLASS_KEY_OBJECT_JWT_ES256_DL6.clone().into(), SCHEMA_CLASS_KEY_OBJECT_JWE_A128GCM_DL6.clone().into(), SCHEMA_CLASS_KEY_OBJECT_INTERNAL_DL6.clone().into(), + SCHEMA_CLASS_DOMAIN_INFO_DL6.clone().into(), ]; let r: Result<(), _> = idm_schema_classes_dl1 diff --git a/server/lib/src/server/mod.rs b/server/lib/src/server/mod.rs index 
ab9ee77cd..ec6e2287e 100644 --- a/server/lib/src/server/mod.rs +++ b/server/lib/src/server/mod.rs @@ -2075,6 +2075,10 @@ impl<'a> QueryServerWriteTransaction<'a> { self.migrate_domain_8_to_9()?; } + if previous_patch_level < PATCH_LEVEL_2 && domain_info_patch_level >= PATCH_LEVEL_2 { + self.migrate_domain_patch_level_2()?; + } + if previous_version <= DOMAIN_LEVEL_9 && domain_info_version >= DOMAIN_LEVEL_10 { self.migrate_domain_9_to_10()?; }