Mirror of https://github.com/kanidm/kanidm.git
Re-migrate all acps to force updating (#3184)

* Re-migrate all acps to force updating
* Update server/lib/src/server/migrations.rs

Co-authored-by: James Hodgkinson <james@terminaloutcomes.com>
commit 4f55b1cc33
parent c3e42ba257
@@ -470,7 +470,7 @@ lazy_static! {
             EntryClass::AccessControlModify,
             EntryClass::AccessControlSearch
         ],
-        name: "idm_acp_group_entry_managed_by",
+        name: "idm_acp_group_entry_managed_by_modify",
         uuid: UUID_IDM_ACP_GROUP_ENTRY_MANAGED_BY_MODIFY,
         description: "Builtin IDM Control for allowing entry_managed_by to be set on group entries",
         receiver: BuiltinAcpReceiver::Group(vec![UUID_IDM_ACCESS_CONTROL_ADMINS]),
@@ -918,7 +918,7 @@ lazy_static! {
             EntryClass::AccessControlModify,
             EntryClass::AccessControlSearch
         ],
-        name: "idm_acp_hp_oauth2_manage_priv",
+        name: "idm_acp_oauth2_manage",
         uuid: UUID_IDM_ACP_OAUTH2_MANAGE_V1,
         description: "Builtin IDM Control for managing OAuth2 resource server integrations.",
         receiver: BuiltinAcpReceiver::Group(vec![UUID_IDM_OAUTH2_ADMINS]),
@@ -1315,7 +1315,7 @@ lazy_static! {
             EntryClass::AccessControlProfile,
             EntryClass::AccessControlModify,
         ],
-        name: "idm_people_self_acp_write_mail",
+        name: "idm_acp_people_self_write_mail",
         uuid: UUID_IDM_ACP_PEOPLE_SELF_WRITE_MAIL,
         description: "Builtin IDM Control for self write of mail for people accounts.",
         receiver: BuiltinAcpReceiver::Group(vec![UUID_IDM_PEOPLE_SELF_MAIL_WRITE]),
@@ -1570,7 +1570,7 @@ lazy_static! {
 
 lazy_static! {
     pub static ref IDM_ACP_ACCOUNT_SELF_WRITE_V1: BuiltinAcp = BuiltinAcp {
-        name: "idm_acp_self_account_write",
+        name: "idm_acp_account_self_write",
         uuid: UUID_IDM_ACP_ACCOUNT_SELF_WRITE_V1,
         description: "Builtin IDM Control for self write - required for accounts to update their own session state.",
         classes: vec![
@@ -1974,7 +1974,7 @@ lazy_static! {
             EntryClass::AccessControlProfile,
             EntryClass::AccessControlModify,
         ],
-        name: "idm_acp_people_account_policy_manage",
+        name: "idm_acp_people_manage",
         uuid: UUID_IDM_ACP_PEOPLE_MANAGE_V1,
         description: "Builtin IDM Control for management of peoples non sensitive attributes.",
         receiver: BuiltinAcpReceiver::Group(vec![UUID_IDM_PEOPLE_ADMINS]),
@@ -2301,7 +2301,7 @@ lazy_static! {
             EntryClass::AccessControlModify,
             EntryClass::AccessControlSearch
         ],
-        name: "idm_acp_service_account_entry_managed_by",
+        name: "idm_acp_service_account_entry_managed_by_modify",
         uuid: UUID_IDM_ACP_SERVICE_ACCOUNT_ENTRY_MANAGED_BY_MODIFY,
         description:
             "Builtin IDM Control for allowing entry_managed_by to be set on service account entries",
@@ -73,6 +73,7 @@ pub const DOMAIN_LEVEL_8: DomainVersion = 8;
 /// Domain Level introduced with 1.5.0.
 /// Deprecated as of 1.7.0
 pub const DOMAIN_LEVEL_9: DomainVersion = 9;
+pub const PATCH_LEVEL_2: u32 = 2;
 
 // The minimum level that we can re-migrate from.
 // This should be DOMAIN_TGT_LEVEL minus 2
@@ -85,7 +86,7 @@ pub const DOMAIN_PREVIOUS_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_7;
 // the NEXT level that users will upgrade too.
 pub const DOMAIN_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_8;
 // The current patch level if any out of band fixes are required.
-pub const DOMAIN_TGT_PATCH_LEVEL: u32 = PATCH_LEVEL_1;
+pub const DOMAIN_TGT_PATCH_LEVEL: u32 = PATCH_LEVEL_2;
 // The target domain functional level for the SUBSEQUENT release/dev cycle.
 pub const DOMAIN_TGT_NEXT_LEVEL: DomainVersion = DOMAIN_LEVEL_9;
 // The maximum supported domain functional level
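The constants above describe a two-part versioning scheme: a domain functional level for structural migrations, plus a patch level for out-of-band fixups within a level. The sketch below is a minimal, self-contained illustration of how a server start-up might gate on such constants; the type alias, constant values, and `startup_plan` function are invented for illustration and are not kanidm's actual API.

```rust
type DomainVersion = u32;

const DOMAIN_TGT_LEVEL: DomainVersion = 8;
// Hypothetical: the lowest level we still accept for re-migration (target minus 2).
const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_TGT_LEVEL - 2;
const DOMAIN_TGT_PATCH_LEVEL: u32 = 2;

/// Decide what start-up work is needed for a database at (level, patch_level).
/// Illustrative only; the real server performs these steps inside a write txn.
fn startup_plan(level: DomainVersion, patch_level: u32) -> Result<Vec<&'static str>, String> {
    if level < DOMAIN_MIN_REMIGRATION_LEVEL {
        return Err(format!("domain level {level} is too old to upgrade in one step"));
    }
    let mut plan = Vec::new();
    if level < DOMAIN_TGT_LEVEL {
        plan.push("raise domain level and run level migrations");
    }
    if patch_level < DOMAIN_TGT_PATCH_LEVEL {
        plan.push("raise patch level and run one-shot patch fixups");
    }
    if !plan.is_empty() {
        plan.push("reload and reindex once, after all batched changes");
    }
    Ok(plan)
}

fn main() {
    // e.g. a database at level 7, patch 1, upgrading toward level 8, patch 2.
    println!("{:?}", startup_plan(7, 1));
}
```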
@@ -1119,6 +1119,8 @@ pub static ref SCHEMA_CLASS_DOMAIN_INFO_DL6: SchemaClass = SchemaClass {
             Attribute::PrivateCookieKey,
             Attribute::FernetPrivateKeyStr,
             Attribute::Es256PrivateKeyDer,
+            Attribute::PatchLevel,
+            Attribute::DomainDevelopmentTaint,
         ],
         systemmust: vec![
             Attribute::Name,
@@ -1,5 +1,3 @@
 use std::time::Duration;
 
 use crate::prelude::*;
 
 use kanidm_proto::internal::{
@@ -123,6 +121,9 @@ impl QueryServer {
             "After setting internal domain info"
         );
+
+        let mut reload_required = false;
+
         // If the database domain info is a lower version than our target level, we reload.
         if domain_info_version < domain_target_level {
             write_txn
                 .internal_modify_uuid(
@@ -138,10 +139,7 @@
 
             // Reload if anything in migrations requires it - this triggers the domain migrations
             // which in turn can trigger schema reloads etc.
-            write_txn.reload()?;
-            // Force a reindex here since schema probably changed and we aren't at the
-            // runtime phase where it will trigger on its own yet.
-            write_txn.reindex()?;
+            reload_required = true;
         } else if domain_development_taint {
             // This forces pre-release versions to re-migrate each start up. This solves
             // the domain-version-sprawl issue so that during a development cycle we can
@@ -154,12 +152,12 @@
             // AND
             // We did not already need a version migration as above
             write_txn.domain_remigrate(DOMAIN_PREVIOUS_TGT_LEVEL)?;
-            write_txn.reload()?;
-            // Force a reindex here since schema probably changed and we aren't at the
-            // runtime phase where it will trigger on its own yet.
-            write_txn.reindex()?;
 
+            reload_required = true;
         }
 
         // If we are new enough to support patches, and we are lower than the target patch level
         // then a reload will be applied after we raise the patch level.
         if domain_target_level >= DOMAIN_LEVEL_7 && domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
             write_txn
                 .internal_modify_uuid(
@@ -170,13 +168,25 @@
                 ),
             )
             .map(|()| {
-                warn!("Domain level has been raised to {}", domain_target_level);
+                warn!(
+                    "Domain patch level has been raised to {}",
+                    domain_patch_level
+                );
             })?;
 
-            // Run the patch migrations if any.
-            write_txn.reload()?;
+            reload_required = true;
         };
+
+        // Execute whatever operations we have batched up and ready to go. This is needed
+        // to preserve ordering of the operations - if we reloaded after a remigrate then
+        // we would have skipped the patch level fix which needs to have occured *first*.
+        if reload_required {
+            write_txn.reload()?;
+            // We are not yet at the schema phase where reindexes will auto-trigger
+            // so if one was required, do it now.
+            write_txn.reindex()?;
+        }
 
         // Now set the db/domain devel taint flag to match our current release status
         // if it changes. This is what breaks the cycle of db taint from dev -> stable
         let current_devel_flag = option_env!("KANIDM_PRE_RELEASE").is_some();
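The comment block in the hunk above captures the core design change in migrations.rs: individual migration steps now only record that a reload is needed, and the transaction performs a single reload (and reindex) after all batched operations, so the patch-level fixup is ordered ahead of the reload instead of being skipped. Below is a small self-contained sketch of that "flag now, reload once at the end" pattern; `MigrationTxn` and `run_startup_migrations` are invented stand-ins, not kanidm's real `QueryServerWriteTransaction`.

```rust
/// Illustrative stand-in for a server write transaction; not kanidm's API.
struct MigrationTxn {
    reloads: u32,
    reindexes: u32,
}

impl MigrationTxn {
    fn reload(&mut self) { self.reloads += 1; }
    fn reindex(&mut self) { self.reindexes += 1; }
}

fn run_startup_migrations(txn: &mut MigrationTxn, needs_level_bump: bool, needs_patch_fix: bool) {
    // Each step only flags that a reload is required instead of reloading inline.
    // This keeps the batched modifications ordered: the patch-level fix is queued
    // before any reload happens, rather than being skipped by an early reload.
    let mut reload_required = false;

    if needs_level_bump {
        // ... queue the domain level raise ...
        reload_required = true;
    }
    if needs_patch_fix {
        // ... queue the patch level raise ...
        reload_required = true;
    }

    // One reload (and reindex, since schema may have changed) at the end.
    if reload_required {
        txn.reload();
        txn.reindex();
    }
}

fn main() {
    let mut txn = MigrationTxn { reloads: 0, reindexes: 0 };
    run_startup_migrations(&mut txn, true, true);
    assert_eq!((txn.reloads, txn.reindexes), (1, 1));
    println!("reloads: {}, reindexes: {}", txn.reloads, txn.reindexes);
}
```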
@@ -665,6 +675,81 @@ impl<'a> QueryServerWriteTransaction<'a> {
         Ok(())
     }
 
+    /// Patch Application - This triggers a one-shot fixup task for issue #3178
+    /// to force access controls to re-migrate in existing databases so that they're
+    /// content matches expected values.
+    #[instrument(level = "info", skip_all)]
+    pub(crate) fn migrate_domain_patch_level_2(&mut self) -> Result<(), OperationError> {
+        admin_warn!("applying domain patch 2.");
+
+        debug_assert!(*self.phase >= ServerPhase::SchemaReady);
+
+        let idm_data = [
+            IDM_ACP_ACCOUNT_MAIL_READ_DL6.clone().into(),
+            IDM_ACP_ACCOUNT_SELF_WRITE_V1.clone().into(),
+            IDM_ACP_ACCOUNT_UNIX_EXTEND_V1.clone().into(),
+            IDM_ACP_ACP_MANAGE_V1.clone().into(),
+            IDM_ACP_ALL_ACCOUNTS_POSIX_READ_V1.clone().into(),
+            IDM_ACP_APPLICATION_ENTRY_MANAGER_DL8.clone().into(),
+            IDM_ACP_APPLICATION_MANAGE_DL8.clone().into(),
+            IDM_ACP_DOMAIN_ADMIN_DL8.clone().into(),
+            IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL8.clone().into(),
+            IDM_ACP_GROUP_ENTRY_MANAGED_BY_MODIFY_V1.clone().into(),
+            IDM_ACP_GROUP_ENTRY_MANAGER_V1.clone().into(),
+            IDM_ACP_GROUP_MANAGE_DL6.clone().into(),
+            IDM_ACP_GROUP_READ_V1.clone().into(),
+            IDM_ACP_GROUP_UNIX_MANAGE_V1.clone().into(),
+            IDM_ACP_HP_CLIENT_CERTIFICATE_MANAGER_DL7.clone().into(),
+            IDM_ACP_HP_GROUP_UNIX_MANAGE_V1.clone().into(),
+            IDM_ACP_HP_PEOPLE_CREDENTIAL_RESET_V1.clone().into(),
+            IDM_ACP_HP_SERVICE_ACCOUNT_ENTRY_MANAGED_BY_MODIFY_V1
+                .clone()
+                .into(),
+            IDM_ACP_MAIL_SERVERS_DL8.clone().into(),
+            IDM_ACP_OAUTH2_MANAGE_DL7.clone().into(),
+            IDM_ACP_PEOPLE_CREATE_DL6.clone().into(),
+            IDM_ACP_PEOPLE_CREDENTIAL_RESET_V1.clone().into(),
+            IDM_ACP_PEOPLE_DELETE_V1.clone().into(),
+            IDM_ACP_PEOPLE_MANAGE_V1.clone().into(),
+            IDM_ACP_PEOPLE_PII_MANAGE_V1.clone().into(),
+            IDM_ACP_PEOPLE_PII_READ_V1.clone().into(),
+            IDM_ACP_PEOPLE_READ_V1.clone().into(),
+            IDM_ACP_PEOPLE_SELF_WRITE_MAIL_V1.clone().into(),
+            IDM_ACP_RADIUS_SECRET_MANAGE_V1.clone().into(),
+            IDM_ACP_RADIUS_SERVERS_V1.clone().into(),
+            IDM_ACP_RECYCLE_BIN_REVIVE_V1.clone().into(),
+            IDM_ACP_RECYCLE_BIN_SEARCH_V1.clone().into(),
+            IDM_ACP_SCHEMA_WRITE_ATTRS_V1.clone().into(),
+            IDM_ACP_SCHEMA_WRITE_CLASSES_V1.clone().into(),
+            IDM_ACP_SELF_NAME_WRITE_DL7.clone().into(),
+            IDM_ACP_SELF_READ_DL8.clone().into(),
+            IDM_ACP_SELF_WRITE_DL8.clone().into(),
+            IDM_ACP_SERVICE_ACCOUNT_CREATE_V1.clone().into(),
+            IDM_ACP_SERVICE_ACCOUNT_DELETE_V1.clone().into(),
+            IDM_ACP_SERVICE_ACCOUNT_ENTRY_MANAGED_BY_MODIFY_V1
+                .clone()
+                .into(),
+            IDM_ACP_SERVICE_ACCOUNT_ENTRY_MANAGER_V1.clone().into(),
+            IDM_ACP_SERVICE_ACCOUNT_MANAGE_V1.clone().into(),
+            IDM_ACP_SYNC_ACCOUNT_MANAGE_V1.clone().into(),
+            IDM_ACP_SYSTEM_CONFIG_ACCOUNT_POLICY_MANAGE_V1
+                .clone()
+                .into(),
+        ];
+
+        idm_data
+            .into_iter()
+            .try_for_each(|entry| self.internal_migrate_or_create(entry))
+            .map_err(|err| {
+                error!(?err, "migrate_domain_patch_level_2 -> Error");
+                err
+            })?;
+
+        self.reload()?;
+
+        Ok(())
+    }
+
     #[instrument(level = "info", skip_all)]
     pub fn initialise_schema_core(&mut self) -> Result<(), OperationError> {
         admin_debug!("initialise_schema_core -> start ...");
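The new migrate_domain_patch_level_2 above replays every builtin access control definition through internal_migrate_or_create, which is what makes the fixup safe to run as a one-shot patch: entries that already match are left alone, drifted ones are corrected, and missing ones are created. The sketch below illustrates that idempotent "assert the expected state" pattern with invented BuiltinEntry and Db types rather than kanidm's entry API.

```rust
use std::collections::HashMap;

/// Invented stand-ins for builtin entry definitions and the database; not kanidm's types.
#[derive(Clone, PartialEq, Debug)]
struct BuiltinEntry {
    name: &'static str,
    description: &'static str,
}

#[derive(Default)]
struct Db {
    entries: HashMap<&'static str, BuiltinEntry>,
}

impl Db {
    /// Create the entry if missing, or rewrite it if its content drifted.
    /// Matching entries are untouched, so replaying the full set is safe.
    fn migrate_or_create(&mut self, expected: BuiltinEntry) -> Result<(), String> {
        match self.entries.get(expected.name) {
            Some(current) if *current == expected => Ok(()),
            _ => {
                self.entries.insert(expected.name, expected);
                Ok(())
            }
        }
    }
}

fn main() -> Result<(), String> {
    let expected = [
        BuiltinEntry { name: "idm_acp_oauth2_manage", description: "manage oauth2 integrations" },
        BuiltinEntry { name: "idm_acp_people_manage", description: "manage people" },
    ];

    let mut db = Db::default();
    // Replaying the whole set twice is a no-op the second time - the fixup is idempotent.
    for _ in 0..2 {
        expected.iter().cloned().try_for_each(|e| db.migrate_or_create(e))?;
    }
    println!("{} builtin entries present", db.entries.len());
    Ok(())
}
```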
@@ -786,6 +871,9 @@ impl<'a> QueryServerWriteTransaction<'a> {
             SCHEMA_ATTR_KEY_ACTION_ROTATE_DL6.clone().into(),
             SCHEMA_ATTR_KEY_ACTION_REVOKE_DL6.clone().into(),
             SCHEMA_ATTR_KEY_ACTION_IMPORT_JWS_ES256_DL6.clone().into(),
+            // DL7
+            SCHEMA_ATTR_PATCH_LEVEL_DL7.clone().into(),
+            SCHEMA_ATTR_DOMAIN_DEVELOPMENT_TAINT_DL7.clone().into(),
         ];
 
         let r = idm_schema
@@ -818,7 +906,6 @@ impl<'a> QueryServerWriteTransaction<'a> {
             SCHEMA_CLASS_OAUTH2_RS_BASIC_DL5.clone().into(),
             // DL6
             SCHEMA_CLASS_ACCOUNT_POLICY_DL6.clone().into(),
-            SCHEMA_CLASS_DOMAIN_INFO_DL6.clone().into(),
             SCHEMA_CLASS_SERVICE_ACCOUNT_DL6.clone().into(),
             SCHEMA_CLASS_SYNC_ACCOUNT_DL6.clone().into(),
             SCHEMA_CLASS_GROUP_DL6.clone().into(),
@@ -828,6 +915,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
             SCHEMA_CLASS_KEY_OBJECT_JWT_ES256_DL6.clone().into(),
             SCHEMA_CLASS_KEY_OBJECT_JWE_A128GCM_DL6.clone().into(),
             SCHEMA_CLASS_KEY_OBJECT_INTERNAL_DL6.clone().into(),
+            SCHEMA_CLASS_DOMAIN_INFO_DL6.clone().into(),
         ];
 
         let r: Result<(), _> = idm_schema_classes_dl1
@@ -2075,6 +2075,10 @@ impl<'a> QueryServerWriteTransaction<'a> {
             self.migrate_domain_8_to_9()?;
         }
 
+        if previous_patch_level < PATCH_LEVEL_2 && domain_info_patch_level >= PATCH_LEVEL_2 {
+            self.migrate_domain_patch_level_2()?;
+        }
+
         // This is here to catch when we increase domain levels but didn't create the migration
         // hooks. If this fails it probably means you need to add another migration hook
         // in the above.
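This dispatch hunk is where the new patch migration is wired in: the hook runs exactly once, when a database crosses from a patch level below 2 to level 2 or higher. A tiny self-contained sketch of that one-shot boundary check is below; `patch_hooks_to_run` is a hypothetical helper named for illustration only.

```rust
/// Returns the one-shot migration hooks to run when moving between patch levels.
/// Hypothetical helper, not part of kanidm.
fn patch_hooks_to_run(previous_patch_level: u32, new_patch_level: u32) -> Vec<&'static str> {
    let mut hooks = Vec::new();
    // The hook fires only when crossing the boundary, so databases that have
    // already applied the fixup (previous >= 2) never re-run it.
    if previous_patch_level < 2 && new_patch_level >= 2 {
        hooks.push("migrate_domain_patch_level_2");
    }
    hooks
}

fn main() {
    assert_eq!(patch_hooks_to_run(1, 2), vec!["migrate_domain_patch_level_2"]);
    assert!(patch_hooks_to_run(2, 2).is_empty()); // already applied - runs once
    assert!(patch_hooks_to_run(1, 1).is_empty()); // target patch level not yet raised
    println!("patch hook dispatch behaves as a one-shot");
}
```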