Update our domain TGT level (#2776)

Firstyear 2024-05-17 16:06:14 +10:00 committed by GitHub
parent ac9a90abf3
commit 39ac38e266
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
6 changed files with 30 additions and 537 deletions


@@ -66,6 +66,7 @@ cargo install wasm-bindgen-cli
- [ ] git checkout -b YYYYMMDD-dev-version
- [ ] update version to +1 and add dev tag in ./Cargo.toml
- [ ] update version to +1 and add dev tag in ./Makefile
- [ ] update `DOMAIN_*_LEVEL` in server/lib/src/constants/mod.rs
## Final Release Check List


@@ -60,6 +60,7 @@ pub const DOMAIN_LEVEL_2: DomainVersion = 2;
pub const DOMAIN_LEVEL_3: DomainVersion = 3;
/// Deprecated as of 1.2.0
pub const DOMAIN_LEVEL_4: DomainVersion = 4;
/// Deprecated as of 1.3.0
pub const DOMAIN_LEVEL_5: DomainVersion = 5;
@@ -71,18 +72,22 @@ pub const DOMAIN_LEVEL_6: DomainVersion = 6;
/// Deprecated as of 1.5.0
pub const DOMAIN_LEVEL_7: DomainVersion = 7;
/// Domain Level introduced with 1.4.0.
/// Deprecated as of 1.6.0
pub const DOMAIN_LEVEL_8: DomainVersion = 8;
// The minimum level that we can re-migrate from
pub const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_LEVEL_2;
pub const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_LEVEL_5;
// The minimum supported domain functional level
pub const DOMAIN_MIN_LEVEL: DomainVersion = DOMAIN_TGT_LEVEL;
// The previous releases domain functional level
pub const DOMAIN_PREVIOUS_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_5;
pub const DOMAIN_PREVIOUS_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_6;
// The target supported domain functional level
pub const DOMAIN_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_6;
pub const DOMAIN_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_7;
// The maximum supported domain functional level
pub const DOMAIN_MAX_LEVEL: DomainVersion = DOMAIN_LEVEL_6;
pub const DOMAIN_MAX_LEVEL: DomainVersion = DOMAIN_LEVEL_7;
// The next domain functional level, currently in development
pub const DOMAIN_NEXT_LEVEL: DomainVersion = DOMAIN_LEVEL_7;
pub const DOMAIN_NEXT_LEVEL: DomainVersion = DOMAIN_LEVEL_8;
// On test builds define to 60 seconds
#[cfg(test)]
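
For readers following the level bump, the relationship between these constants can be sketched as a small consistency check. The test below is illustrative only and is not part of this commit; it assumes `DomainVersion` is an unsigned integer type, as declared in this file.

```rust
// Illustrative sketch only - not part of this commit.
// Checks the invariants the DOMAIN_* constants are expected to satisfy
// after each release bump.
#[test]
fn domain_level_constants_are_consistent() {
    // The previous release's target level sits one below the current target.
    assert_eq!(DOMAIN_PREVIOUS_TGT_LEVEL + 1, DOMAIN_TGT_LEVEL);
    // The minimum supported level tracks the current target level.
    assert_eq!(DOMAIN_MIN_LEVEL, DOMAIN_TGT_LEVEL);
    // The maximum level is at least the target, and the next (in development)
    // level is one above the maximum.
    assert!(DOMAIN_MAX_LEVEL >= DOMAIN_TGT_LEVEL);
    assert_eq!(DOMAIN_NEXT_LEVEL, DOMAIN_MAX_LEVEL + 1);
    // Re-migration may only begin from the previous target level or earlier.
    assert!(DOMAIN_MIN_REMIGRATION_LEVEL <= DOMAIN_PREVIOUS_TGT_LEVEL);
}
```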


@@ -564,9 +564,6 @@ mod tests {
"domain_name": ["example.net.au"],
"domain_display_name": ["example.net.au"],
"domain_ssid": ["Example_Wifi"],
"fernet_private_key_str": ["ABCD"],
"es256_private_key_der" : ["MTIz"],
"private_cookie_key" : ["MTIz"],
"version": ["1"]
}
}"#,
@@ -607,9 +604,6 @@ mod tests {
"domain_name": ["example.net.au"],
"domain_display_name": ["example.net.au"],
"domain_ssid": ["Example_Wifi"],
"fernet_private_key_str": ["ABCD"],
"es256_private_key_der" : ["MTIz"],
"private_cookie_key" : ["MTIz"],
"version": ["1"]
}
}"#,
@@ -640,9 +634,6 @@ mod tests {
"domain_name": ["example.net.au"],
"domain_display_name": ["example.net.au"],
"domain_ssid": ["Example_Wifi"],
"fernet_private_key_str": ["ABCD"],
"es256_private_key_der" : ["MTIz"],
"private_cookie_key" : ["MTIz"],
"version": ["1"]
}
}"#,


@@ -503,7 +503,7 @@ pub trait AccessControlsTransaction<'a> {
);
false
} else {
security_access!("passed pres, rem, classes check.");
debug!("passed pres, rem, classes check.");
true
} // if acc == false
}
@@ -688,7 +688,7 @@ pub trait AccessControlsTransaction<'a> {
});
if r {
security_access!("allowed create of {} entries ✅", entries.len());
debug!("allowed create of {} entries ✅", entries.len());
} else {
security_access!("denied ❌ - create may not proceed");
}


@@ -42,90 +42,6 @@ impl QueryServer {
write_txn.reload()?;
// Now, based on the system version apply migrations. You may ask "should you not
// be doing migrations before indexes?". And this is a very good question! The issue
// is that within a migration we must be able to search for content by pres index, and
// that relies on us being indexed! It *is* safe to index content even if the
// migration would cause a value type change (i.e. name changing from iutf8s to iname) because
// the indexing subsystem is schema/value agnostic - the fact that the values still let their
// keys be extracted means that the pres indexes will be valid even though the entries are pending
// migration. We must be sure to NOT use EQ/SUB indexes in the migration code however!
//
// If we are "in the process of being set up" this is 0, and the migrations will have no
// effect as ... there is nothing to migrate! It also allows resetting the version to 0 to
// force db migrations to take place.
let system_info_version = match write_txn.internal_search_uuid(UUID_SYSTEM_INFO) {
Ok(e) => Ok(e.get_ava_single_uint32(Attribute::Version).unwrap_or(0)),
Err(OperationError::NoMatchingEntries) => Ok(0),
Err(r) => Err(r),
}?;
admin_debug!(?system_info_version);
if system_info_version > 0 {
if system_info_version <= 9 {
error!("Your instance of Kanidm is version 1.1.0-alpha.10 or lower, and you are trying to perform a skip upgrade. This will not work.");
error!("You need to upgrade one version at a time to ensure upgrade migrations are performed in the correct order.");
return Err(OperationError::InvalidState);
}
if system_info_version < 9 {
write_txn.migrate_8_to_9()?;
}
if system_info_version < 10 {
write_txn.migrate_9_to_10()?;
}
if system_info_version < 11 {
write_txn.migrate_10_to_11()?;
}
if system_info_version < 12 {
write_txn.migrate_11_to_12()?;
}
if system_info_version < 13 {
write_txn.migrate_12_to_13()?;
}
if system_info_version < 14 {
write_txn.migrate_13_to_14()?;
}
if system_info_version < 15 {
write_txn.migrate_14_to_15()?;
}
if system_info_version < 16 {
write_txn.migrate_15_to_16()?;
}
if system_info_version < 17 {
write_txn.initialise_schema_idm()?;
write_txn.reload()?;
write_txn.migrate_16_to_17()?;
}
if system_info_version < 18 {
// Automate fix for #2391 - during the changes to the access controls
// and the recent domain migration work, this stage was not being run
// if a larger "jump" of migrations was performed such as rc.15 to main.
//
// This allows "forcing" a single once off run of init idm *before*
// the domain migrations kick in again.
write_txn.initialise_idm()?;
}
if system_info_version < 19 {
write_txn.migrate_18_to_19()?;
}
}
// Reload if anything in the (older) system migrations requires it.
write_txn.reload()?;
// This is what tells us if the domain entry existed before or not. This
// is now the primary method of migrations and version detection.
let db_domain_version = match write_txn.internal_search_uuid(UUID_DOMAIN_INFO) {
@@ -300,441 +216,6 @@ impl<'a> QueryServerWriteTransaction<'a> {
}
}
/// Migrate 8 to 9
///
/// This migration updates properties of oauth2 relying servers. First, it changes
/// the former basic secret value to a secret utf8string.
///
/// The second change improves the current scope system to remove the implicit scope type.
#[instrument(level = "debug", skip_all)]
pub fn migrate_8_to_9(&mut self) -> Result<(), OperationError> {
admin_warn!("starting 8 to 9 migration.");
let filt = filter_all!(f_or!([
f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
f_eq(
Attribute::Class,
EntryClass::OAuth2ResourceServerBasic.into()
),
]));
let pre_candidates = self.internal_search(filt).map_err(|e| {
admin_error!(err = ?e, "migrate_8_to_9 internal search failure");
e
})?;
// If there is nothing, we don't need to do anything.
if pre_candidates.is_empty() {
admin_info!("migrate_8_to_9 no entries to migrate, complete");
return Ok(());
}
// Change the value type.
let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
.iter()
.map(|er| {
er.as_ref()
.clone()
.invalidate(self.cid.clone(), &self.trim_cid)
})
.collect();
candidates.iter_mut().try_for_each(|er| {
// Migrate basic secrets if they exist.
let nvs = er
.get_ava_set(Attribute::OAuth2RsBasicSecret)
.and_then(|vs| vs.as_utf8_iter())
.and_then(|vs_iter| {
ValueSetSecret::from_iter(vs_iter.map(|s: &str| s.to_string()))
});
if let Some(nvs) = nvs {
er.set_ava_set(Attribute::OAuth2RsBasicSecret, nvs)
}
// Migrate implicit scopes if they exist.
let nv = if let Some(vs) = er.get_ava_set(Attribute::OAuth2RsImplicitScopes) {
vs.as_oauthscope_set()
.map(|v| Value::OauthScopeMap(UUID_IDM_ALL_PERSONS, v.clone()))
} else {
None
};
if let Some(nv) = nv {
er.add_ava(Attribute::OAuth2RsScopeMap, nv)
}
er.purge_ava(Attribute::OAuth2RsImplicitScopes);
Ok(())
})?;
// Schema check all.
let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, SchemaError> = candidates
.into_iter()
.map(|e| e.validate(&self.schema).map(|e| e.seal(&self.schema)))
.collect();
let norm_cand: Vec<Entry<_, _>> = match res {
Ok(v) => v,
Err(e) => {
admin_error!("migrate_8_to_9 schema error -> {:?}", e);
return Err(OperationError::SchemaViolation(e));
}
};
// Write them back.
self.be_txn
.modify(&self.cid, &pre_candidates, &norm_cand)
.map_err(|e| {
admin_error!("migrate_8_to_9 modification failure -> {:?}", e);
e
})
// Complete
}
/// Migrate 9 to 10
///
/// This forces a load and rewrite of all credentials stored on all accounts so that they are
/// updated to new on-disk formats. This will allow us to purge some older on-disk formats in
/// a future version.
///
/// An extended feature of this is the ability to store multiple TOTPs per entry.
#[instrument(level = "info", skip_all)]
pub fn migrate_9_to_10(&mut self) -> Result<(), OperationError> {
admin_warn!("starting 9 to 10 migration.");
let filter = filter!(f_or!([
f_pres(Attribute::PrimaryCredential),
f_pres(Attribute::UnixPassword),
]));
// This "does nothing" since everything has object anyway, but it forces the entry to be
// loaded and rewritten.
let modlist = ModifyList::new_append(Attribute::Class, EntryClass::Object.to_value());
self.internal_modify(&filter, &modlist)
// Complete
}
/// Migrate 10 to 11
///
/// This forces a load of all credentials, and then examines if any are "passkey" capable. If they
/// are, they are migrated to the passkey type, allowing us to deprecate and remove the older
/// credential behaviour.
///
#[instrument(level = "info", skip_all)]
pub fn migrate_10_to_11(&mut self) -> Result<(), OperationError> {
admin_warn!("starting 9 to 10 migration.");
let filter = filter!(f_pres(Attribute::PrimaryCredential));
let pre_candidates = self.internal_search(filter).map_err(|e| {
admin_error!(err = ?e, "migrate_10_to_11 internal search failure");
e
})?;
// First, filter based on whether any credentials present are actually the legacy
// webauthn type.
let modset: Vec<_> = pre_candidates
.into_iter()
.filter_map(|ent| {
ent.get_ava_single_credential(Attribute::PrimaryCredential)
.and_then(|cred| cred.passkey_ref().ok())
.map(|pk_map| {
let modlist = pk_map
.iter()
.map(|(t, k)| {
Modify::Present(
"passkeys".into(),
Value::Passkey(Uuid::new_v4(), t.clone(), k.clone()),
)
})
.chain(std::iter::once(m_purge(Attribute::PrimaryCredential)))
.collect();
(ent.get_uuid(), ModifyList::new_list(modlist))
})
})
.collect();
// If there is nothing, we don't need to do anything.
if modset.is_empty() {
admin_info!("migrate_10_to_11 no entries to migrate, complete");
return Ok(());
}
// Apply the batch mod.
self.internal_batch_modify(modset.into_iter())
}
/// Migrate 11 to 12
///
/// Rewrite api-tokens from session to a dedicated API token type.
///
#[instrument(level = "info", skip_all)]
pub fn migrate_11_to_12(&mut self) -> Result<(), OperationError> {
admin_warn!("starting 11 to 12 migration.");
// sync_token_session
let filter = filter!(f_or!([
f_pres(Attribute::ApiTokenSession),
f_pres(Attribute::SyncTokenSession),
]));
let mut mod_candidates = self.internal_search_writeable(&filter).map_err(|e| {
admin_error!(err = ?e, "migrate_11_to_12 internal search failure");
e
})?;
// If there is nothing, we don't need to do anything.
if mod_candidates.is_empty() {
admin_info!("migrate_11_to_12 no entries to migrate, complete");
return Ok(());
}
// Convert any api token and sync token sessions from the generic session
// type to the dedicated api token type.
for (_, ent) in mod_candidates.iter_mut() {
if let Some(api_token_session) = ent.pop_ava(Attribute::ApiTokenSession) {
let api_token_session =
api_token_session
.migrate_session_to_apitoken()
.map_err(|e| {
error!(
"Failed to convert {} from session -> apitoken",
Attribute::ApiTokenSession
);
e
})?;
ent.set_ava_set(Attribute::ApiTokenSession, api_token_session);
}
if let Some(sync_token_session) = ent.pop_ava(Attribute::SyncTokenSession) {
let sync_token_session =
sync_token_session
.migrate_session_to_apitoken()
.map_err(|e| {
error!("Failed to convert sync_token_session from session -> apitoken");
e
})?;
ent.set_ava_set(Attribute::SyncTokenSession, sync_token_session);
}
}
// Apply the batch mod.
self.internal_apply_writable(mod_candidates)
}
#[instrument(level = "info", skip_all)]
/// Deletes the Domain info privatecookiekey to force a regeneration as we changed the format
pub fn migrate_12_to_13(&mut self) -> Result<(), OperationError> {
admin_warn!("starting 12 to 13 migration.");
let filter = filter!(f_and!([
f_eq(Attribute::Class, EntryClass::DomainInfo.into()),
f_eq(Attribute::Uuid, PVUUID_DOMAIN_INFO.clone()),
]));
// Delete the existing cookie key to trigger a regeneration.
let modlist = ModifyList::new_purge(Attribute::PrivateCookieKey);
self.internal_modify(&filter, &modlist)
// Complete
}
#[instrument(level = "info", skip_all)]
/// - Deletes the incorrectly added "member" attribute on dynamic groups
pub fn migrate_13_to_14(&mut self) -> Result<(), OperationError> {
admin_warn!("starting 13 to 14 migration.");
let filter = filter!(f_eq(
Attribute::Class,
EntryClass::DynGroup.to_partialvalue()
));
// Delete the incorrectly added "member" attr.
let modlist = ModifyList::new_purge(Attribute::Member);
self.internal_modify(&filter, &modlist)
// Complete
}
#[instrument(level = "info", skip_all)]
/// - Purges the id verification private key attribute, which triggers it to be regenerated
pub fn migrate_14_to_15(&mut self) -> Result<(), OperationError> {
admin_warn!("starting 14 to 15 migration.");
let filter = filter!(f_eq(Attribute::Class, EntryClass::Person.into()));
// Purge the idv private key attr, which triggers it to be regenerated.
let modlist = ModifyList::new_purge(Attribute::IdVerificationEcKey);
self.internal_modify(&filter, &modlist)
// Complete
}
#[instrument(level = "info", skip_all)]
/// - updates the system config to include the new session expiry values.
/// - adds the account policy object to idm_all_accounts
pub fn migrate_15_to_16(&mut self) -> Result<(), OperationError> {
admin_warn!("starting 15 to 16 migration.");
let sysconfig_entry = match self.internal_search_uuid(UUID_SYSTEM_CONFIG) {
Ok(entry) => entry,
Err(OperationError::NoMatchingEntries) => return Ok(()),
Err(e) => return Err(e),
};
let mut all_account_modlist = Vec::with_capacity(3);
all_account_modlist.push(Modify::Present(
Attribute::Class.into(),
EntryClass::AccountPolicy.to_value(),
));
if let Some(auth_exp) = sysconfig_entry.get_ava_single_uint32(Attribute::AuthSessionExpiry)
{
all_account_modlist.push(Modify::Present(
Attribute::AuthSessionExpiry.into(),
Value::Uint32(auth_exp),
));
}
if let Some(priv_exp) = sysconfig_entry.get_ava_single_uint32(Attribute::PrivilegeExpiry) {
all_account_modlist.push(Modify::Present(
Attribute::PrivilegeExpiry.into(),
Value::Uint32(priv_exp),
));
}
self.internal_batch_modify(
[
(
UUID_SYSTEM_CONFIG,
ModifyList::new_list(vec![
Modify::Purged(Attribute::AuthSessionExpiry.into()),
Modify::Purged(Attribute::PrivilegeExpiry.into()),
]),
),
(
UUID_IDM_ALL_ACCOUNTS,
ModifyList::new_list(all_account_modlist),
),
]
.into_iter(),
)
// Complete
}
#[instrument(level = "info", skip_all)]
/// This migration will:
/// * ensure that all access controls have the needed group receiver type
/// * delete legacy entries that are no longer needed.
pub fn migrate_16_to_17(&mut self) -> Result<(), OperationError> {
admin_warn!("starting 16 to 17 migration.");
let filter = filter!(f_and!([
f_or!([
f_pres(Attribute::AcpReceiverGroup),
f_pres(Attribute::AcpTargetScope),
]),
f_eq(
Attribute::Class,
EntryClass::AccessControlProfile.to_partialvalue()
)
]));
// Ensure the access controls gain the new receiver group and target scope classes.
let modlist = ModifyList::new_list(vec![
Modify::Present(
Attribute::Class.into(),
EntryClass::AccessControlReceiverGroup.to_value(),
),
Modify::Present(
Attribute::Class.into(),
EntryClass::AccessControlTargetScope.to_value(),
),
]);
self.internal_modify(&filter, &modlist)?;
let delete_entries = [
UUID_IDM_ACP_OAUTH2_READ_PRIV_V1,
UUID_IDM_ACP_RADIUS_SECRET_READ_PRIV_V1,
UUID_IDM_ACP_PEOPLE_ACCOUNT_PASSWORD_IMPORT_PRIV_V1,
UUID_IDM_ACP_SYSTEM_CONFIG_SESSION_EXP_PRIV_V1,
UUID_IDM_ACP_HP_GROUP_WRITE_PRIV_V1,
UUID_IDM_ACP_HP_GROUP_MANAGE_PRIV_V1,
UUID_IDM_ACP_HP_PEOPLE_WRITE_PRIV_V1,
UUID_IDM_ACP_ACCOUNT_READ_PRIV_V1,
UUID_IDM_ACP_ACCOUNT_WRITE_PRIV_V1,
UUID_IDM_ACP_ACCOUNT_MANAGE_PRIV_V1,
UUID_IDM_ACP_HP_ACCOUNT_READ_PRIV_V1,
UUID_IDM_ACP_HP_ACCOUNT_WRITE_PRIV_V1,
UUID_IDM_ACP_GROUP_WRITE_PRIV_V1,
UUID_IDM_ACP_HP_PEOPLE_EXTEND_PRIV_V1,
UUID_IDM_ACP_PEOPLE_EXTEND_PRIV_V1,
UUID_IDM_ACP_HP_ACCOUNT_MANAGE_PRIV_V1,
UUID_IDM_HP_ACP_SERVICE_ACCOUNT_INTO_PERSON_MIGRATE_V1,
UUID_IDM_HP_ACP_ACCOUNT_UNIX_EXTEND_PRIV_V1,
UUID_IDM_HP_ACP_SYNC_ACCOUNT_MANAGE_PRIV_V1,
UUID_IDM_ACP_ACCOUNT_UNIX_EXTEND_PRIV_V1,
UUID_IDM_ACP_RADIUS_SECRET_WRITE_PRIV_V1,
UUID_IDM_HP_ACP_GROUP_UNIX_EXTEND_PRIV_V1,
UUID_IDM_ACP_GROUP_UNIX_EXTEND_PRIV_V1,
UUID_IDM_ACP_HP_PEOPLE_READ_PRIV_V1,
UUID_IDM_ACP_PEOPLE_WRITE_PRIV_V1,
UUID_IDM_HP_SYNC_ACCOUNT_MANAGE_PRIV,
UUID_IDM_RADIUS_SECRET_WRITE_PRIV_V1,
UUID_IDM_RADIUS_SECRET_READ_PRIV_V1,
UUID_IDM_PEOPLE_ACCOUNT_PASSWORD_IMPORT_PRIV,
UUID_IDM_PEOPLE_EXTEND_PRIV,
UUID_IDM_HP_PEOPLE_EXTEND_PRIV,
UUID_IDM_HP_GROUP_MANAGE_PRIV,
UUID_IDM_HP_GROUP_WRITE_PRIV,
UUID_IDM_HP_SERVICE_ACCOUNT_INTO_PERSON_MIGRATE_PRIV,
UUID_IDM_GROUP_ACCOUNT_POLICY_MANAGE_PRIV,
UUID_IDM_HP_GROUP_UNIX_EXTEND_PRIV,
UUID_IDM_GROUP_WRITE_PRIV,
UUID_IDM_GROUP_UNIX_EXTEND_PRIV,
UUID_IDM_HP_ACCOUNT_UNIX_EXTEND_PRIV,
UUID_IDM_ACCOUNT_UNIX_EXTEND_PRIV,
UUID_IDM_PEOPLE_WRITE_PRIV,
UUID_IDM_HP_PEOPLE_READ_PRIV,
UUID_IDM_HP_PEOPLE_WRITE_PRIV,
UUID_IDM_PEOPLE_WRITE_PRIV,
UUID_IDM_ACCOUNT_READ_PRIV,
UUID_IDM_ACCOUNT_MANAGE_PRIV,
UUID_IDM_ACCOUNT_WRITE_PRIV,
UUID_IDM_HP_ACCOUNT_READ_PRIV,
UUID_IDM_HP_ACCOUNT_MANAGE_PRIV,
UUID_IDM_HP_ACCOUNT_WRITE_PRIV,
];
let res: Result<(), _> = delete_entries
.into_iter()
.try_for_each(|entry_uuid| self.internal_delete_uuid_if_exists(entry_uuid));
if res.is_ok() {
admin_debug!("migrate 16 to 17 -> result Ok!");
} else {
admin_error!(?res, "migrate 16 to 17 -> result");
}
debug_assert!(res.is_ok());
res
}
#[instrument(level = "info", skip_all)]
/// Automate fix for #2470 - force the domain version to be lowered, to allow
/// it to re-raise and force re-run migrations. This is because we accidentally
/// were "overwriting" the changes from domain migrations on startup due to
/// a logic error. At this point in the startup, the server phase is lower than
/// domain info ready, so the change won't immediately trigger remigrations. Rather
/// it will force them later in the startup.
pub fn migrate_18_to_19(&mut self) -> Result<(), OperationError> {
admin_warn!("starting 18 to 19 migration.");
debug_assert!(*self.phase < ServerPhase::DomainInfoReady);
if *self.phase >= ServerPhase::DomainInfoReady {
error!("Unable to perform system migration as server phase is greater or equal to domain info ready");
return Err(OperationError::MG0003ServerPhaseInvalidForMigration);
};
self.internal_modify_uuid(
UUID_DOMAIN_INFO,
&ModifyList::new_purge_and_set(Attribute::Version, Value::new_uint32(DOMAIN_LEVEL_2)),
)
.map(|()| {
warn!(
"Domain level has been temporarily lowered to {}",
DOMAIN_LEVEL_2
);
})
}
#[instrument(level = "info", skip_all)]
/// This migration will
/// * Trigger a "once off" mfa account policy rule on all persons.
@@ -1005,7 +486,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
/// Migration domain level 6 to 7
#[instrument(level = "info", skip_all)]
pub(crate) fn migrate_domain_6_to_7(&mut self) -> Result<(), OperationError> {
if !cfg!(test) {
if !cfg!(test) && DOMAIN_MAX_LEVEL < DOMAIN_LEVEL_7 {
error!("Unable to raise domain level from 6 to 7.");
return Err(OperationError::MG0004DomainLevelInDevelopment);
}
@@ -1139,6 +620,17 @@ impl<'a> QueryServerWriteTransaction<'a> {
Ok(())
}
/// Migration domain level 7 to 8
#[instrument(level = "info", skip_all)]
pub(crate) fn migrate_domain_7_to_8(&mut self) -> Result<(), OperationError> {
if !cfg!(test) && DOMAIN_MAX_LEVEL < DOMAIN_LEVEL_8 {
error!("Unable to raise domain level from 7 to 8.");
return Err(OperationError::MG0004DomainLevelInDevelopment);
}
Ok(())
}
#[instrument(level = "info", skip_all)]
pub fn initialise_schema_core(&mut self) -> Result<(), OperationError> {
admin_debug!("initialise_schema_core -> start ...");


@@ -1804,10 +1804,14 @@ impl<'a> QueryServerWriteTransaction<'a> {
self.migrate_domain_6_to_7()?;
}
if previous_version <= DOMAIN_LEVEL_7 && domain_info_version >= DOMAIN_LEVEL_8 {
self.migrate_domain_7_to_8()?;
}
// This is here to catch cases where we increase domain levels but don't create the migration
// hooks. If this fails, it probably means you need to add another migration hook
// above.
debug_assert!(domain_info_version <= DOMAIN_LEVEL_7);
debug_assert!(domain_info_version <= DOMAIN_MAX_LEVEL);
Ok(())
}