mirror of https://github.com/kanidm/kanidm.git
synced 2025-05-24 18:03:54 +02:00
More cleanup
This commit is contained in:
parent 722153eedf
commit 3d43925123
server/lib/src
@@ -364,36 +364,7 @@ lazy_static! {
     };
 
-    /// This must be the last group to init to include the UUID of the other high priv groups.
-    pub static ref IDM_HIGH_PRIVILEGE_V1: BuiltinGroup = BuiltinGroup {
-        name: "idm_high_privilege",
-        uuid: UUID_IDM_HIGH_PRIVILEGE,
-        entry_managed_by: Some(UUID_IDM_ACCESS_CONTROL_ADMINS),
-        description: "Builtin IDM provided groups with high levels of access that should be audited and limited in modification.",
-        members: vec![
-            UUID_SYSTEM_ADMINS,
-            UUID_IDM_ADMINS,
-            UUID_DOMAIN_ADMINS,
-            UUID_IDM_SERVICE_DESK,
-            UUID_IDM_RECYCLE_BIN_ADMINS,
-            UUID_IDM_SCHEMA_ADMINS,
-            UUID_IDM_ACCESS_CONTROL_ADMINS,
-            UUID_IDM_OAUTH2_ADMINS,
-            UUID_IDM_RADIUS_ADMINS,
-            UUID_IDM_ACCOUNT_POLICY_ADMINS,
-            UUID_IDM_RADIUS_SERVERS,
-            UUID_IDM_GROUP_ADMINS,
-            UUID_IDM_UNIX_ADMINS,
-            UUID_IDM_PEOPLE_PII_READ,
-            UUID_IDM_PEOPLE_ADMINS,
-            UUID_IDM_PEOPLE_ON_BOARDING,
-            UUID_IDM_SERVICE_ACCOUNT_ADMINS,
-            UUID_IDM_HIGH_PRIVILEGE,
-        ],
-        ..Default::default()
-    };
-
     /// This must be the last group to init to include the UUID of the other high priv groups.
-    pub static ref IDM_HIGH_PRIVILEGE_DL7: BuiltinGroup = BuiltinGroup {
+    pub static ref IDM_HIGH_PRIVILEGE_DL8: BuiltinGroup = BuiltinGroup {
         name: "idm_high_privilege",
         uuid: UUID_IDM_HIGH_PRIVILEGE,
         entry_managed_by: Some(UUID_IDM_ACCESS_CONTROL_ADMINS),

@@ -417,12 +388,14 @@ lazy_static! {
             UUID_IDM_PEOPLE_ON_BOARDING,
             UUID_IDM_SERVICE_ACCOUNT_ADMINS,
             UUID_IDM_CLIENT_CERTIFICATE_ADMINS,
+            UUID_IDM_APPLICATION_ADMINS,
+            UUID_IDM_MAIL_ADMINS,
             UUID_IDM_HIGH_PRIVILEGE,
         ],
         ..Default::default()
     };
 
-    pub static ref BUILTIN_GROUP_APPLICATION_ADMINS: BuiltinGroup = BuiltinGroup {
+    pub static ref BUILTIN_GROUP_APPLICATION_ADMINS_DL8: BuiltinGroup = BuiltinGroup {
         name: "idm_application_admins",
         uuid: UUID_IDM_APPLICATION_ADMINS,
         description: "Builtin Application Administration Group.",

@@ -449,7 +422,6 @@ pub fn idm_builtin_non_admin_groups() -> Vec<&'static BuiltinGroup> {
         &BUILTIN_GROUP_PEOPLE_PII_READ,
         &BUILTIN_GROUP_PEOPLE_ON_BOARDING,
         &BUILTIN_GROUP_SERVICE_ACCOUNT_ADMINS,
-        &BUILTIN_GROUP_APPLICATION_ADMINS,
         &BUILTIN_GROUP_MAIL_SERVICE_ADMINS_DL8,
         &IDM_GROUP_ADMINS_V1,
         &IDM_ALL_PERSONS,

@@ -459,9 +431,10 @@ pub fn idm_builtin_non_admin_groups() -> Vec<&'static BuiltinGroup> {
         &BUILTIN_GROUP_PEOPLE_SELF_NAME_WRITE_DL7,
         &IDM_PEOPLE_SELF_MAIL_WRITE_DL7,
         &BUILTIN_GROUP_CLIENT_CERTIFICATE_ADMINS_DL7,
+        &BUILTIN_GROUP_APPLICATION_ADMINS_DL8,
         // Write deps on read, so write must be added first.
         // All members must exist before we write HP
-        &IDM_HIGH_PRIVILEGE_DL7,
+        &IDM_HIGH_PRIVILEGE_DL8,
         // other things
         &IDM_UI_ENABLE_EXPERIMENTAL_FEATURES,
         &IDM_ACCOUNT_MAIL_READ,
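The "must be the last group to init" comments above encode an ordering dependency: idm_high_privilege nests the other high-privilege groups, so every member must already exist when it is created. A minimal sketch of that invariant, using a hypothetical model rather than the kanidm BuiltinGroup API:

use std::collections::HashSet;

// Hypothetical minimal model: a builtin group and the names it nests.
struct Group {
    name: &'static str,
    members: Vec<&'static str>,
}

// Verify an init order: every member must have been created before any
// group that references it, which is why idm_high_privilege comes last.
fn check_init_order(groups: &[Group]) -> Result<(), String> {
    let mut created = HashSet::new();
    for g in groups {
        for m in &g.members {
            if !created.contains(m) {
                return Err(format!("{} initialised before member {}", g.name, m));
            }
        }
        created.insert(g.name);
    }
    Ok(())
}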
@@ -54,10 +54,6 @@ pub type DomainVersion = u32;
 /// previously.
 pub const DOMAIN_LEVEL_0: DomainVersion = 0;
 
-/// Domain Level introduced with 1.2.0.
-/// Deprecated as of 1.4.0
-pub const DOMAIN_LEVEL_6: DomainVersion = 6;
-
 /// Domain Level introduced with 1.3.0.
 /// Deprecated as of 1.5.0
 pub const DOMAIN_LEVEL_7: DomainVersion = 7;

@@ -81,7 +77,7 @@ pub const DOMAIN_LEVEL_11: DomainVersion = 11;
 
 // The minimum level that we can re-migrate from.
 // This should be DOMAIN_TGT_LEVEL minus 2
-pub const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_LEVEL_6;
+pub const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_LEVEL_8;
 // The minimum supported domain functional level (for replication)
 pub const DOMAIN_MIN_LEVEL: DomainVersion = DOMAIN_TGT_LEVEL;
 // The previous releases domain functional level
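The comment above pins DOMAIN_MIN_REMIGRATION_LEVEL to DOMAIN_TGT_LEVEL minus 2. A sketch of enforcing that invariant at compile time; the level values here are illustrative assumptions, not the crate's real constants:

// Illustrative stand-ins only; the real values live in the constants module.
type DomainVersion = u32;
const DOMAIN_LEVEL_8: DomainVersion = 8;
const DOMAIN_LEVEL_10: DomainVersion = 10;
const DOMAIN_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_10; // assumed target for this sketch
const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_LEVEL_8;

// "This should be DOMAIN_TGT_LEVEL minus 2", checked at compile time.
const _: () = assert!(DOMAIN_MIN_REMIGRATION_LEVEL == DOMAIN_TGT_LEVEL - 2);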
@@ -5,7 +5,7 @@ use base64::{
     Engine as _,
 };
 
-use compact_jwt::{Jws, JwsCompact, JwsEs256Signer, JwsSigner};
+use compact_jwt::{Jws, JwsCompact};
 use kanidm_proto::internal::{ApiTokenPurpose, ScimSyncToken};
 use kanidm_proto::scim_v1::*;
 use std::collections::{BTreeMap, BTreeSet};

@@ -25,7 +25,6 @@ pub(crate) struct SyncAccount {
     pub name: String,
     pub uuid: Uuid,
     pub sync_tokens: BTreeMap<Uuid, ApiToken>,
-    pub jws_key: Option<JwsEs256Signer>,
 }
 
 macro_rules! try_from_entry {

@@ -40,15 +39,6 @@ macro_rules! try_from_entry {
             .map(|s| s.to_string())
             .ok_or(OperationError::MissingAttribute(Attribute::Name))?;
 
-        let jws_key = $value
-            .get_ava_single_jws_key_es256(Attribute::JwsEs256PrivateKey)
-            .cloned()
-            .map(|jws_key| {
-                jws_key
-                    .set_sign_option_embed_jwk(true)
-                    .set_sign_option_legacy_kid(true)
-            });
-
         let sync_tokens = $value
             .get_ava_as_apitoken_map(Attribute::SyncTokenSession)
             .cloned()

@@ -60,7 +50,6 @@ macro_rules! try_from_entry {
             name,
             uuid,
             sync_tokens,
-            jws_key,
         })
     }};
 }

@@ -123,16 +112,6 @@ impl IdmServerProxyWriteTransaction<'_> {
         gte: &GenerateScimSyncTokenEvent,
         ct: Duration,
     ) -> Result<JwsCompact, OperationError> {
-        // Get the target signing key.
-        let sync_account = self
-            .qs_write
-            .internal_search_uuid(gte.target)
-            .and_then(|entry| SyncAccount::try_from_entry_rw(&entry))
-            .map_err(|e| {
-                admin_error!(?e, "Failed to search service account");
-                e
-            })?;
-
         let session_id = Uuid::new_v4();
         let issued_at = time::OffsetDateTime::UNIX_EPOCH + ct;
 

@@ -185,25 +164,9 @@ impl IdmServerProxyWriteTransaction<'_> {
             })?;
 
         // The modify succeeded and was allowed, now sign the token for return.
-        if self.qs_write.get_domain_version() < DOMAIN_LEVEL_6 {
-            sync_account
-                .jws_key
-                .as_ref()
-                .ok_or_else(|| {
-                    admin_error!("Unable to sign sync token, no sync keys available");
-                    OperationError::CryptographyError
-                })
-                .and_then(|jws_key| {
-                    jws_key.sign(&token).map_err(|err| {
-                        admin_error!(?err, "Unable to sign sync token");
-                        OperationError::CryptographyError
-                    })
-                })
-        } else {
-            self.qs_write
-                .get_domain_key_object_handle()?
-                .jws_es256_sign(&token, ct)
-        }
+        self.qs_write
+            .get_domain_key_object_handle()?
+            .jws_es256_sign(&token, ct)
         // Done!
     }
 
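Both signing hunks above collapse a domain-version branch into a single path: the per-account JwsEs256Signer fallback is gone and every sync token is signed through the domain key object. A sketch of the resulting shape, with hypothetical stand-ins for kanidm's internal types:

use std::time::Duration;

// Hypothetical stand-ins for kanidm's internal types, for illustration only.
struct Jws;
struct JwsCompact;
struct CryptoError;

trait DomainKeyHandle {
    // Mirrors the shape of get_domain_key_object_handle()?.jws_es256_sign(&token, ct).
    fn jws_es256_sign(&self, token: &Jws, ct: Duration) -> Result<JwsCompact, CryptoError>;
}

// After this commit there is exactly one signing path: no per-account
// fallback key, no domain-version branch.
fn sign_token(
    handle: &dyn DomainKeyHandle,
    token: &Jws,
    ct: Duration,
) -> Result<JwsCompact, CryptoError> {
    handle.jws_es256_sign(token, ct)
}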
@@ -1,7 +1,7 @@
 use std::collections::BTreeMap;
 use std::time::Duration;
 
-use compact_jwt::{Jws, JwsCompact, JwsEs256Signer, JwsSigner};
+use compact_jwt::{Jws, JwsCompact};
 use kanidm_proto::internal::ApiToken as ProtoApiToken;
 use time::OffsetDateTime;
 

@@ -23,15 +23,6 @@ macro_rules! try_from_entry {
             ));
         }
 
-        let jws_key = $value
-            .get_ava_single_jws_key_es256(Attribute::JwsEs256PrivateKey)
-            .cloned()
-            .map(|jws_key| {
-                jws_key
-                    .set_sign_option_embed_jwk(true)
-                    .set_sign_option_legacy_kid(true)
-            });
-
         let api_tokens = $value
             .get_ava_as_apitoken_map(Attribute::ApiTokenSession)
             .cloned()

@@ -48,7 +39,6 @@ macro_rules! try_from_entry {
             valid_from,
             expire,
             api_tokens,
-            jws_key,
         })
     }};
 }

@@ -60,8 +50,6 @@ pub struct ServiceAccount {
     pub expire: Option<OffsetDateTime>,
 
     pub api_tokens: BTreeMap<Uuid, ApiToken>,
-
-    pub jws_key: Option<JwsEs256Signer>,
 }
 
 impl ServiceAccount {

@@ -253,25 +241,9 @@ impl IdmServerProxyWriteTransaction<'_> {
                 err
             })?;
 
-        if self.qs_write.get_domain_version() < DOMAIN_LEVEL_6 {
-            service_account
-                .jws_key
-                .as_ref()
-                .ok_or_else(|| {
-                    admin_error!("Unable to sign sync token, no sync keys available");
-                    OperationError::CryptographyError
-                })
-                .and_then(|jws_key| {
-                    jws_key.sign(&token).map_err(|err| {
-                        admin_error!(?err, "Unable to sign sync token");
-                        OperationError::CryptographyError
-                    })
-                })
-        } else {
-            self.qs_write
-                .get_domain_key_object_handle()?
-                .jws_es256_sign(&token, ct)
-        }
+        self.qs_write
+            .get_domain_key_object_handle()?
+            .jws_es256_sign(&token, ct)
     }
 
     pub fn service_account_destroy_api_token(
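The service-account changes mirror the sync-account ones: the try_from_entry! macro now only extracts ordinary attributes, with no key material. A toy sketch of that macro pattern, using a hypothetical entry type:

use std::collections::BTreeMap;

// Hypothetical mini-version of the try_from_entry! pattern: pull required
// attributes out of a generic entry, failing with a typed error.
struct Entry {
    attrs: BTreeMap<&'static str, String>,
}

#[derive(Debug)]
enum OperationError {
    MissingAttribute(&'static str),
}

macro_rules! try_from_entry {
    ($value:expr) => {{
        let name = $value
            .attrs
            .get("name")
            .cloned()
            .ok_or(OperationError::MissingAttribute("name"))?;
        Ok::<String, OperationError>(name)
    }};
}

fn service_account_name(e: &Entry) -> Result<String, OperationError> {
    try_from_entry!(e)
}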
@@ -62,10 +62,7 @@ pub const GID_UNUSED_D_MAX: u32 = 0x7fff_ffff;
 
 pub struct GidNumber {}
 
-fn apply_gidnumber<T: Clone>(
-    e: &mut Entry<EntryInvalid, T>,
-    domain_version: DomainVersion,
-) -> Result<(), OperationError> {
+fn apply_gidnumber<T: Clone>(e: &mut Entry<EntryInvalid, T>) -> Result<(), OperationError> {
     if (e.attribute_equality(Attribute::Class, &EntryClass::PosixGroup.into())
         || e.attribute_equality(Attribute::Class, &EntryClass::PosixAccount.into()))
         && !e.attribute_pres(Attribute::GidNumber)

@@ -89,48 +86,33 @@ fn apply_gidnumber<T: Clone>(
         e.set_ava(&Attribute::GidNumber, once(gid_v));
         Ok(())
     } else if let Some(gid) = e.get_ava_single_uint32(Attribute::GidNumber) {
-        if domain_version <= DOMAIN_LEVEL_6 {
-            if gid < GID_REGULAR_USER_MIN {
-                error!(
-                    "Requested GID ({}) overlaps a system range. Allowed ranges are {} to {}, {} to {} and {} to {}",
-                    gid,
-                    GID_REGULAR_USER_MIN, GID_REGULAR_USER_MAX,
-                    GID_UNUSED_C_MIN, GID_UNUSED_C_MAX,
-                    GID_UNUSED_D_MIN, GID_UNUSED_D_MAX
-                );
-                Err(OperationError::PL0001GidOverlapsSystemRange)
-            } else {
-                Ok(())
-            }
-        } else {
-            // If they provided us with a gid number, ensure it's in a safe range.
-            if (GID_REGULAR_USER_MIN..=GID_REGULAR_USER_MAX).contains(&gid)
-                || (GID_UNUSED_A_MIN..=GID_UNUSED_A_MAX).contains(&gid)
-                || (GID_UNUSED_B_MIN..= GID_UNUSED_B_MAX).contains(&gid)
-                || (GID_UNUSED_C_MIN..=GID_UNUSED_C_MAX).contains(&gid)
-                // We won't ever generate an id in the nspawn range, but we do secretly allow
-                // it to be set for compatibility with services like freeipa or openldap. TBH
-                // most people don't even use systemd nspawn anyway ...
-                //
-                // I made this design choice to avoid a tunable that may confuse people to
-                // its purpose. This way things "just work" for imports and existing systems
-                // but we do the right thing in the future.
-                || (GID_NSPAWN_MIN..=GID_NSPAWN_MAX).contains(&gid)
-                || (GID_UNUSED_D_MIN..=GID_UNUSED_D_MAX).contains(&gid)
-            {
-                Ok(())
-            } else {
-                // Note that here we don't advertise that we allow the nspawn range to be set, even
-                // though we do allow it.
-                error!(
-                    "Requested GID ({}) overlaps a system range. Allowed ranges are {} to {}, {} to {} and {} to {}",
-                    gid,
-                    GID_REGULAR_USER_MIN, GID_REGULAR_USER_MAX,
-                    GID_UNUSED_C_MIN, GID_UNUSED_C_MAX,
-                    GID_UNUSED_D_MIN, GID_UNUSED_D_MAX
-                );
-                Err(OperationError::PL0001GidOverlapsSystemRange)
-            }
-        }
+        // If they provided us with a gid number, ensure it's in a safe range.
+        if (GID_REGULAR_USER_MIN..=GID_REGULAR_USER_MAX).contains(&gid)
+            || (GID_UNUSED_A_MIN..=GID_UNUSED_A_MAX).contains(&gid)
+            || (GID_UNUSED_B_MIN..= GID_UNUSED_B_MAX).contains(&gid)
+            || (GID_UNUSED_C_MIN..=GID_UNUSED_C_MAX).contains(&gid)
+            // We won't ever generate an id in the nspawn range, but we do secretly allow
+            // it to be set for compatibility with services like freeipa or openldap. TBH
+            // most people don't even use systemd nspawn anyway ...
+            //
+            // I made this design choice to avoid a tunable that may confuse people to
+            // its purpose. This way things "just work" for imports and existing systems
+            // but we do the right thing in the future.
+            || (GID_NSPAWN_MIN..=GID_NSPAWN_MAX).contains(&gid)
+            || (GID_UNUSED_D_MIN..=GID_UNUSED_D_MAX).contains(&gid)
+        {
+            Ok(())
+        } else {
+            // Note that here we don't advertise that we allow the nspawn range to be set, even
+            // though we do allow it.
+            error!(
+                "Requested GID ({}) overlaps a system range. Allowed ranges are {} to {}, {} to {} and {} to {}",
+                gid,
+                GID_REGULAR_USER_MIN, GID_REGULAR_USER_MAX,
+                GID_UNUSED_C_MIN, GID_UNUSED_C_MAX,
+                GID_UNUSED_D_MIN, GID_UNUSED_D_MAX
+            );
+            Err(OperationError::PL0001GidOverlapsSystemRange)
+        }
     } else {
         Ok(())

@@ -144,37 +126,31 @@ impl Plugin for GidNumber {
 
     #[instrument(level = "debug", name = "gidnumber_pre_create_transform", skip_all)]
     fn pre_create_transform(
-        qs: &mut QueryServerWriteTransaction,
+        _qs: &mut QueryServerWriteTransaction,
         cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
         _ce: &CreateEvent,
     ) -> Result<(), OperationError> {
-        let dv = qs.get_domain_version();
-        cand.iter_mut()
-            .try_for_each(|cand| apply_gidnumber(cand, dv))
+        cand.iter_mut().try_for_each(apply_gidnumber)
     }
 
     #[instrument(level = "debug", name = "gidnumber_pre_modify", skip_all)]
     fn pre_modify(
-        qs: &mut QueryServerWriteTransaction,
+        _qs: &mut QueryServerWriteTransaction,
         _pre_cand: &[Arc<EntrySealedCommitted>],
         cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
         _me: &ModifyEvent,
    ) -> Result<(), OperationError> {
-        let dv = qs.get_domain_version();
-        cand.iter_mut()
-            .try_for_each(|cand| apply_gidnumber(cand, dv))
+        cand.iter_mut().try_for_each(apply_gidnumber)
     }
 
     #[instrument(level = "debug", name = "gidnumber_pre_batch_modify", skip_all)]
     fn pre_batch_modify(
-        qs: &mut QueryServerWriteTransaction,
+        _qs: &mut QueryServerWriteTransaction,
         _pre_cand: &[Arc<EntrySealedCommitted>],
         cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
         _me: &BatchModifyEvent,
     ) -> Result<(), OperationError> {
-        let dv = qs.get_domain_version();
-        cand.iter_mut()
-            .try_for_each(|cand| apply_gidnumber(cand, dv))
+        cand.iter_mut().try_for_each(apply_gidnumber)
     }
 }
 

@@ -186,9 +162,7 @@ mod tests {
     };
     use crate::prelude::*;
 
-    use kanidm_proto::internal::DomainUpgradeCheckStatus as ProtoDomainUpgradeCheckStatus;
-
-    #[qs_test(domain_level=DOMAIN_LEVEL_7)]
+    #[qs_test]
     async fn test_gidnumber_generate(server: &QueryServer) {
         let mut server_txn = server.write(duration_from_epoch_now()).await.expect("txn");
 

@@ -423,85 +397,4 @@ mod tests {
 
         assert!(server_txn.commit().is_ok());
     }
-
-    #[qs_test(domain_level=DOMAIN_LEVEL_6)]
-    async fn test_gidnumber_domain_level_6(server: &QueryServer) {
-        let mut server_txn = server.write(duration_from_epoch_now()).await.expect("txn");
-
-        // This will be INVALID in DL 7 but it's allowed for DL6
-        let user_a_uuid = uuid!("d90fb0cb-6785-4f36-94cb-e364d9c13255");
-        {
-            let op_result = server_txn.internal_create(vec![entry_init!(
-                (Attribute::Class, EntryClass::Account.to_value()),
-                (Attribute::Class, EntryClass::PosixAccount.to_value()),
-                (Attribute::Name, Value::new_iname("testperson_2")),
-                (Attribute::Uuid, Value::Uuid(user_a_uuid)),
-                // NOTE HERE: We do GID_UNUSED_A_MIN minus 1 which isn't accepted
-                // on DL7
-                (Attribute::GidNumber, Value::Uint32(GID_UNUSED_A_MIN - 1)),
-                (Attribute::Description, Value::new_utf8s("testperson")),
-                (Attribute::DisplayName, Value::new_utf8s("testperson"))
-            )]);
-
-            assert!(op_result.is_ok());
-
-            let user_a = server_txn
-                .internal_search_uuid(user_a_uuid)
-                .expect("Unable to access user");
-
-            let user_a_uid = user_a
-                .get_ava_single_uint32(Attribute::GidNumber)
-                .expect("gidnumber not present on account");
-
-            assert_eq!(user_a_uid, GID_UNUSED_A_MIN - 1);
-        }
-
-        assert!(server_txn.commit().is_ok());
-
-        // Now, do the DL6 upgrade check - will FAIL because the above user has an invalid ID.
-        let mut server_txn = server.read().await.unwrap();
-
-        let check_item = server_txn
-            .domain_upgrade_check_6_to_7_gidnumber()
-            .expect("Failed to perform migration check.");
-
-        assert_eq!(
-            check_item.status,
-            ProtoDomainUpgradeCheckStatus::Fail6To7Gidnumber
-        );
-
-        drop(server_txn);
-
-        let mut server_txn = server.write(duration_from_epoch_now()).await.expect("txn");
-
-        // Test rejection of important gid values.
-        let user_b_uuid = uuid!("33afc396-2434-47e5-b143-05176148b50e");
-        // Test that an entry when modified to have posix attributes, if a gidnumber
-        // is provided then it is respected.
-        {
-            let op_result = server_txn.internal_create(vec![entry_init!(
-                (Attribute::Class, EntryClass::Account.to_value()),
-                (Attribute::Class, EntryClass::Person.to_value()),
-                (Attribute::Name, Value::new_iname("testperson_6")),
-                (Attribute::Uuid, Value::Uuid(user_b_uuid)),
-                (Attribute::Description, Value::new_utf8s("testperson")),
-                (Attribute::DisplayName, Value::new_utf8s("testperson"))
-            )]);
-
-            assert!(op_result.is_ok());
-
-            for id in [0, 500, GID_REGULAR_USER_MIN - 1] {
-                let modlist = modlist!([
-                    m_pres(Attribute::Class, &EntryClass::PosixAccount.to_value()),
-                    m_pres(Attribute::GidNumber, &Value::Uint32(id))
-                ]);
-                let op_result = server_txn.internal_modify_uuid(user_b_uuid, &modlist);
-
-                trace!(?id);
-                assert_eq!(op_result, Err(OperationError::PL0001GidOverlapsSystemRange));
-            }
-        }
-
-        assert!(server_txn.commit().is_ok());
-    }
 }
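With the DOMAIN_LEVEL_6 branch removed, apply_gidnumber reduces to a single range-membership predicate. A standalone sketch; only GID_UNUSED_D_MAX's value (0x7fff_ffff) appears in the hunk above, the other bounds here are assumed placeholders:

// Placeholder bounds for illustration; only GID_UNUSED_D_MAX's value is
// shown in the diff above, the others are assumptions.
const GID_REGULAR_USER_MIN: u32 = 1000;
const GID_REGULAR_USER_MAX: u32 = 1999;
const GID_UNUSED_D_MIN: u32 = 0x7000_0000;
const GID_UNUSED_D_MAX: u32 = 0x7fff_ffff;

// The post-cleanup policy is a single predicate: the gid is valid iff it
// falls inside one of the allowed ranges (the real plugin checks more ranges).
fn gid_in_safe_range(gid: u32) -> bool {
    (GID_REGULAR_USER_MIN..=GID_REGULAR_USER_MAX).contains(&gid)
        || (GID_UNUSED_D_MIN..=GID_UNUSED_D_MAX).contains(&gid)
}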
@@ -158,7 +158,7 @@ impl QueryServer {
 
         // If we are new enough to support patches, and we are lower than the target patch level
         // then a reload will be applied after we raise the patch level.
-        if domain_target_level >= DOMAIN_LEVEL_7 && domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
+        if domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
             write_txn
                 .internal_modify_uuid(
                     UUID_DOMAIN_INFO,

@@ -294,114 +294,6 @@ impl QueryServerWriteTransaction<'_> {
         }
     }
 
-    /// Migration domain level 7 to 8
-    #[instrument(level = "info", skip_all)]
-    pub(crate) fn migrate_domain_7_to_8(&mut self) -> Result<(), OperationError> {
-        if !cfg!(test) && DOMAIN_MAX_LEVEL < DOMAIN_LEVEL_8 {
-            error!("Unable to raise domain level from 7 to 8.");
-            return Err(OperationError::MG0004DomainLevelInDevelopment);
-        }
-
-        // ============== Apply constraints ===============
-        let filter = filter!(f_and!([
-            f_eq(Attribute::Class, EntryClass::Account.into()),
-            f_pres(Attribute::PrimaryCredential),
-        ]));
-
-        let results = self.internal_search(filter)?;
-
-        let affected_entries = results
-            .into_iter()
-            .filter_map(|entry| {
-                if entry
-                    .get_ava_single_credential(Attribute::PrimaryCredential)
-                    .map(|cred| cred.has_securitykey())
-                    .unwrap_or_default()
-                {
-                    Some(entry.get_display_id())
-                } else {
-                    None
-                }
-            })
-            .collect::<Vec<_>>();
-
-        if !affected_entries.is_empty() {
-            error!("Unable to proceed. Some accounts still use legacy security keys, which need to be removed.");
-            for sk_present in affected_entries {
-                error!(%sk_present);
-            }
-            return Err(OperationError::MG0006SKConstraintsNotMet);
-        }
-
-        // Check oauth2 strict uri
-        let filter = filter!(f_and!([
-            f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
-            f_andnot(f_pres(Attribute::OAuth2StrictRedirectUri)),
-        ]));
-
-        let results = self.internal_search(filter)?;
-
-        let affected_entries = results
-            .into_iter()
-            .map(|entry| entry.get_display_id())
-            .collect::<Vec<_>>();
-
-        if !affected_entries.is_empty() {
-            error!("Unable to proceed. Not all oauth2 clients have strict redirect verification enabled.");
-            for missing_oauth2_strict_redirect_uri in affected_entries {
-                error!(%missing_oauth2_strict_redirect_uri);
-            }
-            return Err(OperationError::MG0007Oauth2StrictConstraintsNotMet);
-        }
-
-        // =========== Apply changes ==============
-
-        let idm_schema_classes = [
-            SCHEMA_ATTR_LINKED_GROUP_DL8.clone().into(),
-            SCHEMA_ATTR_APPLICATION_PASSWORD_DL8.clone().into(),
-            SCHEMA_CLASS_APPLICATION_DL8.clone().into(),
-            SCHEMA_CLASS_PERSON_DL8.clone().into(),
-            SCHEMA_CLASS_DOMAIN_INFO_DL8.clone().into(),
-            SCHEMA_ATTR_ALLOW_PRIMARY_CRED_FALLBACK_DL8.clone().into(),
-            SCHEMA_CLASS_ACCOUNT_POLICY_DL8.clone().into(),
-        ];
-
-        idm_schema_classes
-            .into_iter()
-            .try_for_each(|entry| self.internal_migrate_or_create(entry))
-            .map_err(|err| {
-                error!(?err, "migrate_domain_6_to_7 -> Error");
-                err
-            })?;
-
-        self.reload()?;
-
-        // Update access controls.
-        let idm_data = [
-            BUILTIN_GROUP_APPLICATION_ADMINS.clone().try_into()?,
-            IDM_ACP_SELF_READ_DL8.clone().into(),
-            IDM_ACP_SELF_WRITE_DL8.clone().into(),
-            IDM_ACP_APPLICATION_MANAGE_DL8.clone().into(),
-            IDM_ACP_APPLICATION_ENTRY_MANAGER_DL8.clone().into(),
-            // Add the new types for mail server
-            BUILTIN_GROUP_MAIL_SERVICE_ADMINS_DL8.clone().try_into()?,
-            BUILTIN_IDM_MAIL_SERVERS_DL8.clone().try_into()?,
-            IDM_ACP_MAIL_SERVERS_DL8.clone().into(),
-            IDM_ACP_DOMAIN_ADMIN_DL8.clone().into(),
-            IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL8.clone().into(),
-        ];
-
-        idm_data
-            .into_iter()
-            .try_for_each(|entry| self.internal_migrate_or_create(entry))
-            .map_err(|err| {
-                error!(?err, "migrate_domain_7_to_8 -> Error");
-                err
-            })?;
-
-        Ok(())
-    }
-
     /// Migration domain level 8 to 9 (1.5.0)
     #[instrument(level = "info", skip_all)]
     pub(crate) fn migrate_domain_8_to_9(&mut self) -> Result<(), OperationError> {

@@ -692,6 +584,10 @@ impl QueryServerWriteTransaction<'_> {
             SCHEMA_ATTR_MAIL_DL7.clone().into(),
             SCHEMA_ATTR_LEGALNAME_DL7.clone().into(),
             SCHEMA_ATTR_DISPLAYNAME_DL7.clone().into(),
+            // DL8
+            SCHEMA_ATTR_LINKED_GROUP_DL8.clone().into(),
+            SCHEMA_ATTR_APPLICATION_PASSWORD_DL8.clone().into(),
+            SCHEMA_ATTR_ALLOW_PRIMARY_CRED_FALLBACK_DL8.clone().into(),
         ];
 
         let r = idm_schema

@@ -718,12 +614,12 @@ impl QueryServerWriteTransaction<'_> {
             // DL4
             SCHEMA_CLASS_OAUTH2_RS_PUBLIC_DL4.clone().into(),
             // DL5
-            SCHEMA_CLASS_PERSON_DL5.clone().into(),
+            // SCHEMA_CLASS_PERSON_DL5.clone().into(),
             SCHEMA_CLASS_ACCOUNT_DL5.clone().into(),
             // SCHEMA_CLASS_OAUTH2_RS_DL5.clone().into(),
             SCHEMA_CLASS_OAUTH2_RS_BASIC_DL5.clone().into(),
             // DL6
-            SCHEMA_CLASS_ACCOUNT_POLICY_DL6.clone().into(),
+            // SCHEMA_CLASS_ACCOUNT_POLICY_DL6.clone().into(),
             // SCHEMA_CLASS_SERVICE_ACCOUNT_DL6.clone().into(),
             // SCHEMA_CLASS_SYNC_ACCOUNT_DL6.clone().into(),
             SCHEMA_CLASS_GROUP_DL6.clone().into(),

@@ -735,11 +631,16 @@ impl QueryServerWriteTransaction<'_> {
             SCHEMA_CLASS_KEY_OBJECT_INTERNAL_DL6.clone().into(),
             // SCHEMA_CLASS_DOMAIN_INFO_DL6.clone().into(),
             // DL7
-            SCHEMA_CLASS_DOMAIN_INFO_DL7.clone().into(),
+            // SCHEMA_CLASS_DOMAIN_INFO_DL7.clone().into(),
             SCHEMA_CLASS_SERVICE_ACCOUNT_DL7.clone().into(),
             SCHEMA_CLASS_SYNC_ACCOUNT_DL7.clone().into(),
             SCHEMA_CLASS_CLIENT_CERTIFICATE_DL7.clone().into(),
             SCHEMA_CLASS_OAUTH2_RS_DL7.clone().into(),
+            // DL8
+            SCHEMA_CLASS_ACCOUNT_POLICY_DL8.clone().into(),
+            SCHEMA_CLASS_APPLICATION_DL8.clone().into(),
+            SCHEMA_CLASS_PERSON_DL8.clone().into(),
+            SCHEMA_CLASS_DOMAIN_INFO_DL8.clone().into(),
         ];
 
         let r: Result<(), _> = idm_schema_classes_dl1

@@ -830,7 +731,7 @@ impl QueryServerWriteTransaction<'_> {
             IDM_ACP_RADIUS_SERVERS_V1.clone(),
             IDM_ACP_RADIUS_SECRET_MANAGE_V1.clone(),
             IDM_ACP_PEOPLE_SELF_WRITE_MAIL_V1.clone(),
-            IDM_ACP_SELF_READ_V1.clone(),
+            // IDM_ACP_SELF_READ_V1.clone(),
             // IDM_ACP_SELF_WRITE_V1.clone(),
             IDM_ACP_ACCOUNT_SELF_WRITE_V1.clone(),
             // IDM_ACP_SELF_NAME_WRITE_V1.clone(),

@@ -857,16 +758,24 @@ impl QueryServerWriteTransaction<'_> {
             // DL5
             // IDM_ACP_OAUTH2_MANAGE_DL5.clone(),
             // DL6
-            IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL6.clone(),
+            // IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL6.clone(),
             IDM_ACP_PEOPLE_CREATE_DL6.clone(),
             IDM_ACP_GROUP_MANAGE_DL6.clone(),
             IDM_ACP_ACCOUNT_MAIL_READ_DL6.clone(),
-            IDM_ACP_DOMAIN_ADMIN_DL6.clone(),
+            // IDM_ACP_DOMAIN_ADMIN_DL6.clone(),
             // DL7
-            IDM_ACP_SELF_WRITE_DL7.clone(),
+            // IDM_ACP_SELF_WRITE_DL7.clone(),
             IDM_ACP_SELF_NAME_WRITE_DL7.clone(),
             IDM_ACP_HP_CLIENT_CERTIFICATE_MANAGER_DL7.clone(),
             IDM_ACP_OAUTH2_MANAGE_DL7.clone(),
+            // DL8
+            IDM_ACP_SELF_READ_DL8.clone(),
+            IDM_ACP_SELF_WRITE_DL8.clone(),
+            IDM_ACP_APPLICATION_MANAGE_DL8.clone(),
+            IDM_ACP_APPLICATION_ENTRY_MANAGER_DL8.clone(),
+            IDM_ACP_MAIL_SERVERS_DL8.clone(),
+            IDM_ACP_DOMAIN_ADMIN_DL8.clone(),
+            IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL8.clone(),
         ];
 
         let res: Result<(), _> = idm_entries

@@ -896,19 +805,6 @@ impl QueryServerReadTransaction<'_> {
 
         let mut report_items = Vec::with_capacity(1);
 
-        if current_level <= DOMAIN_LEVEL_6 && upgrade_level >= DOMAIN_LEVEL_7 {
-            let item = self
-                .domain_upgrade_check_6_to_7_gidnumber()
-                .map_err(|err| {
-                    error!(
-                        ?err,
-                        "Failed to perform domain upgrade check 6 to 7 - gidnumber"
-                    );
-                    err
-                })?;
-            report_items.push(item);
-        }
-
         if current_level <= DOMAIN_LEVEL_7 && upgrade_level >= DOMAIN_LEVEL_8 {
             let item = self
                 .domain_upgrade_check_7_to_8_security_keys()

@@ -942,94 +838,6 @@ impl QueryServerReadTransaction<'_> {
         })
     }
 
-    pub(crate) fn domain_upgrade_check_6_to_7_gidnumber(
-        &mut self,
-    ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
-        let filter = filter!(f_and!([
-            f_or!([
-                f_eq(Attribute::Class, EntryClass::PosixAccount.into()),
-                f_eq(Attribute::Class, EntryClass::PosixGroup.into())
-            ]),
-            // This logic gets a bit messy but it would be:
-            // If ! (
-            //    (GID_REGULAR_USER_MIN < value < GID_REGULAR_USER_MAX) ||
-            //    (GID_UNUSED_A_MIN < value < GID_UNUSED_A_MAX) ||
-            //    (GID_UNUSED_B_MIN < value < GID_UNUSED_B_MAX) ||
-            //    (GID_UNUSED_C_MIN < value < GID_UNUSED_D_MAX)
-            // )
-            f_andnot(f_or!([
-                f_and!([
-                    // The gid value must be less than GID_REGULAR_USER_MAX
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_REGULAR_USER_MAX)
-                    ),
-                    // This bit of mental gymnastics is "greater than".
-                    // The gid value must not be less than USER_MIN
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_REGULAR_USER_MIN)
-                    ))
-                ]),
-                f_and!([
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_A_MAX)
-                    ),
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_A_MIN)
-                    ))
-                ]),
-                f_and!([
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_B_MAX)
-                    ),
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_B_MIN)
-                    ))
-                ]),
-                // If both of these conditions are true we get:
-                // C_MIN < value < D_MAX, which the outer and-not inverts.
-                f_and!([
-                    // The gid value must be less than GID_UNUSED_D_MAX
-                    f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_D_MAX)
-                    ),
-                    // This bit of mental gymnastics is "greater than".
-                    // The gid value must not be less than C_MIN
-                    f_andnot(f_lt(
-                        Attribute::GidNumber,
-                        PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_C_MIN)
-                    ))
-                ]),
-            ]))
-        ]));
-
-        let results = self.internal_search(filter)?;
-
-        let affected_entries = results
-            .into_iter()
-            .map(|entry| entry.get_display_id())
-            .collect::<Vec<_>>();
-
-        let status = if affected_entries.is_empty() {
-            ProtoDomainUpgradeCheckStatus::Pass6To7Gidnumber
-        } else {
-            ProtoDomainUpgradeCheckStatus::Fail6To7Gidnumber
-        };
-
-        Ok(ProtoDomainUpgradeCheckItem {
-            status,
-            from_level: DOMAIN_LEVEL_6,
-            to_level: DOMAIN_LEVEL_7,
-            affected_entries,
-        })
-    }
-
     pub(crate) fn domain_upgrade_check_7_to_8_security_keys(
         &mut self,
     ) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {

@@ -1101,7 +909,7 @@ impl QueryServerReadTransaction<'_> {
 
 #[cfg(test)]
 mod tests {
-    use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
+    // use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
     use crate::prelude::*;
 
     #[qs_test]

@@ -1130,99 +938,6 @@ mod tests {
         }
     }
 
-    #[qs_test(domain_level=DOMAIN_LEVEL_7)]
-    async fn test_migrations_dl7_dl8(server: &QueryServer) {
-        // Assert our instance was setup to version 7
-        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
-
-        let db_domain_version = write_txn
-            .internal_search_uuid(UUID_DOMAIN_INFO)
-            .expect("unable to access domain entry")
-            .get_ava_single_uint32(Attribute::Version)
-            .expect("Attribute Version not present");
-
-        assert_eq!(db_domain_version, DOMAIN_LEVEL_7);
-
-        // Create an oauth2 client that doesn't have a landing url set.
-        let oauth2_client_uuid = Uuid::new_v4();
-
-        let ea: Entry<EntryInit, EntryNew> = entry_init!(
-            (Attribute::Class, EntryClass::Object.to_value()),
-            (Attribute::Class, EntryClass::Account.to_value()),
-            (Attribute::Uuid, Value::Uuid(oauth2_client_uuid)),
-            (
-                Attribute::Class,
-                EntryClass::OAuth2ResourceServer.to_value()
-            ),
-            (
-                Attribute::Class,
-                EntryClass::OAuth2ResourceServerPublic.to_value()
-            ),
-            (Attribute::Name, Value::new_iname("test_resource_server")),
-            (
-                Attribute::DisplayName,
-                Value::new_utf8s("test_resource_server")
-            ),
-            (
-                Attribute::OAuth2RsOriginLanding,
-                Value::new_url_s("https://demo.example.com/oauth2").unwrap()
-            ),
-            (
-                Attribute::OAuth2RsOrigin,
-                Value::new_url_s("https://demo.example.com").unwrap()
-            )
-        );
-
-        write_txn
-            .internal_create(vec![ea])
-            .expect("Unable to create oauth2 client");
-
-        write_txn.commit().expect("Unable to commit");
-
-        // pre migration verification.
-        // check we currently would fail a migration.
-
-        let mut read_txn = server.read().await.unwrap();
-
-        match read_txn.domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri() {
-            Ok(ProtoDomainUpgradeCheckItem {
-                status: ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri,
-                ..
-            }) => {
-                trace!("Failed as expected, very good.");
-            }
-            other => {
-                error!(?other);
-                unreachable!();
-            }
-        };
-
-        drop(read_txn);
-
-        // Okay, fix the problem.
-
-        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
-
-        write_txn
-            .internal_modify_uuid(
-                oauth2_client_uuid,
-                &ModifyList::new_purge_and_set(
-                    Attribute::OAuth2StrictRedirectUri,
-                    Value::Bool(true),
-                ),
-            )
-            .expect("Unable to enforce strict mode.");
-
-        // Set the version to 8.
-        write_txn
-            .internal_apply_domain_migration(DOMAIN_LEVEL_8)
-            .expect("Unable to set domain level to version 8");
-
-        // post migration verification.
-
-        write_txn.commit().expect("Unable to commit");
-    }
-
     #[qs_test(domain_level=DOMAIN_LEVEL_8)]
     async fn test_migrations_dl8_dl9(server: &QueryServer) {
         let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
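The deleted migrate_domain_7_to_8 followed the recurring migration shape: run pre-flight searches for offending entries, log every display id, and abort with a typed error before touching schema or access controls. A generic sketch of that pattern:

// Generic shape of the pre-flight checks the deleted migration used:
// collect offenders first, report all of them, then refuse to migrate.
fn enforce_constraint<E: std::fmt::Display, Err>(
    offenders: Vec<E>,
    what: &str,
    err: Err,
) -> Result<(), Err> {
    if offenders.is_empty() {
        return Ok(());
    }
    eprintln!("Unable to proceed. {what}");
    for entry in offenders {
        eprintln!("  {entry}");
    }
    Err(err)
}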
@@ -1238,13 +1238,6 @@ pub trait QueryServerTransaction<'a> {
     }
 
     fn get_domain_key_object_handle(&self) -> Result<Arc<KeyObject>, OperationError> {
-        #[cfg(test)]
-        if self.get_domain_version() < DOMAIN_LEVEL_6 {
-            // We must be in tests, and this is a DL5 to 6 test. For this we'll just make
-            // an ephemeral provider.
-            return Ok(crate::server::keys::KeyObjectInternal::new_test());
-        };
-
         self.get_key_providers()
             .get_key_object_handle(UUID_DOMAIN_INFO)
             .ok_or(OperationError::KP0031KeyObjectNotFound)

@@ -2335,7 +2328,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         debug!(domain_previous_patch_level = ?previous_patch_level, domain_target_patch_level = ?domain_info_patch_level);
 
         // We have to check for DL0 since that's the initialisation level.
-        if previous_version <= DOMAIN_MIN_REMIGRATION_LEVEL && previous_version != DOMAIN_LEVEL_0 {
+        if previous_version < DOMAIN_MIN_REMIGRATION_LEVEL && previous_version != DOMAIN_LEVEL_0 {
             error!("UNABLE TO PROCEED. You are attempting a Skip update which is NOT SUPPORTED. You must upgrade one-version of Kanidm at a time.");
             error!("For more see: https://kanidm.github.io/kanidm/stable/support.html#upgrade-policy and https://kanidm.github.io/kanidm/stable/server_updates.html");
             error!(domain_previous_version = ?previous_version, domain_target_version = ?domain_info_version);

@@ -2343,11 +2336,6 @@ impl<'a> QueryServerWriteTransaction<'a> {
             return Err(OperationError::MG0008SkipUpgradeAttempted);
         }
 
-        if previous_version <= DOMAIN_LEVEL_7 && domain_info_version >= DOMAIN_LEVEL_8 {
-            // 1.3 -> 1.4
-            self.migrate_domain_7_to_8()?;
-        }
-
         if previous_version <= DOMAIN_LEVEL_8 && domain_info_version >= DOMAIN_LEVEL_9 {
             // 1.4 -> 1.5
             self.migrate_domain_8_to_9()?;
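The last hunks encode the upgrade policy: re-migration is only possible from DOMAIN_MIN_REMIGRATION_LEVEL upward (DL0 marks a fresh install), and each release applies only the single-level migrations between the stored and target versions. A sketch of that gating, with hypothetical step bodies:

type DomainVersion = u32;

const DOMAIN_LEVEL_0: DomainVersion = 0;
const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = 8; // assumed, per the hunk above

// Hypothetical dispatch standing in for the migrate_domain_8_to_9 chain.
fn run_migrations(previous: DomainVersion, target: DomainVersion) -> Result<(), String> {
    // Skip-updates are refused: the previous version must not trail the
    // minimum re-migration level (DL0 is the fresh-install marker).
    if previous < DOMAIN_MIN_REMIGRATION_LEVEL && previous != DOMAIN_LEVEL_0 {
        return Err("skip update attempted".into());
    }
    // Apply each single-level migration that the gap covers, in order.
    for level in previous..target {
        println!("migrating domain level {} -> {}", level, level + 1);
    }
    Ok(())
}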