Check for same version with backup/restore (#2789)

Firstyear 2024-05-23 11:48:37 +10:00 committed by GitHub
parent 1e4f6e85ca
commit c1235a7186
6 changed files with 83 additions and 44 deletions

View file

@@ -146,8 +146,8 @@ elements in a simpler and correct way out of the box in comparison.
 <details>
 <summary>Rauthy</summary>
 
-Rauthy is a minimal OIDC provider. It supports WebAuthn just like Kanidm - they actually use our library
-for it!
+Rauthy is a minimal OIDC provider. It supports WebAuthn just like Kanidm - they actually use our
+library for it!
 
 Rauthy only provides support for OIDC and so is unable to support other use cases like RADIUS and
 unix authentication.
@@ -160,15 +160,15 @@ then Kanidm will support those.
 <details>
 <summary>Authentik / Authelia / Zitadel</summary>
 
-Authentik is an IDM provider written in Python and, Authelia and Zitadel are written in Go.
-all similar to Kanidm in the features it offers but notably all have weaker support for
-unix authentication and do not support the same level of authentication policy as Kanidm. Notably,
-all are missing WebAuthn Attestation.
+Authentik is an IDM provider written in Python and, Authelia and Zitadel are written in Go. all
+similar to Kanidm in the features it offers but notably all have weaker support for unix
+authentication and do not support the same level of authentication policy as Kanidm. Notably, all
+are missing WebAuthn Attestation.
 
 All three use an external SQL server such as PostgreSQL. This can create a potential single source
 of failure and performance limitation compared to Kanidm which opted to write our own high
-performance database and replication system instead based on our experience with enterprise
-LDAP servers.
+performance database and replication system instead based on our experience with enterprise LDAP
+servers.
 
 </details>

View file

@@ -4,6 +4,9 @@ With any Identity Management (IDM) software, it's important you have the capabil
 case of a disaster - be that physical damage or a mistake. Kanidm supports backup and restore of the
 database with three methods.
 
+It is important that you only attempt to restore data with the same version of the server that the
+backup originated from.
+
 ## Method 1 - Automatic Backup
 
 Automatic backups can be generated online by a `kanidmd server` instance by including the
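
As an aside, the version recorded in a backup can be inspected without fully deserialising it, since serde ignores unknown fields when targeting a plain struct. A hypothetical sketch (the `BackupHeader` type is illustrative, not part of Kanidm):

```rust
// Hypothetical sketch: peek at the `version` field embedded in a (V5)
// backup file without deserialising the whole DbBackup document.
use serde::Deserialize;

#[derive(Deserialize)]
struct BackupHeader {
    // Absent in pre-V5 backups, hence Option. (V1 backups are a bare JSON
    // array, so deserialising them into a struct fails outright.)
    version: Option<String>,
}

fn backup_series(raw_json: &str) -> Option<String> {
    serde_json::from_str::<BackupHeader>(raw_json).ok()?.version
}
```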

View file

@@ -97,6 +97,14 @@ pub fn apply_profile() {
         println!("cargo:rustc-env=KANIDM_PRE_RELEASE=1");
     }
 
+    // For some checks we only want the series (i.e. exclude the patch version).
+    let version_major = env!("CARGO_PKG_VERSION_MAJOR");
+    let version_minor = env!("CARGO_PKG_VERSION_MINOR");
+    println!(
+        "cargo:rustc-env=KANIDM_PKG_SERIES={}.{}",
+        version_major, version_minor
+    );
+
     match profile_cfg.cpu_flags {
         CpuOptLevel::apple_m1 => println!("cargo:rustc-env=RUSTFLAGS=-Ctarget-cpu=apple_m1"),
         CpuOptLevel::none => {}
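
Because the value is exported with `cargo:rustc-env`, `KANIDM_PKG_SERIES` is fixed at build time and can then be read with `env!` anywhere in the compiled crates. A minimal sketch of consuming it (standalone, assuming the variable is set by a build script as above):

```rust
// Sketch: consuming the build-time series. `env!` expands at compile
// time, so the string is frozen into the binary when it is built.
const SERIES: &str = env!("KANIDM_PKG_SERIES");

fn main() {
    // For a crate version of e.g. 1.3.2, the series is "1.3".
    println!("built for series {}", SERIES);
}
```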

View file

@@ -132,6 +132,10 @@ pub enum OperationError {
     // Value Errors
     VL0001ValueSshPublicKeyString,
 
+    // DB low level errors.
+    DB0001MismatchedRestoreVersion,
+    DB0002MismatchedRestoreVersion,
+
     // SCIM
     SC0001IncomingSshPublicKey,
     // Migration

View file

@@ -56,18 +56,15 @@ pub struct DbEntry {
 #[derive(Serialize, Deserialize)]
 #[serde(untagged)]
 pub enum DbBackup {
-    V1(Vec<DbEntry>),
-    V2 {
-        db_s_uuid: Uuid,
-        db_d_uuid: Uuid,
-        db_ts_max: Duration,
-        entries: Vec<DbEntry>,
-    },
-    V3 {
+    // Because of untagged, this has to be in order of newest
+    // to oldest as untagged does a first-match when deserialising.
+    V5 {
+        version: String,
         db_s_uuid: Uuid,
         db_d_uuid: Uuid,
         db_ts_max: Duration,
         keyhandles: BTreeMap<KeyHandleId, KeyHandle>,
+        repl_meta: DbReplMeta,
         entries: Vec<DbEntry>,
     },
     V4 {
@@ -78,6 +75,20 @@ pub enum DbBackup {
         repl_meta: DbReplMeta,
         entries: Vec<DbEntry>,
     },
+    V3 {
+        db_s_uuid: Uuid,
+        db_d_uuid: Uuid,
+        db_ts_max: Duration,
+        keyhandles: BTreeMap<KeyHandleId, KeyHandle>,
+        entries: Vec<DbEntry>,
+    },
+    V2 {
+        db_s_uuid: Uuid,
+        db_d_uuid: Uuid,
+        db_ts_max: Duration,
+        entries: Vec<DbEntry>,
+    },
+    V1(Vec<DbEntry>),
 }
 
 fn from_vec_dbval1(attr_val: NonEmpty<DbValueV1>) -> Result<DbValueSetV2, OperationError> {
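
The ordering comment above is load-bearing: `#[serde(untagged)]` tries variants in declaration order and returns the first that deserialises, and struct variants ignore unknown fields by default, so an older variant listed first would silently swallow newer documents. A minimal standalone sketch of the failure mode (types are illustrative):

```rust
// Minimal sketch of why newest-first ordering matters for untagged enums:
// serde tries variants top to bottom and keeps the first that fits.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
#[serde(untagged)]
enum Versioned {
    // Newest first: a document carrying `version` matches here.
    New { version: String, data: u32 },
    // If this were listed first, the same document would match it too,
    // silently discarding `version`.
    Old { data: u32 },
}

fn main() {
    let v: Versioned =
        serde_json::from_str(r#"{"version":"1.3","data":7}"#).unwrap();
    println!("{v:?}"); // New { version: "1.3", data: 7 }
}
```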

View file

@@ -901,7 +901,9 @@ pub trait BackendTransaction {
         let keyhandles = idlayer.get_key_handles()?;
 
-        let bak = DbBackup::V4 {
+        let bak = DbBackup::V5 {
+            // remember env is evaled at compile time.
+            version: env!("KANIDM_PKG_SERIES").to_string(),
             db_s_uuid,
             db_d_uuid,
             db_ts_max,
@@ -1756,8 +1758,8 @@ impl<'a> BackendWriteTransaction<'a> {
                 OperationError::SerdeJsonError
             })?;
 
-        let (dbentries, repl_meta) = match dbbak {
-            DbBackup::V1(dbentries) => (dbentries, None),
+        let (dbentries, repl_meta, maybe_version) = match dbbak {
+            DbBackup::V1(dbentries) => (dbentries, None, None),
             DbBackup::V2 {
                 db_s_uuid,
                 db_d_uuid,
@@ -1768,7 +1770,7 @@ impl<'a> BackendWriteTransaction<'a> {
                 idlayer.write_db_s_uuid(db_s_uuid)?;
                 idlayer.write_db_d_uuid(db_d_uuid)?;
                 idlayer.set_db_ts_max(db_ts_max)?;
-                (entries, None)
+                (entries, None, None)
             }
             DbBackup::V3 {
                 db_s_uuid,
@@ -1782,7 +1784,7 @@ impl<'a> BackendWriteTransaction<'a> {
                 idlayer.write_db_d_uuid(db_d_uuid)?;
                 idlayer.set_db_ts_max(db_ts_max)?;
                 idlayer.set_key_handles(keyhandles)?;
-                (entries, None)
+                (entries, None, None)
             }
             DbBackup::V4 {
                 db_s_uuid,
@@ -1797,8 +1799,34 @@ impl<'a> BackendWriteTransaction<'a> {
                 idlayer.write_db_d_uuid(db_d_uuid)?;
                 idlayer.set_db_ts_max(db_ts_max)?;
                 idlayer.set_key_handles(keyhandles)?;
-                (entries, Some(repl_meta))
+                (entries, Some(repl_meta), None)
             }
+            DbBackup::V5 {
+                version,
+                db_s_uuid,
+                db_d_uuid,
+                db_ts_max,
+                keyhandles,
+                repl_meta,
+                entries,
+            } => {
+                // Do stuff.
+                idlayer.write_db_s_uuid(db_s_uuid)?;
+                idlayer.write_db_d_uuid(db_d_uuid)?;
+                idlayer.set_db_ts_max(db_ts_max)?;
+                idlayer.set_key_handles(keyhandles)?;
+                (entries, Some(repl_meta), Some(version))
+            }
         };
 
+        if let Some(version) = maybe_version {
+            if version != env!("KANIDM_PKG_SERIES") {
+                error!("The provided backup data is from server version {} and is unable to be restored on this instance ({})", version, env!("KANIDM_PKG_SERIES"));
+                return Err(OperationError::DB0001MismatchedRestoreVersion);
+            }
+        } else {
+            error!("The provided backup data is from an older server version and is unable to be restored.");
+            return Err(OperationError::DB0002MismatchedRestoreVersion);
+        };
+
         // Rebuild the RUV from the backup.
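
In short, the gate distinguishes two failure modes: pre-V5 backups carry no version at all (`DB0002MismatchedRestoreVersion`), while a V5 backup from a different series fails the string comparison (`DB0001MismatchedRestoreVersion`). The same control flow in a condensed, standalone sketch (error names abbreviated for illustration):

```rust
// Condensed sketch of the restore version gate. `None` models a pre-V5
// backup with no embedded version; `Some(v)` must equal the running
// server's series exactly.
#[derive(Debug, PartialEq)]
enum RestoreError {
    MismatchedVersion, // cf. DB0001: series differs from this instance
    MissingVersion,    // cf. DB0002: backup predates version tagging
}

fn check_restore_version(found: Option<&str>, ours: &str) -> Result<(), RestoreError> {
    match found {
        Some(v) if v == ours => Ok(()),
        Some(_) => Err(RestoreError::MismatchedVersion),
        None => Err(RestoreError::MissingVersion),
    }
}

fn main() {
    assert_eq!(check_restore_version(Some("1.2"), "1.2"), Ok(()));
    assert_eq!(
        check_restore_version(Some("1.1"), "1.2"),
        Err(RestoreError::MismatchedVersion)
    );
    assert_eq!(
        check_restore_version(None, "1.2"),
        Err(RestoreError::MissingVersion)
    );
}
```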
@@ -2587,31 +2615,12 @@ mod tests {
             // Now here, we need to tamper with the file.
             let serialized_string = fs::read_to_string(&db_backup_file_name).unwrap();
 
+            trace!(?serialized_string);
+
             let mut dbbak: DbBackup = serde_json::from_str(&serialized_string).unwrap();
 
             match &mut dbbak {
-                DbBackup::V1(_) => {
-                    // We no longer use these format versions!
-                    unreachable!()
-                }
-                DbBackup::V2 {
-                    db_s_uuid: _,
-                    db_d_uuid: _,
-                    db_ts_max: _,
-                    entries,
-                } => {
-                    let _ = entries.pop();
-                }
-                DbBackup::V3 {
-                    db_s_uuid: _,
-                    db_d_uuid: _,
-                    db_ts_max: _,
-                    keyhandles: _,
-                    entries,
-                } => {
-                    let _ = entries.pop();
-                }
-                DbBackup::V4 {
+                DbBackup::V5 {
+                    version: _,
                     db_s_uuid: _,
                     db_d_uuid: _,
                     db_ts_max: _,
@@ -2621,6 +2630,10 @@ mod tests {
                 } => {
                     let _ = entries.pop();
                 }
+                _ => {
+                    // We no longer use these format versions!
+                    unreachable!()
+                }
             };
 
             let serialized_entries_str = serde_json::to_string_pretty(&dbbak).unwrap();