From fe24056fdc745dfbe943ede94bad2fd3ead9aceb Mon Sep 17 00:00:00 2001 From: Firstyear Date: Wed, 15 Feb 2023 10:25:51 +1000 Subject: [PATCH] 20230130 hackweek replication (#1358) Add initial support for refreshing the content of a new server in a replication topology. This is embedded in test cases only for now. --- kanidm_proto/src/v1.rs | 2 + kanidmd/lib/benches/scaling_10k.rs | 8 +- kanidmd/lib/src/be/dbentry.rs | 24 +- kanidmd/lib/src/be/dbvalue.rs | 8 +- kanidmd/lib/src/be/idl_sqlite.rs | 33 +- kanidmd/lib/src/be/mod.rs | 88 ++- kanidmd/lib/src/constants/mod.rs | 10 +- kanidmd/lib/src/constants/uuids.rs | 1 + kanidmd/lib/src/constants/values.rs | 4 +- kanidmd/lib/src/credential/mod.rs | 200 ++++++- kanidmd/lib/src/credential/totp.rs | 35 ++ kanidmd/lib/src/entry.rs | 712 ++++++++++++----------- kanidmd/lib/src/idm/credupdatesession.rs | 4 +- kanidmd/lib/src/idm/ldap.rs | 16 +- kanidmd/lib/src/idm/mod.rs | 8 +- kanidmd/lib/src/idm/oauth2.rs | 32 +- kanidmd/lib/src/idm/scim.rs | 62 +- kanidmd/lib/src/idm/server.rs | 2 +- kanidmd/lib/src/lib.rs | 4 +- kanidmd/lib/src/macros.rs | 12 - kanidmd/lib/src/plugins/attrunique.rs | 28 +- kanidmd/lib/src/plugins/gidnumber.rs | 6 +- kanidmd/lib/src/plugins/memberof.rs | 57 +- kanidmd/lib/src/plugins/mod.rs | 39 ++ kanidmd/lib/src/plugins/refint.rs | 8 + kanidmd/lib/src/repl/cid.rs | 43 +- kanidmd/lib/src/repl/consumer.rs | 214 +++++++ kanidmd/lib/src/repl/entry-changelog.rs | 601 +++++++++++++++++++ kanidmd/lib/src/repl/entry.rs | 706 ++++++---------------- kanidmd/lib/src/repl/mod.rs | 4 + kanidmd/lib/src/repl/proto.rs | 497 ++++++++++++++++ kanidmd/lib/src/repl/ruv.rs | 46 +- kanidmd/lib/src/repl/supplier.rs | 101 ++++ kanidmd/lib/src/repl/tests.rs | 101 +++- kanidmd/lib/src/schema.rs | 102 +++- kanidmd/lib/src/server/access/mod.rs | 4 +- kanidmd/lib/src/server/create.rs | 6 +- kanidmd/lib/src/server/migrations.rs | 8 +- kanidmd/lib/src/server/mod.rs | 82 +-- kanidmd/lib/src/value.rs | 6 +- kanidmd/lib/src/valueset/address.rs | 65 +++ kanidmd/lib/src/valueset/binary.rs | 28 + kanidmd/lib/src/valueset/bool.rs | 12 + kanidmd/lib/src/valueset/cid.rs | 20 +- kanidmd/lib/src/valueset/cred.rs | 132 +++++ kanidmd/lib/src/valueset/datetime.rs | 26 + kanidmd/lib/src/valueset/iname.rs | 12 + kanidmd/lib/src/valueset/index.rs | 13 + kanidmd/lib/src/valueset/iutf8.rs | 12 + kanidmd/lib/src/valueset/json.rs | 29 +- kanidmd/lib/src/valueset/jws.rs | 76 ++- kanidmd/lib/src/valueset/mod.rs | 47 +- kanidmd/lib/src/valueset/nsuniqueid.rs | 12 + kanidmd/lib/src/valueset/oauth.rs | 39 +- kanidmd/lib/src/valueset/restricted.rs | 12 + kanidmd/lib/src/valueset/secret.rs | 12 + kanidmd/lib/src/valueset/session.rs | 199 +++++++ kanidmd/lib/src/valueset/spn.rs | 19 +- kanidmd/lib/src/valueset/ssh.rs | 19 + kanidmd/lib/src/valueset/syntax.rs | 13 + kanidmd/lib/src/valueset/totp.rs | 23 + kanidmd/lib/src/valueset/uihint.rs | 13 + kanidmd/lib/src/valueset/uint32.rs | 12 + kanidmd/lib/src/valueset/url.rs | 12 + kanidmd/lib/src/valueset/utf8.rs | 12 + kanidmd/lib/src/valueset/uuid.rs | 23 + 66 files changed, 3627 insertions(+), 1189 deletions(-) create mode 100644 kanidmd/lib/src/repl/consumer.rs create mode 100644 kanidmd/lib/src/repl/entry-changelog.rs create mode 100644 kanidmd/lib/src/repl/proto.rs create mode 100644 kanidmd/lib/src/repl/supplier.rs diff --git a/kanidm_proto/src/v1.rs b/kanidm_proto/src/v1.rs index c7c7b9393..301dfff8e 100644 --- a/kanidm_proto/src/v1.rs +++ b/kanidm_proto/src/v1.rs @@ -63,6 +63,7 @@ pub enum ConsistencyError { BackendAllIdsSync, 
BackendIndexSync, ChangelogDesynchronised(u64), + ChangeStateDesynchronised(u64), RuvInconsistent(String), } @@ -249,6 +250,7 @@ pub enum OperationError { ReplReplayFailure, ReplEntryNotChanged, ReplInvalidRUVState, + ReplDomainLevelUnsatisfiable, } impl PartialEq for OperationError { diff --git a/kanidmd/lib/benches/scaling_10k.rs b/kanidmd/lib/benches/scaling_10k.rs index 1ce0a5f71..fab9ab052 100644 --- a/kanidmd/lib/benches/scaling_10k.rs +++ b/kanidmd/lib/benches/scaling_10k.rs @@ -21,7 +21,7 @@ pub fn scaling_user_create_single(c: &mut Criterion) { group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { b.iter_custom(|iters| { let mut elapsed = Duration::from_secs(0); - println!("iters, size -> {:?}, {:?}", iters, size); + println!("iters, size -> {iters:?}, {size:?}"); for _i in 0..iters { let mut rt = tokio::runtime::Builder::new_current_thread(); @@ -37,7 +37,7 @@ pub fn scaling_user_create_single(c: &mut Criterion) { let start = Instant::now(); for counter in 0..size { let mut idms_prox_write = idms.proxy_write(ct).await; - let name = format!("testperson_{}", counter); + let name = format!("testperson_{counter}"); let e1 = entry_init!( ("class", Value::new_class("object")), ("class", Value::new_class("person")), @@ -74,12 +74,12 @@ pub fn scaling_user_create_batched(c: &mut Criterion) { group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| { b.iter_custom(|iters| { let mut elapsed = Duration::from_secs(0); - println!("iters, size -> {:?}, {:?}", iters, size); + println!("iters, size -> {iters:?}, {size:?}"); let data: Vec<_> = (0..size) .into_iter() .map(|i| { - let name = format!("testperson_{}", i); + let name = format!("testperson_{i}"); entry_init!( ("class", Value::new_class("object")), ("class", Value::new_class("person")), diff --git a/kanidmd/lib/src/be/dbentry.rs b/kanidmd/lib/src/be/dbentry.rs index 955b05621..f74853f05 100644 --- a/kanidmd/lib/src/be/dbentry.rs +++ b/kanidmd/lib/src/be/dbentry.rs @@ -437,9 +437,9 @@ impl std::fmt::Debug for DbEntry { DbEntryVers::V1(dbe_v1) => { write!(f, "v1 - {{ ")?; for (k, vs) in dbe_v1.attrs.iter() { - write!(f, "{} - [", k)?; + write!(f, "{k} - [")?; for v in vs { - write!(f, "{:?}, ", v)?; + write!(f, "{v:?}, ")?; } write!(f, "], ")?; } @@ -448,8 +448,8 @@ impl std::fmt::Debug for DbEntry { DbEntryVers::V2(dbe_v2) => { write!(f, "v2 - {{ ")?; for (k, vs) in dbe_v2.attrs.iter() { - write!(f, "{} - [", k)?; - write!(f, "{:?}, ", vs)?; + write!(f, "{k} - [")?; + write!(f, "{vs:?}, ")?; write!(f, "], ")?; } write!(f, "}}") @@ -466,24 +466,24 @@ impl std::fmt::Display for DbEntry { match dbe_v1.attrs.get("uuid") { Some(uuids) => { for uuid in uuids { - write!(f, "{:?}, ", uuid)?; + write!(f, "{uuid:?}, ")?; } } None => write!(f, "Uuid(INVALID), ")?, }; if let Some(names) = dbe_v1.attrs.get("name") { for name in names { - write!(f, "{:?}, ", name)?; + write!(f, "{name:?}, ")?; } } if let Some(names) = dbe_v1.attrs.get("attributename") { for name in names { - write!(f, "{:?}, ", name)?; + write!(f, "{name:?}, ")?; } } if let Some(names) = dbe_v1.attrs.get("classname") { for name in names { - write!(f, "{:?}, ", name)?; + write!(f, "{name:?}, ")?; } } write!(f, "}}") @@ -492,18 +492,18 @@ impl std::fmt::Display for DbEntry { write!(f, "v2 - {{ ")?; match dbe_v2.attrs.get("uuid") { Some(uuids) => { - write!(f, "{:?}, ", uuids)?; + write!(f, "{uuids:?}, ")?; } None => write!(f, "Uuid(INVALID), ")?, }; if let Some(names) = dbe_v2.attrs.get("name") { - write!(f, "{:?}, ", names)?; + write!(f, 
"{names:?}, ")?; } if let Some(names) = dbe_v2.attrs.get("attributename") { - write!(f, "{:?}, ", names)?; + write!(f, "{names:?}, ")?; } if let Some(names) = dbe_v2.attrs.get("classname") { - write!(f, "{:?}, ", names)?; + write!(f, "{names:?}, ")?; } write!(f, "}}") } diff --git a/kanidmd/lib/src/be/dbvalue.rs b/kanidmd/lib/src/be/dbvalue.rs index 04bc98164..6314b00d4 100644 --- a/kanidmd/lib/src/be/dbvalue.rs +++ b/kanidmd/lib/src/be/dbvalue.rs @@ -12,8 +12,6 @@ use webauthn_rs_core::proto::{COSEKey, UserVerificationPolicy}; #[derive(Serialize, Deserialize, Debug)] pub struct DbCidV1 { - #[serde(rename = "d")] - pub domain_id: Uuid, #[serde(rename = "s")] pub server_id: Uuid, #[serde(rename = "t")] @@ -276,8 +274,8 @@ impl fmt::Display for DbCred { DbCred::TmpWn { webauthn, uuid } => { write!(f, "TmpWn ( w {}, u {} )", webauthn.len(), uuid) } - DbCred::V2Password { password: _, uuid } => write!(f, "V2Pw ( u {} )", uuid), - DbCred::V2GenPassword { password: _, uuid } => write!(f, "V2GPw ( u {} )", uuid), + DbCred::V2Password { password: _, uuid } => write!(f, "V2Pw ( u {uuid} )"), + DbCred::V2GenPassword { password: _, uuid } => write!(f, "V2GPw ( u {uuid} )"), DbCred::V2PasswordMfa { password: _, totp, @@ -688,7 +686,7 @@ mod tests { let x = vec![dbcred]; let json = serde_json::to_string(&x).unwrap(); - eprintln!("{}", json); + eprintln!("{json}"); let _e_dbcred: Vec = serde_json::from_str(&json).unwrap(); diff --git a/kanidmd/lib/src/be/idl_sqlite.rs b/kanidmd/lib/src/be/idl_sqlite.rs index 447cdb49a..9c12b6426 100644 --- a/kanidmd/lib/src/be/idl_sqlite.rs +++ b/kanidmd/lib/src/be/idl_sqlite.rs @@ -379,7 +379,7 @@ pub trait IdlSqliteTransaction { .or_else(|e| serde_cbor::from_slice(d.as_slice()).map_err(|_| e)) .map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: Serde CBOR Error"); - eprintln!("CRITICAL: Serde CBOR Error -> {:?}", e); + eprintln!("CRITICAL: Serde CBOR Error -> {e:?}"); OperationError::SerdeCborError })?, ), @@ -410,7 +410,7 @@ pub trait IdlSqliteTransaction { .or_else(|e| serde_cbor::from_slice(d.as_slice()).map_err(|_| e)) .map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: Serde CBOR Error"); - eprintln!("CRITICAL: Serde CBOR Error -> {:?}", e); + eprintln!("CRITICAL: Serde CBOR Error -> {e:?}"); OperationError::SerdeCborError })?, ), @@ -442,7 +442,7 @@ pub trait IdlSqliteTransaction { .or_else(|_| serde_cbor::from_slice(d.as_slice())) .map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: Serde JSON Error"); - eprintln!("CRITICAL: Serde JSON Error -> {:?}", e); + eprintln!("CRITICAL: Serde JSON Error -> {e:?}"); OperationError::SerdeJsonError })?, ), @@ -500,7 +500,7 @@ pub trait IdlSqliteTransaction { .ok_or(OperationError::InvalidEntryId) .and_then(|data| { data.into_dbentry() - .map(|(id, db_e)| (id, format!("{:?}", db_e))) + .map(|(id, db_e)| (id, format!("{db_e:?}"))) }) } @@ -511,7 +511,7 @@ pub trait IdlSqliteTransaction { // TODO: Once we have slopes we can add .exists_table, and assert // it's an idx table. 
- let query = format!("SELECT key, idl FROM {}", index_name); + let query = format!("SELECT key, idl FROM {index_name}"); let mut stmt = self .get_conn() .prepare(query.as_str()) @@ -1117,7 +1117,7 @@ impl IdlSqliteWriteTransaction { .map(|_| ()) .map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: rusqlite error"); - eprintln!("CRITICAL: rusqlite error {:?}", e); + eprintln!("CRITICAL: rusqlite error {e:?}"); OperationError::SqliteError }) }) @@ -1165,7 +1165,7 @@ impl IdlSqliteWriteTransaction { pub fn write_db_s_uuid(&self, nsid: Uuid) -> Result<(), OperationError> { let data = serde_json::to_vec(&nsid).map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: Serde JSON Error"); - eprintln!("CRITICAL: Serde JSON Error -> {:?}", e); + eprintln!("CRITICAL: Serde JSON Error -> {e:?}"); OperationError::SerdeJsonError })?; @@ -1183,7 +1183,7 @@ impl IdlSqliteWriteTransaction { .map(|_| ()) .map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: ruslite error"); - eprintln!("CRITICAL: rusqlite error {:?}", e); + eprintln!("CRITICAL: rusqlite error {e:?}"); OperationError::SqliteError }) } @@ -1191,7 +1191,7 @@ impl IdlSqliteWriteTransaction { pub fn write_db_d_uuid(&self, nsid: Uuid) -> Result<(), OperationError> { let data = serde_json::to_vec(&nsid).map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: Serde JSON Error"); - eprintln!("CRITICAL: Serde JSON Error -> {:?}", e); + eprintln!("CRITICAL: Serde JSON Error -> {e:?}"); OperationError::SerdeJsonError })?; @@ -1209,7 +1209,7 @@ impl IdlSqliteWriteTransaction { .map(|_| ()) .map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: rusqlite error"); - eprintln!("CRITICAL: rusqlite error {:?}", e); + eprintln!("CRITICAL: rusqlite error {e:?}"); OperationError::SqliteError }) } @@ -1217,7 +1217,7 @@ impl IdlSqliteWriteTransaction { pub fn set_db_ts_max(&self, ts: Duration) -> Result<(), OperationError> { let data = serde_json::to_vec(&ts).map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: Serde JSON Error"); - eprintln!("CRITICAL: Serde JSON Error -> {:?}", e); + eprintln!("CRITICAL: Serde JSON Error -> {e:?}"); OperationError::SerdeJsonError })?; @@ -1235,7 +1235,7 @@ impl IdlSqliteWriteTransaction { .map(|_| ()) .map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: rusqlite error"); - eprintln!("CRITICAL: rusqlite error {:?}", e); + eprintln!("CRITICAL: rusqlite error {e:?}"); OperationError::SqliteError }) } @@ -1280,7 +1280,7 @@ impl IdlSqliteWriteTransaction { pub(crate) fn set_db_index_version(&self, v: i64) -> Result<(), OperationError> { self.set_db_version_key(DBV_INDEXV, v).map_err(|e| { admin_error!(immediate = true, ?e, "CRITICAL: rusqlite error"); - eprintln!("CRITICAL: rusqlite error {:?}", e); + eprintln!("CRITICAL: rusqlite error {e:?}"); OperationError::SqliteError }) } @@ -1515,11 +1515,10 @@ impl IdlSqlite { .with_init(move |c| { c.execute_batch( format!( - "PRAGMA page_size={}; + "PRAGMA page_size={fs_page_size}; PRAGMA journal_mode=WAL; - PRAGMA wal_autocheckpoint={}; - PRAGMA wal_checkpoint(RESTART);", - fs_page_size, checkpoint_pages + PRAGMA wal_autocheckpoint={checkpoint_pages}; + PRAGMA wal_checkpoint(RESTART);" ) .as_str(), ) diff --git a/kanidmd/lib/src/be/mod.rs b/kanidmd/lib/src/be/mod.rs index 7acb14950..546efda12 100644 --- a/kanidmd/lib/src/be/mod.rs +++ b/kanidmd/lib/src/be/mod.rs @@ -965,7 +965,7 @@ impl<'a> BackendWriteTransaction<'a> { // Check that every entry has a change associated // that matches the cid? 
entries.iter().try_for_each(|e| {
-            if e.get_changelog().contains_tail_cid(cid) {
+            if e.get_changestate().contains_tail_cid(cid) {
                 Ok(())
             } else {
                 admin_error!(
@@ -1004,6 +1004,42 @@ impl<'a> BackendWriteTransaction<'a> {
         Ok(c_entries)
     }

+    #[instrument(level = "debug", name = "be::refresh", skip_all)]
+    /// This is similar to create, but used in the replication path as it skips the
+    /// modification of the RUV and the checking of CIDs since these actions are not
+    /// required during a replication refresh (else we'd create an infinite replication
+    /// loop).
+    pub fn refresh(
+        &mut self,
+        entries: Vec<Entry<EntrySealed, EntryNew>>,
+    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
+        if entries.is_empty() {
+            admin_error!("No entries provided to BE to refresh, invalid server call!");
+            return Err(OperationError::EmptyRequest);
+        }
+
+        // Assign ids to all the new entries.
+        let mut id_max = self.idlayer.get_id2entry_max_id()?;
+        let c_entries: Vec<_> = entries
+            .into_iter()
+            .map(|e| {
+                id_max += 1;
+                e.into_sealed_committed_id(id_max)
+            })
+            .collect();
+
+        self.idlayer.write_identries(c_entries.iter())?;
+
+        self.idlayer.set_id2entry_max_id(id_max);
+
+        // Now update the indexes as required.
+        for e in c_entries.iter() {
+            self.entry_index(None, Some(e))?
+        }
+
+        Ok(c_entries)
+    }
+
     #[instrument(level = "debug", name = "be::modify", skip_all)]
     pub fn modify(
         &mut self,
@@ -1019,7 +1055,7 @@ impl<'a> BackendWriteTransaction<'a> {
         assert!(post_entries.len() == pre_entries.len());

         post_entries.iter().try_for_each(|e| {
-            if e.get_changelog().contains_tail_cid(cid) {
+            if e.get_changestate().contains_tail_cid(cid) {
                 Ok(())
             } else {
                 admin_error!(
@@ -1070,28 +1106,28 @@ impl<'a> BackendWriteTransaction<'a> {
         // Now that we have a list of entries we need to partition them into
         // two sets. The entries that are tombstoned and ready to reap_tombstones, and
         // the entries that need to have their change logs trimmed.
+        //
+        // Remember, these tombstones can be reaped because they were tombstoned at time
+        // point 'cid', and since we are now "past" that minimum cid, then other servers
+        // will also be trimming these out.
+        //
+        // Note that unlike a changelog impl, we don't need to trim changestates here. We
+        // only need the RUV trimmed so that we know if other servers are lagging behind!

-        // First we trim changelogs. Go through each entry, and trim the CL, and write it back.
-        let mut entries: Vec<_> = entries.iter().map(|er| er.as_ref().clone()).collect();
-
-        entries
-            .iter_mut()
-            .try_for_each(|e| e.get_changelog_mut().trim_up_to(cid))?;
-
-        // Write down the cl trims
-        self.get_idlayer().write_identries(entries.iter())?;
-
+        // What entries are tombstones and ready to be deleted?
         let (tombstones, leftover): (Vec<_>, Vec<_>) = entries
             .into_iter()
-            .partition(|e| e.get_changelog().can_delete());
+            .partition(|e| e.get_changestate().can_delete(cid));
+
+        let ruv_idls = self.get_ruv().ruv_idls();

         // Assert that anything leftover still either is *alive* OR is a tombstone
         // and has entries in the RUV!
- let ruv_idls = self.get_ruv().ruv_idls(); if !leftover .iter() - .all(|e| e.get_changelog().is_live() || ruv_idls.contains(e.get_id())) + .all(|e| e.get_changestate().is_live() || ruv_idls.contains(e.get_id())) { admin_error!("Left over entries may be orphaned due to missing RUV entries"); return Err(OperationError::ReplInvalidRUVState); @@ -1114,7 +1150,8 @@ impl<'a> BackendWriteTransaction<'a> { let sz = id_list.len(); self.get_idlayer().delete_identry(id_list.into_iter())?; - // Finally, purge the indexes from the entries we removed. + // Finally, purge the indexes from the entries we removed. These still have + // indexes due to class=tombstone. tombstones .iter() .try_for_each(|e| self.entry_index(Some(e), None))?; @@ -1442,11 +1479,18 @@ impl<'a> BackendWriteTransaction<'a> { Ok(()) } - #[cfg(test)] - pub fn purge_idxs(&mut self) -> Result<(), OperationError> { + fn purge_idxs(&mut self) -> Result<(), OperationError> { unsafe { self.get_idlayer().purge_idxs() } } + pub(crate) fn danger_delete_all_db_content(&mut self) -> Result<(), OperationError> { + unsafe { + self.get_idlayer() + .purge_id2entry() + .and_then(|_| self.purge_idxs()) + } + } + #[cfg(test)] pub fn load_test_idl( &mut self, @@ -1604,6 +1648,12 @@ impl<'a> BackendWriteTransaction<'a> { Ok(nsid) } + /// Manually set a new domain UUID and store it into the DB. This is used + /// as part of a replication refresh. + pub fn set_db_d_uuid(&mut self, nsid: Uuid) -> Result<(), OperationError> { + self.get_idlayer().write_db_d_uuid(nsid) + } + /// This pulls the domain UUID from the database pub fn get_db_d_uuid(&mut self) -> Uuid { #[allow(clippy::expect_used)] @@ -2112,7 +2162,7 @@ mod tests { "{}/.backup_test.json", option_env!("OUT_DIR").unwrap_or("/tmp") ); - eprintln!(" ⚠️ {}", db_backup_file_name); + eprintln!(" ⚠️ {db_backup_file_name}"); run_test!(|be: &mut BackendWriteTransaction| { // Important! Need db metadata setup! be.reset_db_s_uuid().unwrap(); @@ -2168,7 +2218,7 @@ mod tests { "{}/.backup2_test.json", option_env!("OUT_DIR").unwrap_or("/tmp") ); - eprintln!(" ⚠️ {}", db_backup_file_name); + eprintln!(" ⚠️ {db_backup_file_name}"); run_test!(|be: &mut BackendWriteTransaction| { // Important! Need db metadata setup! be.reset_db_s_uuid().unwrap(); diff --git a/kanidmd/lib/src/constants/mod.rs b/kanidmd/lib/src/constants/mod.rs index 48335e6f0..4e96fe588 100644 --- a/kanidmd/lib/src/constants/mod.rs +++ b/kanidmd/lib/src/constants/mod.rs @@ -39,13 +39,15 @@ pub const SYSTEM_INDEX_VERSION: i64 = 28; * who don't muck with the levels, but it means that we can do mixed version * upgrades. 
*/ -pub const DOMAIN_LEVEL_1: u32 = 1; +pub type DomainVersion = u32; + +pub const DOMAIN_LEVEL_1: DomainVersion = 1; // The minimum supported domain functional level -pub const DOMAIN_MIN_LEVEL: u32 = DOMAIN_LEVEL_1; +pub const DOMAIN_MIN_LEVEL: DomainVersion = DOMAIN_LEVEL_1; // The target supported domain functional level -pub const DOMAIN_TGT_LEVEL: u32 = DOMAIN_LEVEL_1; +pub const DOMAIN_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_1; // The maximum supported domain functional level -pub const DOMAIN_MAX_LEVEL: u32 = DOMAIN_LEVEL_1; +pub const DOMAIN_MAX_LEVEL: DomainVersion = DOMAIN_LEVEL_1; // On test builds, define to 60 seconds #[cfg(test)] diff --git a/kanidmd/lib/src/constants/uuids.rs b/kanidmd/lib/src/constants/uuids.rs index ffbd8faed..9249c23a8 100644 --- a/kanidmd/lib/src/constants/uuids.rs +++ b/kanidmd/lib/src/constants/uuids.rs @@ -223,6 +223,7 @@ pub const UUID_SCHEMA_ATTR_SYNC_ALLOWED: Uuid = uuid!("00000000-0000-0000-0000-f pub const UUID_SCHEMA_ATTR_EMAILPRIMARY: Uuid = uuid!("00000000-0000-0000-0000-ffff00000126"); pub const UUID_SCHEMA_ATTR_EMAILALTERNATIVE: Uuid = uuid!("00000000-0000-0000-0000-ffff00000127"); pub const UUID_SCHEMA_ATTR_TOTP_IMPORT: Uuid = uuid!("00000000-0000-0000-0000-ffff00000128"); +pub const UUID_SCHEMA_ATTR_REPLICATED: Uuid = uuid!("00000000-0000-0000-0000-ffff00000129"); // System and domain infos // I'd like to strongly criticise william of the past for making poor choices about these allocations. diff --git a/kanidmd/lib/src/constants/values.rs b/kanidmd/lib/src/constants/values.rs index c94c59157..afc1a3e12 100644 --- a/kanidmd/lib/src/constants/values.rs +++ b/kanidmd/lib/src/constants/values.rs @@ -1,4 +1,4 @@ -use super::uuids::UUID_DOMAIN_INFO; +use super::uuids::{UUID_DOMAIN_INFO, UUID_SYSTEM_CONFIG, UUID_SYSTEM_INFO}; use crate::value::{PartialValue, Value}; use url::Url; @@ -37,6 +37,8 @@ lazy_static! 
{ pub static ref PVCLASS_SYSTEM_CONFIG: PartialValue = PartialValue::new_class("system_config"); pub static ref PVCLASS_TOMBSTONE: PartialValue = PartialValue::new_class("tombstone"); pub static ref PVUUID_DOMAIN_INFO: PartialValue = PartialValue::Uuid(UUID_DOMAIN_INFO); + pub static ref PVUUID_SYSTEM_CONFIG: PartialValue = PartialValue::Uuid(UUID_SYSTEM_CONFIG); + pub static ref PVUUID_SYSTEM_INFO: PartialValue = PartialValue::Uuid(UUID_SYSTEM_INFO); pub static ref CLASS_ACCESS_CONTROL_PROFILE: Value = Value::new_class("access_control_profile"); pub static ref CLASS_ACCESS_CONTROL_SEARCH: Value = Value::new_class("access_control_search"); pub static ref CLASS_ACCOUNT: Value = Value::new_class("account"); diff --git a/kanidmd/lib/src/credential/mod.rs b/kanidmd/lib/src/credential/mod.rs index 7bce8410b..bd7b4a5a1 100644 --- a/kanidmd/lib/src/credential/mod.rs +++ b/kanidmd/lib/src/credential/mod.rs @@ -13,6 +13,9 @@ use webauthn_rs::prelude::{AuthenticationResult, Passkey, SecurityKey}; use webauthn_rs_core::proto::{Credential as WebauthnCredential, CredentialV3}; use crate::be::dbvalue::{DbBackupCodeV1, DbCred, DbPasswordV1}; +use crate::repl::proto::{ + ReplBackupCodeV1, ReplCredV1, ReplPasskeyV4V1, ReplPasswordV1, ReplSecurityKeyV4V1, +}; pub mod policy; pub mod softlock; @@ -101,6 +104,30 @@ impl TryFrom for Password { } } +impl TryFrom<&ReplPasswordV1> for Password { + type Error = (); + + fn try_from(value: &ReplPasswordV1) -> Result { + match value { + ReplPasswordV1::PBKDF2 { cost, salt, hash } => Ok(Password { + material: Kdf::PBKDF2(*cost, salt.0.clone(), hash.0.clone()), + }), + ReplPasswordV1::PBKDF2_SHA1 { cost, salt, hash } => Ok(Password { + material: Kdf::PBKDF2_SHA1(*cost, salt.0.clone(), hash.0.clone()), + }), + ReplPasswordV1::PBKDF2_SHA512 { cost, salt, hash } => Ok(Password { + material: Kdf::PBKDF2_SHA512(*cost, salt.0.clone(), hash.0.clone()), + }), + ReplPasswordV1::SSHA512 { salt, hash } => Ok(Password { + material: Kdf::SSHA512(salt.0.clone(), hash.0.clone()), + }), + ReplPasswordV1::NT_MD4 { hash } => Ok(Password { + material: Kdf::NT_MD4(hash.0.clone()), + }), + } + } +} + // OpenLDAP based their PBKDF2 implementation on passlib from python, that uses a // non-standard base64 altchar set and padding that is not supported by // anything else in the world. 
To manage this, we only ever encode to base64 with @@ -420,6 +447,33 @@ impl Password { } } + pub fn to_repl_v1(&self) -> ReplPasswordV1 { + match &self.material { + Kdf::PBKDF2(cost, salt, hash) => ReplPasswordV1::PBKDF2 { + cost: *cost, + salt: salt.clone().into(), + hash: hash.clone().into(), + }, + Kdf::PBKDF2_SHA1(cost, salt, hash) => ReplPasswordV1::PBKDF2_SHA1 { + cost: *cost, + salt: salt.clone().into(), + hash: hash.clone().into(), + }, + Kdf::PBKDF2_SHA512(cost, salt, hash) => ReplPasswordV1::PBKDF2_SHA512 { + cost: *cost, + salt: salt.clone().into(), + hash: hash.clone().into(), + }, + Kdf::SSHA512(salt, hash) => ReplPasswordV1::SSHA512 { + salt: salt.clone().into(), + hash: hash.clone().into(), + }, + Kdf::NT_MD4(hash) => ReplPasswordV1::NT_MD4 { + hash: hash.clone().into(), + }, + } + } + pub fn requires_upgrade(&self) -> bool { match &self.material { Kdf::PBKDF2_SHA512(cost, salt, hash) | Kdf::PBKDF2(cost, salt, hash) => { @@ -447,6 +501,16 @@ impl TryFrom for BackupCodes { } } +impl TryFrom<&ReplBackupCodeV1> for BackupCodes { + type Error = (); + + fn try_from(value: &ReplBackupCodeV1) -> Result { + Ok(BackupCodes { + code_set: value.codes.iter().cloned().collect(), + }) + } +} + impl BackupCodes { pub fn new(code_set: HashSet) -> Self { BackupCodes { code_set } @@ -465,6 +529,12 @@ impl BackupCodes { code_set: self.code_set.clone(), } } + + pub fn to_repl_v1(&self) -> ReplBackupCodeV1 { + ReplBackupCodeV1 { + codes: self.code_set.iter().cloned().collect(), + } + } } #[derive(Clone, Debug, PartialEq)] @@ -753,6 +823,85 @@ impl TryFrom for Credential { } impl Credential { + pub fn try_from_repl_v1(rc: &ReplCredV1) -> Result<(String, Self), ()> { + match rc { + ReplCredV1::TmpWn { tag, set } => { + let m_uuid: Option = set.get(0).map(|v| v.uuid); + + let v_webauthn = set + .iter() + .map(|passkey| (passkey.tag.clone(), passkey.key.clone())) + .collect(); + let type_ = CredentialType::Webauthn(v_webauthn); + + match (m_uuid, type_.is_valid()) { + (Some(uuid), true) => Ok((tag.clone(), Credential { type_, uuid })), + _ => Err(()), + } + } + ReplCredV1::Password { + tag, + password, + uuid, + } => { + let v_password = Password::try_from(password)?; + let type_ = CredentialType::Password(v_password); + if type_.is_valid() { + Ok((tag.clone(), Credential { type_, uuid: *uuid })) + } else { + Err(()) + } + } + ReplCredV1::GenPassword { + tag, + password, + uuid, + } => { + let v_password = Password::try_from(password)?; + let type_ = CredentialType::GeneratedPassword(v_password); + if type_.is_valid() { + Ok((tag.clone(), Credential { type_, uuid: *uuid })) + } else { + Err(()) + } + } + ReplCredV1::PasswordMfa { + tag, + password, + totp, + backup_code, + webauthn, + uuid, + } => { + let v_password = Password::try_from(password)?; + + let v_totp = totp + .iter() + .map(|(l, dbt)| Totp::try_from(dbt).map(|t| (l.clone(), t))) + .collect::, _>>()?; + + let v_backup_code = match backup_code { + Some(rbc) => Some(BackupCodes::try_from(rbc)?), + None => None, + }; + + let v_webauthn = webauthn + .iter() + .map(|sk| (sk.tag.clone(), sk.key.clone())) + .collect(); + + let type_ = + CredentialType::PasswordMfa(v_password, v_totp, v_webauthn, v_backup_code); + + if type_.is_valid() { + Ok((tag.clone(), Credential { type_, uuid: *uuid })) + } else { + Err(()) + } + } + } + } + /// Create a new credential that contains a CredentialType::Password pub fn new_password_only( policy: &CryptoPolicy, @@ -807,8 +956,7 @@ impl Credential { let mut nmap = map.clone(); if nmap.insert(label.clone(), 
cred).is_some() {
                     return Err(OperationError::InvalidAttribute(format!(
-                        "Webauthn label '{:?}' already exists",
-                        label
+                        "Webauthn label '{label:?}' already exists"
                     )));
                 }
                 CredentialType::PasswordMfa(pw.clone(), totp.clone(), nmap, backup_code.clone())
@@ -838,8 +986,7 @@ impl Credential {
                 let mut nmap = map.clone();
                 if nmap.remove(label).is_none() {
                     return Err(OperationError::InvalidAttribute(format!(
-                        "Removing Webauthn token with label '{:?}': does not exist",
-                        label
+                        "Removing Webauthn token with label '{label:?}': does not exist"
                     )));
                 }
                 if nmap.is_empty() {
@@ -973,6 +1120,51 @@ impl Credential {
         }
     }

+    /// Extract this credential into its serialisable replication form
+    pub fn to_repl_v1(&self, tag: String) -> ReplCredV1 {
+        let uuid = self.uuid;
+        match &self.type_ {
+            CredentialType::Password(pw) => ReplCredV1::Password {
+                tag,
+                password: pw.to_repl_v1(),
+                uuid,
+            },
+            CredentialType::GeneratedPassword(pw) => ReplCredV1::GenPassword {
+                tag,
+                password: pw.to_repl_v1(),
+                uuid,
+            },
+            CredentialType::PasswordMfa(pw, totp, map, backup_code) => ReplCredV1::PasswordMfa {
+                tag,
+                password: pw.to_repl_v1(),
+                totp: totp
+                    .iter()
+                    .map(|(l, t)| (l.clone(), t.to_repl_v1()))
+                    .collect(),
+                backup_code: backup_code.as_ref().map(|b| b.to_repl_v1()),
+                webauthn: map
+                    .iter()
+                    .map(|(k, v)| ReplSecurityKeyV4V1 {
+                        tag: k.clone(),
+                        key: v.clone(),
+                    })
+                    .collect(),
+                uuid,
+            },
+            CredentialType::Webauthn(map) => ReplCredV1::TmpWn {
+                tag,
+                set: map
+                    .iter()
+                    .map(|(k, v)| ReplPasskeyV4V1 {
+                        uuid,
+                        tag: k.clone(),
+                        key: v.clone(),
+                    })
+                    .collect(),
+            },
+        }
+    }
+
     pub(crate) fn update_password(&self, pw: Password) -> Self {
         let type_ = match &self.type_ {
             CredentialType::Password(_) | CredentialType::GeneratedPassword(_) => {
diff --git a/kanidmd/lib/src/credential/totp.rs b/kanidmd/lib/src/credential/totp.rs
index c7da0dad8..816577b71 100644
--- a/kanidmd/lib/src/credential/totp.rs
+++ b/kanidmd/lib/src/credential/totp.rs
@@ -8,6 +8,7 @@ use openssl::sign::Signer;
 use rand::prelude::*;

 use crate::be::dbvalue::{DbTotpAlgoV1, DbTotpV1};
+use crate::repl::proto::{ReplTotpAlgoV1, ReplTotpV1};

 // This is 64 bits of entropy, as the examples in https://tools.ietf.org/html/rfc6238 show.
 const SECRET_SIZE_BYTES: usize = 8;
@@ -115,6 +116,27 @@ impl TryFrom<DbTotpV1> for Totp {
     }
 }

+impl TryFrom<&ReplTotpV1> for Totp {
+    type Error = ();
+
+    fn try_from(value: &ReplTotpV1) -> Result<Self, Self::Error> {
+        let algo = match value.algo {
+            ReplTotpAlgoV1::S1 => TotpAlgo::Sha1,
+            ReplTotpAlgoV1::S256 => TotpAlgo::Sha256,
+            ReplTotpAlgoV1::S512 => TotpAlgo::Sha512,
+        };
+
+        let digits = TotpDigits::try_from(value.digits)?;
+
+        Ok(Totp {
+            secret: value.key.0.clone(),
+            step: value.step,
+            algo,
+            digits,
+        })
+    }
+}
+
 impl TryFrom for Totp {
     type Error = ();

@@ -170,6 +192,19 @@ impl Totp {
         }
     }

+    pub(crate) fn to_repl_v1(&self) -> ReplTotpV1 {
+        ReplTotpV1 {
+            key: self.secret.clone().into(),
+            step: self.step,
+            algo: match self.algo {
+                TotpAlgo::Sha1 => ReplTotpAlgoV1::S1,
+                TotpAlgo::Sha256 => ReplTotpAlgoV1::S256,
+                TotpAlgo::Sha512 => ReplTotpAlgoV1::S512,
+            },
+            digits: self.digits.into(),
+        }
+    }
+
     fn digest(&self, counter: u64) -> Result<u32, TotpError> {
         let hmac = self.algo.digest(&self.secret, counter)?;
         // Now take the hmac and encode it as hotp expects.
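The `to_repl_v1` serialisers above are deliberately paired with the `TryFrom<&Repl*V1>` conversions, so a replication consumer can rebuild an identical credential from the wire form. A minimal round-trip sketch, written as a hypothetical in-crate test (it assumes the pre-existing `Totp::generate_secure`, `TOTP_DEFAULT_STEP` and `do_totp_duration_from_epoch` helpers from totp.rs, which this excerpt does not show):

use std::time::Duration;

// Hypothetical round-trip test sketch - not part of this patch.
#[test]
fn totp_repl_v1_roundtrip() {
    // Enrol a fresh TOTP as the server would.
    let t1 = Totp::generate_secure(TOTP_DEFAULT_STEP);

    // Serialise to the replication wire form, then rebuild it as a consumer would.
    let t2 = Totp::try_from(&t1.to_repl_v1()).expect("failed to rehydrate totp");

    // Both instances must yield the same code at the same instant.
    let at = Duration::from_secs(1_585_368_920);
    assert_eq!(
        t1.do_totp_duration_from_epoch(&at).expect("totp digest failed"),
        t2.do_totp_duration_from_epoch(&at).expect("totp digest failed")
    );
}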
diff --git a/kanidmd/lib/src/entry.rs b/kanidmd/lib/src/entry.rs index 0a6975bcb..2fd92e074 100644 --- a/kanidmd/lib/src/entry.rs +++ b/kanidmd/lib/src/entry.rs @@ -51,45 +51,20 @@ use crate::idm::ldap::ldap_vattr_map; use crate::modify::{Modify, ModifyInvalid, ModifyList, ModifyValid}; use crate::prelude::*; use crate::repl::cid::Cid; -use crate::repl::entry::EntryChangelog; +use crate::repl::proto::ReplEntryV1; + +// use crate::repl::entry::EntryChangelog; +use crate::repl::entry::EntryChangeState; + use crate::schema::{SchemaAttribute, SchemaClass, SchemaTransaction}; use crate::value::{ IndexType, IntentTokenState, Oauth2Session, PartialValue, Session, SyntaxType, Value, }; use crate::valueset::{self, ValueSet}; -// use std::convert::TryFrom; -// use std::str::FromStr; - -// make a trait entry for everything to adhere to? -// * How to get indexes out? -// * How to track pending diffs? - -// Entry is really similar to serde Value, but limits the possibility -// of what certain types could be. -// -// The idea of an entry is that we have -// an entry that looks like: -// -// { -// 'class': ['object', ...], -// 'attr': ['value', ...], -// 'attr': ['value', ...], -// ... -// } -// -// When we send this as a result to clients, we could embed other objects as: -// -// { -// 'attr': [ -// 'value': { -// }, -// ], -// } -// - pub type EntryInitNew = Entry; pub type EntryInvalidNew = Entry; +pub type EntryRefreshNew = Entry; pub type EntrySealedNew = Entry; pub type EntrySealedCommitted = Entry; pub type EntryInvalidCommitted = Entry; @@ -129,7 +104,14 @@ pub struct EntryInit; #[derive(Clone, Debug)] pub struct EntryInvalid { cid: Cid, - eclog: EntryChangelog, + // eclog: EntryChangelog, + ecstate: EntryChangeState, +} + +// Alternate path - this entry came from a full refresh, and already has an entry change state. +#[derive(Clone, Debug)] +pub struct EntryRefresh { + ecstate: EntryChangeState, } /* | @@ -141,8 +123,8 @@ pub struct EntryInvalid { pub struct EntryValid { // Asserted with schema, so we know it has a UUID now ... uuid: Uuid, - cid: Cid, - eclog: EntryChangelog, + // eclog: EntryChangelog, + ecstate: EntryChangeState, } /* | @@ -154,7 +136,8 @@ pub struct EntryValid { #[derive(Clone, Debug)] pub struct EntrySealed { uuid: Uuid, - eclog: EntryChangelog, + // eclog: EntryChangelog, + ecstate: EntryChangeState, } /* | @@ -509,10 +492,11 @@ impl Entry { * This is because we need to capture the set_last_changed attribute in * the create transition. 
*/ - let eclog = EntryChangelog::new(cid.clone(), self.attrs.clone(), schema); + // let eclog = EntryChangelog::new(cid.clone(), self.attrs.clone(), schema); + let ecstate = EntryChangeState::new(&cid, &self.attrs, schema); Entry { - valid: EntryInvalid { cid, eclog }, + valid: EntryInvalid { cid, ecstate }, state: EntryNew, attrs: self.attrs, } @@ -528,10 +512,11 @@ impl Entry { let cid = Cid::new_zero(); self.set_last_changed(cid.clone()); - let eclog = EntryChangelog::new_without_schema(cid.clone(), self.attrs.clone()); + // let eclog = EntryChangelog::new_without_schema(cid.clone(), self.attrs.clone()); + let ecstate = EntryChangeState::new_without_schema(&cid, &self.attrs); Entry { - valid: EntryInvalid { cid, eclog }, + valid: EntryInvalid { cid, ecstate }, state: EntryNew, attrs: self.attrs, } @@ -541,12 +526,12 @@ impl Entry { pub unsafe fn into_valid_new(mut self) -> Entry { let cid = Cid::new_zero(); self.set_last_changed(cid.clone()); - let eclog = EntryChangelog::new_without_schema(cid.clone(), self.attrs.clone()); + // let eclog = EntryChangelog::new_without_schema(cid.clone(), self.attrs.clone()); + let ecstate = EntryChangeState::new_without_schema(&cid, &self.attrs); Entry { valid: EntryValid { - cid, - eclog, + ecstate, uuid: self.get_uuid().expect("Invalid uuid"), }, state: EntryNew, @@ -558,10 +543,11 @@ impl Entry { pub unsafe fn into_sealed_committed(mut self) -> Entry { let cid = Cid::new_zero(); self.set_last_changed(cid.clone()); - let eclog = EntryChangelog::new_without_schema(cid, self.attrs.clone()); + // let eclog = EntryChangelog::new_without_schema(cid, self.attrs.clone()); + let ecstate = EntryChangeState::new_without_schema(&cid, &self.attrs); let uuid = self.get_uuid().unwrap_or_else(Uuid::new_v4); Entry { - valid: EntrySealed { uuid, eclog }, + valid: EntrySealed { uuid, ecstate }, state: EntryCommitted { id: 0 }, attrs: self.attrs, } @@ -571,12 +557,13 @@ impl Entry { pub unsafe fn into_sealed_new(mut self) -> Entry { let cid = Cid::new_zero(); self.set_last_changed(cid.clone()); - let eclog = EntryChangelog::new_without_schema(cid, self.attrs.clone()); + // let eclog = EntryChangelog::new_without_schema(cid, self.attrs.clone()); + let ecstate = EntryChangeState::new_without_schema(&cid, &self.attrs); Entry { valid: EntrySealed { uuid: self.get_uuid().expect("Invalid uuid"), - eclog, + ecstate, }, state: EntryNew, attrs: self.attrs, @@ -605,21 +592,24 @@ impl Entry { } } -impl Entry { - // This is only used in tests today, but I don't want to cfg test it. - pub(crate) fn get_uuid(&self) -> Option { - self.attrs.get("uuid").and_then(|vs| vs.to_uuid_single()) - } +impl Entry { + pub fn from_repl_entry_v1(repl_entry: &ReplEntryV1) -> Result { + // From the entry, we have to rebuild the ecstate and the attrs. + let (ecstate, attrs) = repl_entry.rehydrate()?; - /// Validate that this entry and its attribute-value sets are conformant to the system's' - /// schema and the relevant syntaxes. + Ok(Entry { + valid: EntryRefresh { ecstate }, + state: EntryNew, + attrs, + }) + } +} + +impl Entry { pub fn validate( self, schema: &dyn SchemaTransaction, ) -> Result, SchemaError> { - let schema_classes = schema.get_classes(); - let schema_attributes = schema.get_attributes(); - let uuid: Uuid = self .attrs .get("uuid") @@ -633,235 +623,48 @@ impl Entry { let ne = Entry { valid: EntryValid { uuid, - cid: self.valid.cid, - eclog: self.valid.eclog, + ecstate: self.valid.ecstate, }, state: self.state, attrs: self.attrs, }; - // Now validate it! 
-        trace!(?ne.attrs, "Entry::validate -> target");
-        // We scope here to limit the time of borrow of ne.
-        {
-            // First, check we have class on the object ....
-            if !ne.attribute_pres("class") {
-                // lrequest_error!("Missing attribute class");
-                return Err(SchemaError::NoClassFound);
-            }
+        ne.validate(schema)
+    }
+}

-            // Do we have extensible?
-            let extensible = ne.attribute_equality("class", &PVCLASS_EXTENSIBLE);
+impl<STATE> Entry<EntryInvalid, STATE> {
+    // This is only used in tests today, but I don't want to cfg test it.
+    pub(crate) fn get_uuid(&self) -> Option<Uuid> {
+        self.attrs.get("uuid").and_then(|vs| vs.to_uuid_single())
+    }

-            let entry_classes = ne.get_ava_set("class").ok_or_else(|| {
-                admin_debug!("Attribute 'class' missing from entry");
-                SchemaError::NoClassFound
+    /// Validate that this entry and its attribute-value sets are conformant to the system's
+    /// schema and the relevant syntaxes.
+    pub fn validate(
+        self,
+        schema: &dyn SchemaTransaction,
+    ) -> Result<Entry<EntryValid, STATE>, SchemaError> {
+        let uuid: Uuid = self
+            .attrs
+            .get("uuid")
+            .ok_or_else(|| SchemaError::MissingMustAttribute(vec!["uuid".to_string()]))
+            .and_then(|vs| {
+                vs.to_uuid_single()
+                    .ok_or_else(|| SchemaError::MissingMustAttribute(vec!["uuid".to_string()]))
             })?;

-            let mut invalid_classes = Vec::with_capacity(0);
-            let mut classes: Vec<&SchemaClass> = Vec::with_capacity(entry_classes.len());
+        // Build the new valid entry ...
+        let ne = Entry {
+            valid: EntryValid {
+                uuid,
+                ecstate: self.valid.ecstate,
+            },
+            state: self.state,
+            attrs: self.attrs,
+        };

-            // We need to keep the btreeset of entry classes here so we can check the
-            // requires and excludes.
-            let entry_classes = if let Some(ec) = entry_classes.as_iutf8_set() {
-                ec.iter()
-                    .for_each(|s| match schema_classes.get(s.as_str()) {
-                        Some(x) => classes.push(x),
-                        None => {
-                            admin_debug!("invalid class: {:?}", s);
-                            invalid_classes.push(s.to_string())
-                        }
-                    });
-                ec
-            } else {
-                admin_debug!("corrupt class attribute");
-                return Err(SchemaError::NoClassFound);
-            };
-
-            if !invalid_classes.is_empty() {
-                return Err(SchemaError::InvalidClass(invalid_classes));
-            };
-
-            // Now determine the set of excludes and requires we have, and then
-            // assert we don't violate them.
-
-            let supplements_classes: Vec<_> = classes
-                .iter()
-                .flat_map(|cls| cls.systemsupplements.iter().chain(cls.supplements.iter()))
-                .collect();
-
-            // So long as one supplement is present we can continue.
-            let valid_supplements = if supplements_classes.is_empty() {
-                // No need to check.
- true - } else { - supplements_classes - .iter() - .any(|class| entry_classes.contains(class.as_str())) - }; - - if !valid_supplements { - admin_warn!( - "Validation error, the following possible supplement classes are missing - {:?}", - supplements_classes - ); - let supplements_classes = - supplements_classes.iter().map(|s| s.to_string()).collect(); - return Err(SchemaError::SupplementsNotSatisfied(supplements_classes)); - } - - let excludes_classes: Vec<_> = classes - .iter() - .flat_map(|cls| cls.systemexcludes.iter().chain(cls.excludes.iter())) - .collect(); - - let mut invalid_excludes = Vec::with_capacity(0); - - excludes_classes.iter().for_each(|class| { - if entry_classes.contains(class.as_str()) { - invalid_excludes.push(class.to_string()) - } - }); - - if !invalid_excludes.is_empty() { - admin_warn!( - "Validation error, the following excluded classes are present - {:?}", - invalid_excludes - ); - return Err(SchemaError::ExcludesNotSatisfied(invalid_excludes)); - } - - // What this is really doing is taking a set of classes, and building an - // "overall" class that describes this exact object for checking. IE we - // build a super must/may set from the small class must/may sets. - - // for each class - // add systemmust/must and systemmay/may to their lists - // add anything from must also into may - - // Now from the set of valid classes make a list of must/may - // - // NOTE: We still need this on extensible, because we still need to satisfy - // our other must conditions as well! - let must: Result, _> = classes - .iter() - // Join our class systemmmust + must into one iter - .flat_map(|cls| cls.systemmust.iter().chain(cls.must.iter())) - .map(|s| { - // This should NOT fail - if it does, it means our schema is - // in an invalid state! - schema_attributes.get(s).ok_or(SchemaError::Corrupted) - }) - .collect(); - - let must = must?; - - // Check that all must are inplace - // for each attr in must, check it's present on our ent - let mut missing_must = Vec::with_capacity(0); - must.iter().for_each(|attr| { - let avas = ne.get_ava_set(&attr.name); - if avas.is_none() { - missing_must.push(attr.name.to_string()); - } - }); - - if !missing_must.is_empty() { - admin_warn!( - "Validation error, the following required (must) attributes are missing - {:?}", - missing_must - ); - return Err(SchemaError::MissingMustAttribute(missing_must)); - } - - if extensible { - ne.attrs.iter().try_for_each(|(attr_name, avas)| { - match schema_attributes.get(attr_name) { - Some(a_schema) => { - // Now, for each type we do a *full* check of the syntax - // and validity of the ava. - if a_schema.phantom { - admin_warn!( - "Rejecting attempt to add phantom attribute to extensible object: {}", - attr_name - ); - Err(SchemaError::PhantomAttribute(attr_name.to_string())) - } else { - a_schema.validate_ava(attr_name.as_str(), avas) - // .map_err(|e| lrequest_error!("Failed to validate: {}", attr_name);) - } - } - None => { - admin_error!( - "Invalid Attribute {}, undefined in schema_attributes", - attr_name.to_string() - ); - Err(SchemaError::InvalidAttribute( - attr_name.to_string() - )) - } - } - })?; - } else { - // Note - we do NOT need to check phantom attributes here because they are - // not allowed to exist in the class, which means a phantom attribute can't - // be in the may/must set, and would FAIL our normal checks anyway. - - // The set of "may" is a combination of may and must, since we have already - // asserted that all must requirements are fulfilled. 
This allows us to - // perform extended attribute checking in a single pass. - let may: Result, _> = classes - .iter() - // Join our class systemmmust + must + systemmay + may into one. - .flat_map(|cls| { - cls.systemmust - .iter() - .chain(cls.must.iter()) - .chain(cls.systemmay.iter()) - .chain(cls.may.iter()) - }) - .map(|s| { - // This should NOT fail - if it does, it means our schema is - // in an invalid state! - Ok((s, schema_attributes.get(s).ok_or(SchemaError::Corrupted)?)) - }) - .collect(); - - let may = may?; - - // TODO #70: Error needs to say what is missing - // We need to return *all* missing attributes, not just the first error - // we find. This will probably take a rewrite of the function definition - // to return a result<_, vec> and for the schema errors to take - // information about what is invalid. It's pretty nontrivial. - - // Check that any other attributes are in may - // for each attr on the object, check it's in the may+must set - ne.attrs.iter().try_for_each(|(attr_name, avas)| { - match may.get(attr_name) { - Some(a_schema) => { - // Now, for each type we do a *full* check of the syntax - // and validity of the ava. - a_schema.validate_ava(attr_name.as_str(), avas) - // .map_err(|e| lrequest_error!("Failed to validate: {}", attr_name); - } - None => { - admin_error!( - "{} - not found in the list of valid attributes for this set of classes - valid attributes are {:?}", - attr_name.to_string(), - may.keys().collect::>() - ); - Err(SchemaError::AttributeNotValidForClass( - attr_name.to_string() - )) - } - } - })?; - } - } // unborrow ne. - - // Well, we got here, so okay! - Ok(ne) + ne.validate(schema) } } @@ -890,9 +693,8 @@ impl Entry { let uuid = self.get_uuid().expect("Invalid uuid"); Entry { valid: EntryValid { - cid: self.valid.cid, uuid, - eclog: self.valid.eclog, + ecstate: self.valid.ecstate, }, state: EntryNew, attrs: self.attrs, @@ -904,8 +706,8 @@ impl Entry { // This will put the modify ahead of the recycle transition. self.add_ava("class", Value::new_class("recycled")); - // Last step before we proceed. - self.valid.eclog.recycled(&self.valid.cid); + // Change state repl doesn't need this flag + // self.valid.ecstate.recycled(&self.valid.cid); Entry { valid: self.valid, @@ -919,8 +721,8 @@ impl Entry { // This will put the modify ahead of the revive transition. self.remove_ava("class", &PVCLASS_RECYCLED); - // Last step before we proceed. 
- self.valid.eclog.revive(&self.valid.cid); + // Change state repl doesn't need this flag + // self.valid.ecstate.revive(&self.valid.cid); Entry { valid: self.valid, @@ -937,9 +739,8 @@ impl Entry { let uuid = self.get_uuid().expect("Invalid uuid"); Entry { valid: EntryValid { - cid: self.valid.cid, uuid, - eclog: self.valid.eclog, + ecstate: self.valid.ecstate, }, state: EntryNew, attrs: self.attrs, @@ -952,7 +753,7 @@ impl Entry { Entry { valid: EntrySealed { uuid, - eclog: self.valid.eclog, + ecstate: self.valid.ecstate, }, state: EntryCommitted { id: 0 }, attrs: self.attrs, @@ -982,9 +783,8 @@ impl Entry { let uuid = self.get_uuid().unwrap_or_else(Uuid::new_v4); Entry { valid: EntryValid { - cid: self.valid.cid, uuid, - eclog: self.valid.eclog, + ecstate: self.valid.ecstate, }, state: EntryCommitted { id: 0 }, attrs: self.attrs, @@ -999,7 +799,7 @@ impl Entry { Entry { valid: EntrySealed { uuid, - eclog: self.valid.eclog, + ecstate: self.valid.ecstate, }, state: self.state, attrs: self.attrs, @@ -1059,9 +859,11 @@ impl Entry { self } + /* pub fn get_changelog_mut(&mut self) -> &mut EntryChangelog { &mut self.valid.eclog } + */ /// Insert a claim to this entry. This claim can NOT be persisted to disk, this is only /// used during a single Event session. @@ -1139,11 +941,11 @@ impl Entry { pub(crate) fn get_uuid2rdn(&self) -> String { self.attrs .get("spn") - .and_then(|vs| vs.to_proto_string_single().map(|v| format!("spn={}", v))) + .and_then(|vs| vs.to_proto_string_single().map(|v| format!("spn={v}"))) .or_else(|| { self.attrs .get("name") - .and_then(|vs| vs.to_proto_string_single().map(|v| format!("name={}", v))) + .and_then(|vs| vs.to_proto_string_single().map(|v| format!("name={v}"))) }) .unwrap_or_else(|| format!("uuid={}", self.get_uuid().as_hyphenated())) } @@ -1535,10 +1337,11 @@ impl Entry { .and_then(|vs| vs.as_cid_set()) .and_then(|set| set.iter().next().cloned())?; - let eclog = EntryChangelog::new_without_schema(cid, attrs.clone()); + // let eclog = EntryChangelog::new_without_schema(cid, attrs.clone()); + let ecstate = EntryChangeState::new_without_schema(&cid, &attrs); Some(Entry { - valid: EntrySealed { uuid, eclog }, + valid: EntrySealed { uuid, ecstate }, state: EntryCommitted { id }, attrs, }) @@ -1593,34 +1396,33 @@ impl Entry { /// Convert this recycled entry, into a tombstone ready for reaping. pub fn to_tombstone(&self, cid: Cid) -> Entry { - let mut eclog = self.valid.eclog.clone(); + let mut ecstate = self.valid.ecstate.clone(); // Duplicate this to a tombstone entry + let mut attrs_new: Eattrs = Map::new(); + let class_ava = vs_iutf8!["object", "tombstone"]; let last_mod_ava = vs_cid![cid.clone()]; - let mut attrs_new: Eattrs = Map::new(); - attrs_new.insert(AttrString::from("uuid"), vs_uuid![self.get_uuid()]); attrs_new.insert(AttrString::from("class"), class_ava); attrs_new.insert(AttrString::from("last_modified_cid"), last_mod_ava); // ⚠️ No return from this point! - eclog.tombstone(&cid, attrs_new.clone()); + ecstate.tombstone(&cid); Entry { - valid: EntryInvalid { cid, eclog }, + valid: EntryInvalid { cid, ecstate }, state: self.state.clone(), attrs: attrs_new, } } /// Given a current transaction change identifier, mark this entry as valid and committed. 
- pub fn into_valid(self, cid: Cid, eclog: EntryChangelog) -> Entry { + pub fn into_valid(self, ecstate: EntryChangeState) -> Entry { Entry { valid: EntryValid { uuid: self.valid.uuid, - cid, - eclog, + ecstate, }, state: self.state, attrs: self.attrs, @@ -1633,32 +1435,258 @@ impl Entry { results: &mut Vec>, ) { self.valid - .eclog + .ecstate .verify(schema, &self.attrs, self.state.id, results); } } impl Entry { - pub fn invalidate(self, eclog: EntryChangelog) -> Entry { + fn validate( + self, + schema: &dyn SchemaTransaction, + ) -> Result, SchemaError> { + let schema_classes = schema.get_classes(); + let schema_attributes = schema.get_attributes(); + + // Now validate it! + trace!(?self.attrs, "Entry::validate -> target"); + + // First, check we have class on the object .... + if !self.attribute_pres("class") { + // lrequest_error!("Missing attribute class"); + return Err(SchemaError::NoClassFound); + } + + // Do we have extensible? + let extensible = self.attribute_equality("class", &PVCLASS_EXTENSIBLE); + + let entry_classes = self.get_ava_set("class").ok_or_else(|| { + admin_debug!("Attribute 'class' missing from entry"); + SchemaError::NoClassFound + })?; + let mut invalid_classes = Vec::with_capacity(0); + + let mut classes: Vec<&SchemaClass> = Vec::with_capacity(entry_classes.len()); + + // We need to keep the btreeset of entry classes here so we can check the + // requires and excludes. + let entry_classes = if let Some(ec) = entry_classes.as_iutf8_set() { + ec.iter() + .for_each(|s| match schema_classes.get(s.as_str()) { + Some(x) => classes.push(x), + None => { + admin_debug!("invalid class: {:?}", s); + invalid_classes.push(s.to_string()) + } + }); + ec + } else { + admin_debug!("corrupt class attribute"); + return Err(SchemaError::NoClassFound); + }; + + if !invalid_classes.is_empty() { + return Err(SchemaError::InvalidClass(invalid_classes)); + }; + + // Now determine the set of excludes and requires we have, and then + // assert we don't violate them. + + let supplements_classes: Vec<_> = classes + .iter() + .flat_map(|cls| cls.systemsupplements.iter().chain(cls.supplements.iter())) + .collect(); + + // So long as one supplement is present we can continue. + let valid_supplements = if supplements_classes.is_empty() { + // No need to check. + true + } else { + supplements_classes + .iter() + .any(|class| entry_classes.contains(class.as_str())) + }; + + if !valid_supplements { + admin_warn!( + "Validation error, the following possible supplement classes are missing - {:?}", + supplements_classes + ); + let supplements_classes = supplements_classes.iter().map(|s| s.to_string()).collect(); + return Err(SchemaError::SupplementsNotSatisfied(supplements_classes)); + } + + let excludes_classes: Vec<_> = classes + .iter() + .flat_map(|cls| cls.systemexcludes.iter().chain(cls.excludes.iter())) + .collect(); + + let mut invalid_excludes = Vec::with_capacity(0); + + excludes_classes.iter().for_each(|class| { + if entry_classes.contains(class.as_str()) { + invalid_excludes.push(class.to_string()) + } + }); + + if !invalid_excludes.is_empty() { + admin_warn!( + "Validation error, the following excluded classes are present - {:?}", + invalid_excludes + ); + return Err(SchemaError::ExcludesNotSatisfied(invalid_excludes)); + } + + // What this is really doing is taking a set of classes, and building an + // "overall" class that describes this exact object for checking. IE we + // build a super must/may set from the small class must/may sets. 
+
+        // for each class
+        // add systemmust/must and systemmay/may to their lists
+        // add anything from must also into may
+
+        // Now from the set of valid classes make a list of must/may
+        //
+        // NOTE: We still need this on extensible, because we still need to satisfy
+        // our other must conditions as well!
+        let must: Result<Vec<&SchemaAttribute>, _> = classes
+            .iter()
+            // Join our class systemmust + must into one iter
+            .flat_map(|cls| cls.systemmust.iter().chain(cls.must.iter()))
+            .map(|s| {
+                // This should NOT fail - if it does, it means our schema is
+                // in an invalid state!
+                schema_attributes.get(s).ok_or(SchemaError::Corrupted)
+            })
+            .collect();
+
+        let must = must?;
+
+        // Check that all must are in place
+        // for each attr in must, check it's present on our ent
+        let mut missing_must = Vec::with_capacity(0);
+        must.iter().for_each(|attr| {
+            let avas = self.get_ava_set(&attr.name);
+            if avas.is_none() {
+                missing_must.push(attr.name.to_string());
+            }
+        });
+
+        if !missing_must.is_empty() {
+            admin_warn!(
+                "Validation error, the following required (must) attributes are missing - {:?}",
+                missing_must
+            );
+            return Err(SchemaError::MissingMustAttribute(missing_must));
+        }
+
+        if extensible {
+            self.attrs.iter().try_for_each(|(attr_name, avas)| {
+                match schema_attributes.get(attr_name) {
+                    Some(a_schema) => {
+                        // Now, for each type we do a *full* check of the syntax
+                        // and validity of the ava.
+                        if a_schema.phantom {
+                            admin_warn!(
+                                "Rejecting attempt to add phantom attribute to extensible object: {}",
+                                attr_name
+                            );
+                            Err(SchemaError::PhantomAttribute(attr_name.to_string()))
+                        } else {
+                            a_schema.validate_ava(attr_name.as_str(), avas)
+                            // .map_err(|e| lrequest_error!("Failed to validate: {}", attr_name);)
+                        }
+                    }
+                    None => {
+                        admin_error!(
+                            "Invalid Attribute {}, undefined in schema_attributes",
+                            attr_name.to_string()
+                        );
+                        Err(SchemaError::InvalidAttribute(
+                            attr_name.to_string()
+                        ))
+                    }
+                }
+            })?;
+        } else {
+            // Note - we do NOT need to check phantom attributes here because they are
+            // not allowed to exist in the class, which means a phantom attribute can't
+            // be in the may/must set, and would FAIL our normal checks anyway.
+
+            // The set of "may" is a combination of may and must, since we have already
+            // asserted that all must requirements are fulfilled. This allows us to
+            // perform extended attribute checking in a single pass.
+            let may: Result<Map<&AttrString, &SchemaAttribute>, _> = classes
+                .iter()
+                // Join our class systemmust + must + systemmay + may into one.
+                .flat_map(|cls| {
+                    cls.systemmust
+                        .iter()
+                        .chain(cls.must.iter())
+                        .chain(cls.systemmay.iter())
+                        .chain(cls.may.iter())
+                })
+                .map(|s| {
+                    // This should NOT fail - if it does, it means our schema is
+                    // in an invalid state!
+                    Ok((s, schema_attributes.get(s).ok_or(SchemaError::Corrupted)?))
+                })
+                .collect();
+
+            let may = may?;
+
+            // TODO #70: Error needs to say what is missing
+            // We need to return *all* missing attributes, not just the first error
+            // we find. This will probably take a rewrite of the function definition
+            // to return a result<_, vec> and for the schema errors to take
+            // information about what is invalid. It's pretty nontrivial.
+
+            // Check that any other attributes are in may
+            // for each attr on the object, check it's in the may+must set
+            self.attrs.iter().try_for_each(|(attr_name, avas)| {
+                match may.get(attr_name) {
+                    Some(a_schema) => {
+                        // Now, for each type we do a *full* check of the syntax
+                        // and validity of the ava.
+ a_schema.validate_ava(attr_name.as_str(), avas) + // .map_err(|e| lrequest_error!("Failed to validate: {}", attr_name); + } + None => { + admin_error!( + "{} - not found in the list of valid attributes for this set of classes - valid attributes are {:?}", + attr_name.to_string(), + may.keys().collect::>() + ); + Err(SchemaError::AttributeNotValidForClass( + attr_name.to_string() + )) + } + } + })?; + } + + // Well, we got here, so okay! + Ok(self) + } + + pub fn invalidate(self, cid: Cid, ecstate: EntryChangeState) -> Entry { Entry { - valid: EntryInvalid { - cid: self.valid.cid, - eclog, - }, + valid: EntryInvalid { cid, ecstate }, state: self.state, attrs: self.attrs, } } - pub fn seal(self, _schema: &dyn SchemaTransaction) -> Entry { - let EntryValid { - cid: _, - uuid, - eclog, - } = self.valid; + pub fn seal(self, schema: &dyn SchemaTransaction) -> Entry { + let EntryValid { uuid, mut ecstate } = self.valid; + + // Remove anything from the ecstate that is not a replicated attribute in the schema. + // This is to allow ecstate equality to work, but also to just prevent ruv updates and + // replicating things that only touched or changed phantom attrs. + ecstate.retain(|k, _| schema.is_replicated(k)); Entry { - valid: EntrySealed { uuid, eclog }, + valid: EntrySealed { uuid, ecstate }, state: self.state, attrs: self.attrs, } @@ -1677,7 +1705,7 @@ impl Entry { Entry { valid: EntryInvalid { cid, - eclog: self.valid.eclog, + ecstate: self.valid.ecstate, }, state: self.state, attrs: self.attrs, @@ -1688,19 +1716,26 @@ impl Entry { self.valid.uuid } + /* pub fn get_changelog(&self) -> &EntryChangelog { &self.valid.eclog } + */ + + pub fn get_changestate(&self) -> &EntryChangeState { + &self.valid.ecstate + } #[cfg(test)] pub unsafe fn into_invalid(mut self) -> Entry { let cid = Cid::new_zero(); self.set_last_changed(cid.clone()); - let eclog = EntryChangelog::new_without_schema(cid.clone(), self.attrs.clone()); + // let eclog = EntryChangelog::new_without_schema(cid.clone(), self.attrs.clone()); + let ecstate = EntryChangeState::new_without_schema(&cid, &self.attrs); Entry { - valid: EntryInvalid { cid, eclog }, + valid: EntryInvalid { cid, ecstate }, state: self.state, attrs: self.attrs, } @@ -1736,7 +1771,7 @@ impl Entry { ) -> Result { let rdn = qs.uuid_to_rdn(self.get_uuid())?; - let dn = format!("{},{}", rdn, basedn); + let dn = format!("{rdn},{basedn}"); // Everything in our attrs set is "what was requested". So we can transform that now // so they are all in "ldap forms" which makes our next stage a bit easier. @@ -1866,13 +1901,16 @@ impl Entry { self.attrs.keys().map(|a| a.as_str()) } - /* #[inline(always)] /// Get an iterator over the current set of values for an attribute name. - pub fn get_ava(&self, attr: &str) -> Option> { - self.attrs.get(attr).map(|vs| vs.iter()) + pub fn get_ava(&self) -> &Eattrs { + &self.attrs + } + + #[inline(always)] + pub fn get_ava_iter(&self) -> impl Iterator { + self.attrs.iter() } - */ #[inline(always)] /// Return a reference to the current set of values that are associated to this attribute. @@ -2349,16 +2387,22 @@ where // If this already exists, we silently drop the event. This is because // we need this to be *state* based where we assert presence. 
pub fn add_ava(&mut self, attr: &str, value: Value) { + self.valid.ecstate.change_ava(&self.valid.cid, attr); + /* self.valid .eclog .add_ava_iter(&self.valid.cid, attr, std::iter::once(value.clone())); + */ self.add_ava_int(attr, value) } fn assert_ava(&mut self, attr: &str, value: &PartialValue) -> Result<(), OperationError> { + self.valid.ecstate.change_ava(&self.valid.cid, attr); + /* self.valid .eclog .assert_ava(&self.valid.cid, attr, value.clone()); + */ if self.attribute_equality(attr, value) { Ok(()) } else { @@ -2369,9 +2413,12 @@ where /// Remove an attribute-value pair from this entry. If the ava doesn't exist, we /// don't do anything else since we are asserting the absence of a value. pub(crate) fn remove_ava(&mut self, attr: &str, value: &PartialValue) { + self.valid.ecstate.change_ava(&self.valid.cid, attr); + /* self.valid .eclog .remove_ava_iter(&self.valid.cid, attr, std::iter::once(value.clone())); + */ let rm = if let Some(vs) = self.attrs.get_mut(attr) { vs.remove(value); @@ -2385,9 +2432,12 @@ where } pub(crate) fn remove_avas(&mut self, attr: &str, values: &BTreeSet) { + self.valid.ecstate.change_ava(&self.valid.cid, attr); + /* self.valid .eclog .remove_ava_iter(&self.valid.cid, attr, values.iter().cloned()); + */ let rm = if let Some(vs) = self.attrs.get_mut(attr) { values.iter().for_each(|k| { @@ -2405,13 +2455,15 @@ where /// Remove all values of this attribute from the entry. If it doesn't exist, this /// asserts that no content of that attribute exist. pub(crate) fn purge_ava(&mut self, attr: &str) { - self.valid.eclog.purge_ava(&self.valid.cid, attr); + self.valid.ecstate.change_ava(&self.valid.cid, attr); + // self.valid.eclog.purge_ava(&self.valid.cid, attr); self.attrs.remove(attr); } /// Remove all values of this attribute from the entry, and return their content. pub fn pop_ava(&mut self, attr: &str) -> Option { - self.valid.eclog.purge_ava(&self.valid.cid, attr); + self.valid.ecstate.change_ava(&self.valid.cid, attr); + // self.valid.eclog.purge_ava(&self.valid.cid, attr); self.attrs.remove(attr) } @@ -2421,18 +2473,24 @@ where where T: Clone + IntoIterator, { + self.valid.ecstate.change_ava(&self.valid.cid, attr); + /* self.valid.eclog.purge_ava(&self.valid.cid, attr); self.valid .eclog .add_ava_iter(&self.valid.cid, attr, iter.clone()); + */ self.set_ava_int(attr, iter) } pub fn set_ava_set(&mut self, attr: &str, vs: ValueSet) { + self.valid.ecstate.change_ava(&self.valid.cid, attr); + /* self.valid.eclog.purge_ava(&self.valid.cid, attr); self.valid .eclog .add_ava_iter(&self.valid.cid, attr, vs.to_value_iter()); + */ self.attrs.insert(AttrString::from(attr), vs); } @@ -2489,6 +2547,7 @@ impl From<&SchemaAttribute> for Entry { let multivalue_v = vs_bool![s.multivalue]; let sync_allowed_v = vs_bool![s.sync_allowed]; + let replicated_v = vs_bool![s.replicated]; let phantom_v = vs_bool![s.phantom]; let unique_v = vs_bool![s.unique]; @@ -2505,6 +2564,7 @@ impl From<&SchemaAttribute> for Entry { attrs.insert(AttrString::from("multivalue"), multivalue_v); attrs.insert(AttrString::from("phantom"), phantom_v); attrs.insert(AttrString::from("sync_allowed"), sync_allowed_v); + attrs.insert(AttrString::from("replicated"), replicated_v); attrs.insert(AttrString::from("unique"), unique_v); if let Some(vs) = index_v { attrs.insert(AttrString::from("index"), vs); @@ -2771,13 +2831,13 @@ mod tests { // When we do None, None, we get nothing back. 
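// idx_diff(idxmeta, pre, post) yields the index updates required to move
// from the pre entry state to the post state, where None means "no entry" -
// so (None, None) is a no-op, (Some, None) a delete, (None, Some) an add.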
let r1 = Entry::idx_diff(&idxmeta, None, None); - eprintln!("{:?}", r1); + eprintln!("{r1:?}"); assert!(r1 == Vec::new()); // Check generating a delete diff let mut del_r = Entry::idx_diff(&idxmeta, Some(&e1), None); del_r.sort_unstable(); - eprintln!("del_r {:?}", del_r); + eprintln!("del_r {del_r:?}"); assert!( del_r[0] == Err(( @@ -2798,7 +2858,7 @@ mod tests { // Check generating an add diff let mut add_r = Entry::idx_diff(&idxmeta, None, Some(&e1)); add_r.sort_unstable(); - eprintln!("{:?}", add_r); + eprintln!("{add_r:?}"); assert!( add_r[0] == Ok(( @@ -2847,7 +2907,7 @@ mod tests { // Change an attribute. let mut chg_r = Entry::idx_diff(&idxmeta, Some(&e1), Some(&e2)); chg_r.sort_unstable(); - eprintln!("{:?}", chg_r); + eprintln!("{chg_r:?}"); assert!( chg_r[1] == Err(( diff --git a/kanidmd/lib/src/idm/credupdatesession.rs b/kanidmd/lib/src/idm/credupdatesession.rs index c686c5366..586a38d71 100644 --- a/kanidmd/lib/src/idm/credupdatesession.rs +++ b/kanidmd/lib/src/idm/credupdatesession.rs @@ -71,7 +71,7 @@ impl fmt::Debug for MfaRegState { MfaRegState::TotpInvalidSha1(_, _, _) => "MfaRegState::TotpInvalidSha1", MfaRegState::Passkey(_, _) => "MfaRegState::Passkey", }; - write!(f, "{}", t) + write!(f, "{t}") } } @@ -168,7 +168,7 @@ impl fmt::Debug for MfaRegStateStatus { MfaRegStateStatus::BackupCodes(_) => "MfaRegStateStatus::BackupCodes", MfaRegStateStatus::Passkey(_) => "MfaRegStateStatus::Passkey", }; - write!(f, "{}", t) + write!(f, "{t}") } } diff --git a/kanidmd/lib/src/idm/ldap.rs b/kanidmd/lib/src/idm/ldap.rs index 44a19bdd9..eff3082f4 100644 --- a/kanidmd/lib/src/idm/ldap.rs +++ b/kanidmd/lib/src/idm/ldap.rs @@ -75,10 +75,10 @@ impl LdapServer { let basedn = ldap_domain_to_dc(domain_name.as_str()); - let dnre = Regex::new(format!("^((?P[^=]+)=(?P[^=]+),)?{}$", basedn).as_str()) + let dnre = Regex::new(format!("^((?P[^=]+)=(?P[^=]+),)?{basedn}$").as_str()) .map_err(|_| OperationError::InvalidEntryState)?; - let binddnre = Regex::new(format!("^(([^=,]+)=)?(?P[^=,]+)(,{})?$", basedn).as_str()) + let binddnre = Regex::new(format!("^(([^=,]+)=)?(?P[^=,]+)(,{basedn})?$").as_str()) .map_err(|_| OperationError::InvalidEntryState)?; let rootdse = LdapSearchResultEntry { @@ -513,7 +513,7 @@ impl LdapServer { wr.gen_success(format!("u: {}", u.spn).as_str()), )), None => Ok(LdapResponseState::Respond( - wr.gen_operror(format!("Unbound Connection {}", eventid).as_str()), + wr.gen_operror(format!("Unbound Connection {eventid}").as_str()), )), }, } // end match server op @@ -542,9 +542,9 @@ fn operationerr_to_ldapresultcode(e: OperationError) -> (LdapResultCode, String) (LdapResultCode::InvalidAttributeSyntax, s) } OperationError::SchemaViolation(se) => { - (LdapResultCode::UnwillingToPerform, format!("{:?}", se)) + (LdapResultCode::UnwillingToPerform, format!("{se:?}")) } - e => (LdapResultCode::Other, format!("{:?}", e)), + e => (LdapResultCode::Other, format!("{e:?}")), } } @@ -685,7 +685,7 @@ mod tests { assert!(admin_t.effective_session == LdapSession::UnixBind(UUID_ADMIN)); let admin_t = task::block_on(ldaps.do_bind( idms, - format!("uuid={},dc=example,dc=com", STR_UUID_ADMIN).as_str(), + format!("uuid={STR_UUID_ADMIN},dc=example,dc=com").as_str(), TEST_PASSWORD, )) .unwrap() @@ -703,7 +703,7 @@ mod tests { assert!(admin_t.effective_session == LdapSession::UnixBind(UUID_ADMIN)); let admin_t = task::block_on(ldaps.do_bind( idms, - format!("uuid={}", STR_UUID_ADMIN).as_str(), + format!("uuid={STR_UUID_ADMIN}").as_str(), TEST_PASSWORD, )) .unwrap() @@ -725,7 +725,7 @@ mod tests { 
assert!(admin_t.effective_session == LdapSession::UnixBind(UUID_ADMIN)); let admin_t = task::block_on(ldaps.do_bind( idms, - format!("{},dc=example,dc=com", STR_UUID_ADMIN).as_str(), + format!("{STR_UUID_ADMIN},dc=example,dc=com").as_str(), TEST_PASSWORD, )) .unwrap() diff --git a/kanidmd/lib/src/idm/mod.rs b/kanidmd/lib/src/idm/mod.rs index 7a8ec0628..3e7d13835 100644 --- a/kanidmd/lib/src/idm/mod.rs +++ b/kanidmd/lib/src/idm/mod.rs @@ -32,10 +32,10 @@ pub enum AuthState { impl fmt::Debug for AuthState { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { - AuthState::Choose(mechs) => write!(f, "AuthState::Choose({:?})", mechs), - AuthState::Continue(allow) => write!(f, "AuthState::Continue({:?})", allow), - AuthState::Denied(reason) => write!(f, "AuthState::Denied({:?})", reason), - AuthState::Success(_token, issue) => write!(f, "AuthState::Success({:?})", issue), + AuthState::Choose(mechs) => write!(f, "AuthState::Choose({mechs:?})"), + AuthState::Continue(allow) => write!(f, "AuthState::Continue({allow:?})"), + AuthState::Denied(reason) => write!(f, "AuthState::Denied({reason:?})"), + AuthState::Success(_token, issue) => write!(f, "AuthState::Success({issue:?})"), } } } diff --git a/kanidmd/lib/src/idm/oauth2.rs b/kanidmd/lib/src/idm/oauth2.rs index 04553b526..953513e48 100644 --- a/kanidmd/lib/src/idm/oauth2.rs +++ b/kanidmd/lib/src/idm/oauth2.rs @@ -143,10 +143,10 @@ impl fmt::Display for Oauth2TokenType { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Oauth2TokenType::Access { session_id, .. } => { - write!(f, "access_token ({}) ", session_id) + write!(f, "access_token ({session_id}) ") } Oauth2TokenType::Refresh { session_id, .. } => { - write!(f, "refresh_token ({}) ", session_id) + write!(f, "refresh_token ({session_id}) ") } } } @@ -389,13 +389,13 @@ impl<'a> Oauth2ResourceServersWriteTransaction<'a> { token_endpoint.set_path("/oauth2/token"); let mut userinfo_endpoint = self.inner.origin.clone(); - userinfo_endpoint.set_path(&format!("/oauth2/openid/{}/userinfo", name)); + userinfo_endpoint.set_path(&format!("/oauth2/openid/{name}/userinfo")); let mut jwks_uri = self.inner.origin.clone(); - jwks_uri.set_path(&format!("/oauth2/openid/{}/public_key.jwk", name)); + jwks_uri.set_path(&format!("/oauth2/openid/{name}/public_key.jwk")); let mut iss = self.inner.origin.clone(); - iss.set_path(&format!("/oauth2/openid/{}", name)); + iss.set_path(&format!("/oauth2/openid/{name}")); let scopes_supported: BTreeSet = scope_maps @@ -2193,7 +2193,7 @@ mod tests { ); // * doesn't have : - let client_authz = Some(base64::encode(format!("test_resource_server {}", secret))); + let client_authz = Some(base64::encode(format!("test_resource_server {secret}"))); assert!( idms_prox_read .check_oauth2_token_exchange(client_authz.as_deref(), &token_req, ct) @@ -2202,7 +2202,7 @@ mod tests { ); // * invalid client_id - let client_authz = Some(base64::encode(format!("NOT A REAL SERVER:{}", secret))); + let client_authz = Some(base64::encode(format!("NOT A REAL SERVER:{secret}"))); assert!( idms_prox_read .check_oauth2_token_exchange(client_authz.as_deref(), &token_req, ct) @@ -2220,7 +2220,7 @@ mod tests { ); // ✅ Now the valid client_authz is in place. 
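// A small sketch (hypothetical helper, not the kanidm implementation) of
// the client_authz scheme these tests exercise: OAuth2 client auth here is
// HTTP Basic, i.e. base64("client_id:client_secret"), which is why the
// missing ':' and the unknown client_id above must both be rejected.
fn parse_client_authz(authz: &str) -> Option<(String, String)> {
    let decoded = base64::decode(authz).ok()?; // same base64 crate API the tests use
    let text = String::from_utf8(decoded).ok()?;
    let (id, secret) = text.split_once(':')?;
    Some((id.to_string(), secret.to_string()))
}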
- let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret))); + let client_authz = Some(base64::encode(format!("test_resource_server:{secret}"))); // * expired exchange code (took too long) assert!( idms_prox_read @@ -2304,7 +2304,7 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false, false); - let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret))); + let client_authz = Some(base64::encode(format!("test_resource_server:{secret}"))); let mut idms_prox_read = task::block_on(idms.proxy_read()); @@ -2370,7 +2370,7 @@ mod tests { ) .expect("Failed to inspect token"); - eprintln!("👉 {:?}", intr_response); + eprintln!("👉 {intr_response:?}"); assert!(intr_response.active); assert!(intr_response.scope.as_deref() == Some("openid supplement")); assert!(intr_response.client_id.as_deref() == Some("test_resource_server")); @@ -2419,7 +2419,7 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false, false); - let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret))); + let client_authz = Some(base64::encode(format!("test_resource_server:{secret}"))); let mut idms_prox_read = task::block_on(idms.proxy_read()); @@ -2496,7 +2496,7 @@ mod tests { ct, ) .expect("Failed to inspect token"); - eprintln!("👉 {:?}", intr_response); + eprintln!("👉 {intr_response:?}"); assert!(intr_response.active); drop(idms_prox_read); @@ -2600,7 +2600,7 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false, false); - let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret))); + let client_authz = Some(base64::encode(format!("test_resource_server:{secret}"))); let mut idms_prox_read = task::block_on(idms.proxy_read()); @@ -2943,7 +2943,7 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false, false); - let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret))); + let client_authz = Some(base64::encode(format!("test_resource_server:{secret}"))); let mut idms_prox_read = task::block_on(idms.proxy_read()); @@ -3081,7 +3081,7 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false, true); - let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret))); + let client_authz = Some(base64::encode(format!("test_resource_server:{secret}"))); let mut idms_prox_read = task::block_on(idms.proxy_read()); @@ -3178,7 +3178,7 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false, true); - let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret))); + let client_authz = Some(base64::encode(format!("test_resource_server:{secret}"))); let mut idms_prox_read = task::block_on(idms.proxy_read()); diff --git a/kanidmd/lib/src/idm/scim.rs b/kanidmd/lib/src/idm/scim.rs index 0d0e9dcc5..a16e0537a 100644 --- a/kanidmd/lib/src/idm/scim.rs +++ b/kanidmd/lib/src/idm/scim.rs @@ -788,8 +788,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { let attr_schema = schema.get_attributes().get(scim_attr_name).ok_or_else(|| { OperationError::InvalidAttribute(format!( - 
"No such attribute in schema - {}", - scim_attr_name + "No such attribute in schema - {scim_attr_name}" )) })?; @@ -820,16 +819,14 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { .ok_or_else(|| { error!("Invalid value - not a valid unsigned integer"); OperationError::InvalidAttribute(format!( - "Invalid unsigned integer - {}", - scim_attr_name + "Invalid unsigned integer - {scim_attr_name}" )) }) .and_then(|i| { u32::try_from(i).map_err(|_| { error!("Invalid value - not within the bounds of a u32"); OperationError::InvalidAttribute(format!( - "Out of bounds unsigned integer - {}", - scim_attr_name + "Out of bounds unsigned integer - {scim_attr_name}" )) }) }) @@ -849,8 +846,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { let external_id = complex.attrs.get("external_id").ok_or_else(|| { error!("Invalid scim complex attr - missing required key external_id"); OperationError::InvalidAttribute(format!( - "missing required key external_id - {}", - scim_attr_name + "missing required key external_id - {scim_attr_name}" )) })?; @@ -859,8 +855,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { _ => { error!("Invalid external_id attribute - must be scim simple string"); Err(OperationError::InvalidAttribute(format!( - "external_id must be scim simple string - {}", - scim_attr_name + "external_id must be scim simple string - {scim_attr_name}" ))) } }?; @@ -889,8 +884,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { .ok_or_else(|| { error!("Invalid scim complex attr - missing required key external_id"); OperationError::InvalidAttribute(format!( - "missing required key external_id - {}", - scim_attr_name + "missing required key external_id - {scim_attr_name}" )) }) .and_then(|external_id| match external_id { @@ -900,8 +894,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { "Invalid external_id attribute - must be scim simple string" ); Err(OperationError::InvalidAttribute(format!( - "external_id must be scim simple string - {}", - scim_attr_name + "external_id must be scim simple string - {scim_attr_name}" ))) } })?; @@ -912,8 +905,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { .ok_or_else(|| { error!("Invalid scim complex attr - missing required key secret"); OperationError::InvalidAttribute(format!( - "missing required key secret - {}", - scim_attr_name + "missing required key secret - {scim_attr_name}" )) }) .and_then(|secret| match secret { @@ -923,16 +915,14 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { .map_err(|_| { error!("Invalid secret attribute - must be base64 string"); OperationError::InvalidAttribute(format!( - "secret must be base64 string - {}", - scim_attr_name + "secret must be base64 string - {scim_attr_name}" )) }) } _ => { error!("Invalid secret attribute - must be scim simple string"); Err(OperationError::InvalidAttribute(format!( - "secret must be scim simple string - {}", - scim_attr_name + "secret must be scim simple string - {scim_attr_name}" ))) } })?; @@ -941,8 +931,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { .ok_or_else(|| { error!("Invalid scim complex attr - missing required key algo"); OperationError::InvalidAttribute(format!( - "missing required key algo - {}", - scim_attr_name + "missing required key algo - {scim_attr_name}" )) }) .and_then(|algo_str| { @@ -955,8 +944,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { _ => { error!("Invalid algo attribute - must be one of sha1, sha256 or sha512"); Err(OperationError::InvalidAttribute(format!( - "algo must be one of sha1, sha256 or sha512 - {}", - scim_attr_name + "algo must be one of sha1, sha256 or 
sha512 - {scim_attr_name}" ))) } } @@ -964,8 +952,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { _ => { error!("Invalid algo attribute - must be scim simple string"); Err(OperationError::InvalidAttribute(format!( - "algo must be scim simple string - {}", - scim_attr_name + "algo must be scim simple string - {scim_attr_name}" ))) } } @@ -974,8 +961,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { let step = complex.attrs.get("step").ok_or_else(|| { error!("Invalid scim complex attr - missing required key step"); OperationError::InvalidAttribute(format!( - "missing required key step - {}", - scim_attr_name + "missing required key step - {scim_attr_name}" )) }).and_then(|step| { match step { @@ -984,16 +970,14 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { Some(s) if s >= 30 => Ok(s), _ => Err(OperationError::InvalidAttribute(format!( - "step must be a positive integer value equal to or greater than 30 - {}", - scim_attr_name + "step must be a positive integer value equal to or greater than 30 - {scim_attr_name}" ))), } } _ => { error!("Invalid step attribute - must be scim simple number"); Err(OperationError::InvalidAttribute(format!( - "step must be scim simple number - {}", - scim_attr_name + "step must be scim simple number - {scim_attr_name}" ))) } } @@ -1005,8 +989,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { .ok_or_else(|| { error!("Invalid scim complex attr - missing required key digits"); OperationError::InvalidAttribute(format!( - "missing required key digits - {}", - scim_attr_name + "missing required key digits - {scim_attr_name}" )) }) .and_then(|digits| match digits { @@ -1014,15 +997,13 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { Some(6) => Ok(TotpDigits::Six), Some(8) => Ok(TotpDigits::Eight), _ => Err(OperationError::InvalidAttribute(format!( - "digits must be a positive integer value of 6 OR 8 - {}", - scim_attr_name + "digits must be a positive integer value of 6 OR 8 - {scim_attr_name}" ))), }, _ => { error!("Invalid digits attribute - must be scim simple number"); Err(OperationError::InvalidAttribute(format!( - "digits must be scim simple number - {}", - scim_attr_name + "digits must be scim simple number - {scim_attr_name}" ))) } })?; @@ -1035,8 +1016,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { (syn, mv, sa) => { error!(?syn, ?mv, ?sa, "Unsupported scim attribute conversion. This may be a syntax error in your import, or a missing feature in Kanidm."); Err(OperationError::InvalidAttribute(format!( - "Unsupported attribute conversion - {}", - scim_attr_name + "Unsupported attribute conversion - {scim_attr_name}" ))) } } @@ -1951,7 +1931,7 @@ mod tests { assert!(task::block_on(apply_phase_3_test( idms, vec![ScimEntry { - schemas: vec![format!("{}system", SCIM_SCHEMA_SYNC)], + schemas: vec![format!("{SCIM_SCHEMA_SYNC}system")], id: user_sync_uuid, external_id: Some("cn=testgroup,ou=people,dc=test".to_string()), meta: None, diff --git a/kanidmd/lib/src/idm/server.rs b/kanidmd/lib/src/idm/server.rs index c1b4a640a..3b1595408 100644 --- a/kanidmd/lib/src/idm/server.rs +++ b/kanidmd/lib/src/idm/server.rs @@ -179,7 +179,7 @@ impl IdmServer { let valid = url.domain().map(|effective_domain| { // We need to prepend the '.' here to ensure that myexample.com != example.com, // rather than just ends with. 
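// A standalone illustration (hypothetical helper mirroring the check below)
// of why the leading '.' matters: without it, the unrelated registrable
// domain "myexample.com" would pass a bare ends_with("example.com").
fn valid_effective_domain(effective_domain: &str, rp_id: &str) -> bool {
    effective_domain == rp_id || effective_domain.ends_with(&format!(".{rp_id}"))
}

fn main() {
    assert!(valid_effective_domain("example.com", "example.com"));
    assert!(valid_effective_domain("idm.example.com", "example.com"));
    assert!(!valid_effective_domain("myexample.com", "example.com"));
}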
- effective_domain.ends_with(&format!(".{}", rp_id)) + effective_domain.ends_with(&format!(".{rp_id}")) || effective_domain == rp_id }).unwrap_or(false); diff --git a/kanidmd/lib/src/lib.rs b/kanidmd/lib/src/lib.rs index 753351be4..3357cb019 100644 --- a/kanidmd/lib/src/lib.rs +++ b/kanidmd/lib/src/lib.rs @@ -69,8 +69,8 @@ pub mod prelude { pub use crate::constants::*; pub use crate::entry::{ Entry, EntryCommitted, EntryInit, EntryInitNew, EntryInvalid, EntryInvalidCommitted, - EntryInvalidNew, EntryNew, EntryReduced, EntryReducedCommitted, EntrySealed, - EntrySealedCommitted, EntrySealedNew, EntryTuple, EntryValid, + EntryInvalidNew, EntryNew, EntryReduced, EntryReducedCommitted, EntryRefresh, + EntryRefreshNew, EntrySealed, EntrySealedCommitted, EntrySealedNew, EntryTuple, EntryValid, }; pub use crate::event::{CreateEvent, DeleteEvent, ExistsEvent, ModifyEvent, SearchEvent}; pub use crate::filter::{ diff --git a/kanidmd/lib/src/macros.rs b/kanidmd/lib/src/macros.rs index 8d55e4568..67723666b 100644 --- a/kanidmd/lib/src/macros.rs +++ b/kanidmd/lib/src/macros.rs @@ -288,18 +288,6 @@ macro_rules! run_delete_test { }}; } -#[cfg(test)] -macro_rules! run_entrychangelog_test { - ($test_fn:expr) => {{ - let _ = sketching::test_init(); - let schema_outer = Schema::new().expect("Failed to init schema"); - - let schema_txn = schema_outer.read(); - - $test_fn(&schema_txn) - }}; -} - #[allow(unused_macros)] #[macro_export] macro_rules! modlist { diff --git a/kanidmd/lib/src/plugins/attrunique.rs b/kanidmd/lib/src/plugins/attrunique.rs index fbfcc7deb..33f851cef 100644 --- a/kanidmd/lib/src/plugins/attrunique.rs +++ b/kanidmd/lib/src/plugins/attrunique.rs @@ -21,6 +21,9 @@ fn get_cand_attr_set( cand: &[Entry], attr: &str, ) -> Result, OperationError> { + // This is building both the set of values to search for uniqueness, but ALSO + // is detecting if any modified or current entries in the cand set also duplicated + // do to the ennforcing that the PartialValue must be unique in the cand_attr set. let mut cand_attr: BTreeMap = BTreeMap::new(); cand.iter() @@ -53,22 +56,18 @@ fn get_cand_attr_set( .map(|()| cand_attr) } -fn enforce_unique( +fn enforce_unique( qs: &mut QueryServerWriteTransaction, - cand: &[Entry], + cand: &[Entry], attr: &str, ) -> Result<(), OperationError> { - trace!(?attr); - // Build a set of all the value -> uuid for the cands. // If already exist, reject due to dup. let cand_attr = get_cand_attr_set(cand, attr).map_err(|e| { - admin_error!(err = ?e, "failed to get cand attr set"); + admin_error!(err = ?e, ?attr, "failed to get cand attr set"); e })?; - trace!(?cand_attr); - // No candidates to check! if cand_attr.is_empty() { return Ok(()); @@ -235,6 +234,21 @@ impl Plugin for AttrUnique { r } + fn pre_repl_refresh( + qs: &mut QueryServerWriteTransaction, + cand: &[EntryRefreshNew], + ) -> Result<(), OperationError> { + let uniqueattrs = { + let schema = qs.get_schema(); + schema.get_attributes_unique() + }; + + let r: Result<(), OperationError> = uniqueattrs + .iter() + .try_for_each(|attr| enforce_unique(qs, cand, attr.as_str())); + r + } + #[instrument(level = "debug", name = "attrunique_verify", skip(qs))] fn verify(qs: &mut QueryServerReadTransaction) -> Vec> { // Only check live entries, not recycled. 
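// A minimal sketch of the duplicate detection get_cand_attr_set performs,
// with simplified types (the real code maps PartialValue -> Uuid): the
// insert into the map doubles as the intra-candidate uniqueness check,
// since a second insert of the same value exposes the conflict.
use std::collections::BTreeMap;

fn cand_attr_set(cands: &[(u64, Vec<String>)]) -> Result<BTreeMap<String, u64>, String> {
    let mut out = BTreeMap::new();
    for (uuid, values) in cands {
        for v in values {
            if let Some(other) = out.insert(v.clone(), *uuid) {
                return Err(format!("value {v} duplicated between {other} and {uuid}"));
            }
        }
    }
    Ok(out)
}

fn main() {
    let cands = [(1u64, vec!["alice".into()]), (2u64, vec!["alice".into()])];
    assert!(cand_attr_set(&cands).is_err()); // duplicate inside the candidate set itself
}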
diff --git a/kanidmd/lib/src/plugins/gidnumber.rs b/kanidmd/lib/src/plugins/gidnumber.rs index 50b94805e..091075962 100644 --- a/kanidmd/lib/src/plugins/gidnumber.rs +++ b/kanidmd/lib/src/plugins/gidnumber.rs @@ -35,8 +35,7 @@ fn apply_gidnumber(e: &mut Entry) -> Result<(), Opera // assert the value is greater than the system range. if gid < GID_SYSTEM_NUMBER_MIN { return Err(OperationError::InvalidAttribute(format!( - "gidnumber {} may overlap with system range {}", - gid, GID_SYSTEM_NUMBER_MIN + "gidnumber {gid} may overlap with system range {GID_SYSTEM_NUMBER_MIN}" ))); } @@ -48,8 +47,7 @@ fn apply_gidnumber(e: &mut Entry) -> Result<(), Opera // If they provided us with a gid number, ensure it's in a safe range. if gid <= GID_SAFETY_NUMBER_MIN { Err(OperationError::InvalidAttribute(format!( - "gidnumber {} overlaps into system secure range {}", - gid, GID_SAFETY_NUMBER_MIN + "gidnumber {gid} overlaps into system secure range {GID_SAFETY_NUMBER_MIN}" ))) } else { Ok(()) diff --git a/kanidmd/lib/src/plugins/memberof.rs b/kanidmd/lib/src/plugins/memberof.rs index 45d1aa7a2..6fe126fba 100644 --- a/kanidmd/lib/src/plugins/memberof.rs +++ b/kanidmd/lib/src/plugins/memberof.rs @@ -216,27 +216,16 @@ impl Plugin for MemberOf { cand: &[Entry], ce: &CreateEvent, ) -> Result<(), OperationError> { - let dyngroup_change = super::dyngroup::DynGroup::post_create(qs, cand, &ce.ident)?; + Self::post_create_inner(qs, cand, &ce.ident) + } - let group_affect = cand - .iter() - .map(|e| e.get_uuid()) - .chain(dyngroup_change.into_iter()) - .chain( - cand.iter() - .filter_map(|e| { - // Is it a group? - if e.attribute_equality("class", &PVCLASS_GROUP) { - e.get_ava_as_refuuid("member") - } else { - None - } - }) - .flatten(), - ) - .collect(); - - apply_memberof(qs, group_affect) + #[instrument(level = "debug", name = "memberof_post_repl_refresh", skip_all)] + fn post_repl_refresh( + qs: &mut QueryServerWriteTransaction, + cand: &[Entry], + ) -> Result<(), OperationError> { + let ident = Identity::from_internal(); + Self::post_create_inner(qs, cand, &ident) } #[instrument(level = "debug", name = "memberof_post_modify", skip_all)] @@ -376,6 +365,34 @@ impl Plugin for MemberOf { } impl MemberOf { + fn post_create_inner( + qs: &mut QueryServerWriteTransaction, + cand: &[Entry], + ident: &Identity, + ) -> Result<(), OperationError> { + let dyngroup_change = super::dyngroup::DynGroup::post_create(qs, cand, ident)?; + + let group_affect = cand + .iter() + .map(|e| e.get_uuid()) + .chain(dyngroup_change.into_iter()) + .chain( + cand.iter() + .filter_map(|e| { + // Is it a group? 
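+                            // If it is, its members are folded into the affected set
+                            // too, since their memberof must be recalculated against
+                            // the newly created group.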
+ if e.attribute_equality("class", &PVCLASS_GROUP) { + e.get_ava_as_refuuid("member") + } else { + None + } + }) + .flatten(), + ) + .collect(); + + apply_memberof(qs, group_affect) + } + fn post_modify_inner( qs: &mut QueryServerWriteTransaction, pre_cand: &[Arc], diff --git a/kanidmd/lib/src/plugins/mod.rs b/kanidmd/lib/src/plugins/mod.rs index 4d8714905..38a3968ab 100644 --- a/kanidmd/lib/src/plugins/mod.rs +++ b/kanidmd/lib/src/plugins/mod.rs @@ -124,6 +124,28 @@ trait Plugin { Err(OperationError::InvalidState) } + fn pre_repl_refresh( + _qs: &mut QueryServerWriteTransaction, + _cand: &[EntryRefreshNew], + ) -> Result<(), OperationError> { + admin_error!( + "plugin {} has an unimplemented pre_repl_refresh!", + Self::id() + ); + Err(OperationError::InvalidState) + } + + fn post_repl_refresh( + _qs: &mut QueryServerWriteTransaction, + _cand: &[EntrySealedCommitted], + ) -> Result<(), OperationError> { + admin_error!( + "plugin {} has an unimplemented post_repl_refresh!", + Self::id() + ); + Err(OperationError::InvalidState) + } + fn verify(_qs: &mut QueryServerReadTransaction) -> Vec> { admin_error!("plugin {} has an unimplemented verify!", Self::id()); vec![Err(ConsistencyError::Unknown)] @@ -258,6 +280,23 @@ impl Plugins { .and_then(|_| memberof::MemberOf::post_delete(qs, cand, de)) } + #[instrument(level = "debug", name = "plugins::run_pre_repl_refresh", skip_all)] + pub fn run_pre_repl_refresh( + qs: &mut QueryServerWriteTransaction, + cand: &[EntryRefreshNew], + ) -> Result<(), OperationError> { + attrunique::AttrUnique::pre_repl_refresh(qs, cand) + } + + #[instrument(level = "debug", name = "plugins::run_post_repl_refresh", skip_all)] + pub fn run_post_repl_refresh( + qs: &mut QueryServerWriteTransaction, + cand: &[EntrySealedCommitted], + ) -> Result<(), OperationError> { + refint::ReferentialIntegrity::post_repl_refresh(qs, cand) + .and_then(|_| memberof::MemberOf::post_repl_refresh(qs, cand)) + } + #[instrument(level = "debug", name = "plugins::run_verify", skip_all)] pub fn run_verify( qs: &mut QueryServerReadTransaction, diff --git a/kanidmd/lib/src/plugins/refint.rs b/kanidmd/lib/src/plugins/refint.rs index 245aa738b..4791e3013 100644 --- a/kanidmd/lib/src/plugins/refint.rs +++ b/kanidmd/lib/src/plugins/refint.rs @@ -109,6 +109,14 @@ impl Plugin for ReferentialIntegrity { Self::post_modify_inner(qs, cand) } + #[instrument(level = "debug", name = "refint_post_repl_refresh", skip_all)] + fn post_repl_refresh( + qs: &mut QueryServerWriteTransaction, + cand: &[EntrySealedCommitted], + ) -> Result<(), OperationError> { + Self::post_modify_inner(qs, cand) + } + #[instrument(level = "debug", name = "refint_post_delete", skip_all)] fn post_delete( qs: &mut QueryServerWriteTransaction, diff --git a/kanidmd/lib/src/repl/cid.rs b/kanidmd/lib/src/repl/cid.rs index fb2251c2d..b81f35744 100644 --- a/kanidmd/lib/src/repl/cid.rs +++ b/kanidmd/lib/src/repl/cid.rs @@ -9,29 +9,28 @@ use serde::{Deserialize, Serialize}; pub struct Cid { // Mental note: Derive ord always checks in order of struct fields. 
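// With d_uuid removed, the derived Ord now compares ts first (the
// causality-bearing field), then s_uuid as a deterministic tie-breaker
// between servers that share the same timestamp.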
pub ts: Duration, - pub d_uuid: Uuid, pub s_uuid: Uuid, } impl fmt::Display for Cid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}-{}-{}", self.ts.as_nanos(), self.d_uuid, self.s_uuid) + write!(f, "{:032}-{}", self.ts.as_nanos(), self.s_uuid) } } impl Cid { #[cfg(test)] - pub(crate) fn new(d_uuid: Uuid, s_uuid: Uuid, ts: Duration) -> Self { - Cid { d_uuid, s_uuid, ts } + pub(crate) fn new(s_uuid: Uuid, ts: Duration) -> Self { + Cid { s_uuid, ts } } - pub fn new_lamport(d_uuid: Uuid, s_uuid: Uuid, ts: Duration, max_ts: &Duration) -> Self { + pub fn new_lamport(s_uuid: Uuid, ts: Duration, max_ts: &Duration) -> Self { let ts = if ts > *max_ts { ts } else { *max_ts + Duration::from_nanos(1) }; - Cid { ts, d_uuid, s_uuid } + Cid { ts, s_uuid } } #[cfg(test)] @@ -42,7 +41,6 @@ impl Cid { #[cfg(test)] pub unsafe fn new_count(c: u64) -> Self { Cid { - d_uuid: uuid!("00000000-0000-0000-0000-000000000000"), s_uuid: uuid!("00000000-0000-0000-0000-000000000000"), ts: Duration::new(c, 0), } @@ -51,7 +49,6 @@ impl Cid { #[cfg(test)] pub fn new_random_s_d(ts: Duration) -> Self { Cid { - d_uuid: Uuid::new_v4(), s_uuid: Uuid::new_v4(), ts, } @@ -62,7 +59,6 @@ impl Cid { self.ts .checked_sub(Duration::from_secs(secs)) .map(|r| Cid { - d_uuid: uuid!("00000000-0000-0000-0000-000000000000"), s_uuid: uuid!("00000000-0000-0000-0000-000000000000"), ts: r, }) @@ -82,12 +78,10 @@ mod tests { fn test_cid_ordering() { // Check diff ts let cid_a = Cid::new( - uuid!("00000000-0000-0000-0000-000000000001"), uuid!("00000000-0000-0000-0000-000000000001"), Duration::new(5, 0), ); let cid_b = Cid::new( - uuid!("00000000-0000-0000-0000-000000000001"), uuid!("00000000-0000-0000-0000-000000000001"), Duration::new(15, 0), ); @@ -96,30 +90,12 @@ mod tests { assert!(cid_a.cmp(&cid_b) == Ordering::Less); assert!(cid_b.cmp(&cid_a) == Ordering::Greater); - // check same ts diff d_uuid - let cid_c = Cid::new( - uuid!("00000000-0000-0000-0000-000000000000"), - uuid!("00000000-0000-0000-0000-000000000001"), - Duration::new(5, 0), - ); - let cid_d = Cid::new( - uuid!("00000000-0000-0000-0000-000000000001"), - uuid!("00000000-0000-0000-0000-000000000001"), - Duration::new(5, 0), - ); - - assert!(cid_c.cmp(&cid_c) == Ordering::Equal); - assert!(cid_c.cmp(&cid_d) == Ordering::Less); - assert!(cid_d.cmp(&cid_c) == Ordering::Greater); - // check same ts, d_uuid, diff s_uuid let cid_e = Cid::new( - uuid!("00000000-0000-0000-0000-000000000001"), uuid!("00000000-0000-0000-0000-000000000000"), Duration::new(5, 0), ); let cid_f = Cid::new( - uuid!("00000000-0000-0000-0000-000000000001"), uuid!("00000000-0000-0000-0000-000000000001"), Duration::new(5, 0), ); @@ -131,8 +107,7 @@ mod tests { #[test] fn test_cid_lamport() { - let d_uuid = uuid!("00000000-0000-0000-0000-000000000001"); - let s_uuid = d_uuid; + let s_uuid = uuid!("00000000-0000-0000-0000-000000000001"); let ts5 = Duration::new(5, 0); let ts10 = Duration::new(10, 0); @@ -140,12 +115,12 @@ mod tests { let cid_z = unsafe { Cid::new_zero() }; - let cid_a = Cid::new_lamport(d_uuid, s_uuid, ts5, &ts5); + let cid_a = Cid::new_lamport(s_uuid, ts5, &ts5); assert!(cid_a.cmp(&cid_z) == Ordering::Greater); - let cid_b = Cid::new_lamport(d_uuid, s_uuid, ts15, &ts10); + let cid_b = Cid::new_lamport(s_uuid, ts15, &ts10); assert!(cid_b.cmp(&cid_a) == Ordering::Greater); // Even with an older ts, we should still step forward. 
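// A self-contained sketch of the lamport step that new_lamport applies
// (same logic, simplified): a new Cid must always land *after* the highest
// timestamp already observed, even if the wall clock has gone backwards.
use std::time::Duration;

fn lamport_ts(ts: Duration, max_ts: Duration) -> Duration {
    if ts > max_ts {
        ts
    } else {
        max_ts + Duration::from_nanos(1)
    }
}

fn main() {
    let (t5, t10) = (Duration::new(5, 0), Duration::new(10, 0));
    // Wall clock ahead of the observed maximum: use it directly.
    assert_eq!(lamport_ts(t10, t5), t10);
    // Wall clock behind: step one nanosecond past the maximum instead.
    assert_eq!(lamport_ts(t5, t10), t10 + Duration::from_nanos(1));
}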
- let cid_c = Cid::new_lamport(d_uuid, s_uuid, ts10, &ts15); + let cid_c = Cid::new_lamport(s_uuid, ts10, &ts15); assert!(cid_c.cmp(&cid_b) == Ordering::Greater); } } diff --git a/kanidmd/lib/src/repl/consumer.rs b/kanidmd/lib/src/repl/consumer.rs new file mode 100644 index 000000000..9b6e66297 --- /dev/null +++ b/kanidmd/lib/src/repl/consumer.rs @@ -0,0 +1,214 @@ +use super::proto::*; +use crate::plugins::Plugins; +use crate::prelude::*; + +impl<'a> QueryServerReadTransaction<'a> { + // Get the current state of "where we are up to" + // + // There are two approaches we can use here. We can either store a cookie + // related to the supplier we are fetching from, or we can use our RUV state. + // + // Initially I'm using RUV state, because it lets us select exactly what has + // changed, where the cookie approach is more coarse grained. The cookie also + // requires some more knowledge about what supplier we are communicating too + // where the RUV approach doesn't since the supplier calcs the diff. + + #[instrument(level = "debug", skip_all)] + pub fn consumer_get_state(&mut self) -> Result<(), OperationError> { + Ok(()) + } +} + +impl<'a> QueryServerWriteTransaction<'a> { + // Apply the state changes if they are valid. + + #[instrument(level = "debug", skip_all)] + pub fn consumer_apply_changes(&mut self) -> Result<(), OperationError> { + Ok(()) + } + + pub fn consumer_apply_refresh( + &mut self, + ctx: &ReplRefreshContext, + ) -> Result<(), OperationError> { + match ctx { + ReplRefreshContext::V1 { + domain_version, + domain_uuid, + schema_entries, + meta_entries, + entries, + } => self.consumer_apply_refresh_v1( + *domain_version, + *domain_uuid, + schema_entries, + meta_entries, + entries, + ), + } + } + + fn consumer_refresh_create_entries( + &mut self, + ctx_entries: &[ReplEntryV1], + ) -> Result<(), OperationError> { + let candidates = ctx_entries + .iter() + .map(EntryRefreshNew::from_repl_entry_v1) + .collect::, _>>() + .map_err(|e| { + error!("Failed to convert entries from supplier"); + e + })?; + + Plugins::run_pre_repl_refresh(self, candidates.as_slice()).map_err(|e| { + admin_error!( + "Refresh operation failed (pre_repl_refresh plugin), {:?}", + e + ); + e + })?; + + // No need to assign CID's since this is a repl import. + let norm_cand = candidates + .into_iter() + .map(|e| { + e.validate(&self.schema) + .map_err(|e| { + admin_error!("Schema Violation in create validate {:?}", e); + OperationError::SchemaViolation(e) + }) + .map(|e| { + // Then seal the changes? + e.seal(&self.schema) + }) + }) + .collect::, _>>()?; + + // Do not run plugs! + + let commit_cand = self.be_txn.refresh(norm_cand).map_err(|e| { + admin_error!("betxn create failure {:?}", e); + e + })?; + + Plugins::run_post_repl_refresh(self, &commit_cand).map_err(|e| { + admin_error!( + "Refresh operation failed (post_repl_refresh plugin), {:?}", + e + ); + e + })?; + + self.changed_uuid + .extend(commit_cand.iter().map(|e| e.get_uuid())); + + Ok(()) + } + + #[instrument(level = "debug", skip_all)] + fn consumer_apply_refresh_v1( + &mut self, + ctx_domain_version: DomainVersion, + ctx_domain_uuid: Uuid, + ctx_schema_entries: &[ReplEntryV1], + ctx_meta_entries: &[ReplEntryV1], + ctx_entries: &[ReplEntryV1], + ) -> Result<(), OperationError> { + // Can we apply the domain version validly? + // if domain_version >= min_support ... + + if ctx_domain_version < DOMAIN_MIN_LEVEL { + error!("Unable to proceed with consumer refresh - incoming domain level is lower than our minimum supported level. 
{} < {}", ctx_domain_version, DOMAIN_MIN_LEVEL); + return Err(OperationError::ReplDomainLevelUnsatisfiable); + } else if ctx_domain_version > DOMAIN_MAX_LEVEL { + error!("Unable to proceed with consumer refresh - incoming domain level is greater than our maximum supported level. {} > {}", ctx_domain_version, DOMAIN_MAX_LEVEL); + return Err(OperationError::ReplDomainLevelUnsatisfiable); + } else { + debug!( + "Proceeding to refresh from domain at level {}", + ctx_domain_version + ); + }; + + // == ⚠️ Below this point we begin to make changes! == + + // Update the d_uuid. This is what defines us as being part of this repl topology! + self.be_txn.set_db_d_uuid(ctx_domain_uuid).map_err(|e| { + error!("Failed to reset domain uuid"); + e + })?; + + // Do we need to reset our s_uuid to avoid potential RUV conflicts? + // - I don't think so, since the refresh is supplying and rebuilding + // our local state. + + // Delete all entries - *proper delete, not just tombstone!* + + self.be_txn.danger_delete_all_db_content().map_err(|e| { + error!("Failed to clear existing server database content"); + e + })?; + + // Reset this transactions schema to a completely clean slate. + self.schema.generate_in_memory().map_err(|e| { + error!("Failed to reset in memory schema to clean state"); + e + })?; + + // Apply the schema entries first. This is the foundation that everything + // else will build upon! + self.consumer_refresh_create_entries(ctx_schema_entries) + .map_err(|e| { + error!("Failed to refresh schema entries"); + e + })?; + + // We need to reload schema now! + self.reload_schema().map_err(|e| { + error!("Failed to reload schema"); + e + })?; + + // We have to reindex to force all the existing indexes to be dumped + // and recreated before we start to import. + self.reindex().map_err(|e| { + error!("Failed to reload schema"); + e + })?; + + // Apply the domain info entry / system info / system config entry? + self.consumer_refresh_create_entries(ctx_meta_entries) + .map_err(|e| { + error!("Failed to refresh schema entries"); + e + })?; + + // NOTE: The domain info we receive here will have the domain version populated! + // That's okay though, because all the incoming data is already at the right + // version! + self.reload_domain_info().map_err(|e| { + error!("Failed to reload domain info"); + e + })?; + + // Mark that everything changed so that post commit hooks function as expected. + self.changed_schema = true; + self.changed_acp = true; + self.changed_oauth2 = true; + self.changed_domain = true; + + // That's it! We are GOOD to go! + + // Create all the entries. Note we don't hit plugins here beside post repl plugs. + self.consumer_refresh_create_entries(ctx_entries) + .map_err(|e| { + error!("Failed to refresh schema entries"); + e + })?; + + // Run post repl plugins + + Ok(()) + } +} diff --git a/kanidmd/lib/src/repl/entry-changelog.rs b/kanidmd/lib/src/repl/entry-changelog.rs new file mode 100644 index 000000000..02b413287 --- /dev/null +++ b/kanidmd/lib/src/repl/entry-changelog.rs @@ -0,0 +1,601 @@ +use std::collections::btree_map::Keys; +use std::collections::BTreeMap; +use std::fmt; +use std::ops::Bound; +use std::ops::Bound::*; + +use kanidm_proto::v1::ConsistencyError; + +use super::cid::Cid; +use crate::entry::{compare_attrs, Eattrs}; +use crate::prelude::*; +use crate::schema::SchemaTransaction; +use crate::valueset; + +#[derive(Debug, Clone)] +pub struct EntryChangelog { + /// The set of "entries as they existed at a point in time". 
This allows us to rewind + /// to a point-in-time, and then to start to "replay" applying all changes again. + /// + /// A subtle and important piece of information is that an anchor can be considered + /// as the "state as existing between two Cid's". This means for Cid X, this state is + /// the "moment before X". This is important, as for a create we define the initial anchor + /// as "nothing". It's means for the anchor at time X, that changes that occurred at time + /// X have NOT been replayed and applied! + anchors: BTreeMap, + changes: BTreeMap, +} + +/* +impl fmt::Display for EntryChangelog { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f + } +} +*/ + +/// A change defines the transitions that occurred within this Cid (transaction). A change is applied +/// as a whole, or rejected during the replay process. +#[derive(Debug, Clone)] +pub struct Change { + s: Vec, +} + +#[derive(Debug, Clone)] +enum State { + NonExistent, + Live(Eattrs), + Recycled(Eattrs), + Tombstone(Eattrs), +} + +impl fmt::Display for State { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self { + State::NonExistent => write!(f, "NonExistent"), + State::Live(_) => write!(f, "Live"), + State::Recycled(_) => write!(f, "Recycled"), + State::Tombstone(_) => write!(f, "Tombstone"), + } + } +} + +#[derive(Debug, Clone)] +enum Transition { + Create(Eattrs), + ModifyPurge(AttrString), + ModifyPresent(AttrString, Box), + ModifyRemoved(AttrString, Box), + ModifyAssert(AttrString, Box), + Recycle, + Revive, + Tombstone(Eattrs), +} + +impl fmt::Display for Transition { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self { + Transition::Create(_) => write!(f, "Create"), + Transition::ModifyPurge(a) => write!(f, "ModifyPurge({})", a), + Transition::ModifyPresent(a, _) => write!(f, "ModifyPresent({})", a), + Transition::ModifyRemoved(a, _) => write!(f, "ModifyRemoved({})", a), + Transition::ModifyAssert(a, _) => write!(f, "ModifyAssert({})", a), + Transition::Recycle => write!(f, "Recycle"), + Transition::Revive => write!(f, "Revive"), + Transition::Tombstone(_) => write!(f, "Tombstone"), + } + } +} + +impl State { + fn apply_change(self, change: &Change) -> Result { + let mut state = self; + for transition in change.s.iter() { + match (&mut state, transition) { + (State::NonExistent, Transition::Create(attrs)) => { + trace!("NonExistent + Create -> Live"); + state = State::Live(attrs.clone()); + } + (State::Live(ref mut attrs), Transition::ModifyPurge(attr)) => { + trace!("Live + ModifyPurge({}) -> Live", attr); + attrs.remove(attr); + } + (State::Live(ref mut attrs), Transition::ModifyPresent(attr, value)) => { + trace!("Live + ModifyPresent({}) -> Live", attr); + if let Some(vs) = attrs.get_mut(attr) { + let r = vs.insert_checked(value.as_ref().clone()); + assert!(r.is_ok()); + // Reject if it fails? 
+ } else { + #[allow(clippy::expect_used)] + let vs = valueset::from_value_iter(std::iter::once(value.as_ref().clone())) + .expect("Unable to fail - always single value, and only one type!"); + attrs.insert(attr.clone(), vs); + } + } + (State::Live(ref mut attrs), Transition::ModifyRemoved(attr, value)) => { + trace!("Live + ModifyRemoved({}) -> Live", attr); + let rm = if let Some(vs) = attrs.get_mut(attr) { + vs.remove(value); + vs.is_empty() + } else { + false + }; + if rm { + attrs.remove(attr); + }; + } + (State::Live(ref mut attrs), Transition::ModifyAssert(attr, value)) => { + trace!("Live + ModifyAssert({}) -> Live", attr); + + if attrs + .get(attr) + .map(|vs| vs.contains(value)) + .unwrap_or(false) + { + // Valid + } else { + warn!("{} + {:?} -> Assertion not met - REJECTING", attr, value); + return Err(state); + } + } + (State::Live(attrs), Transition::Recycle) => { + trace!("Live + Recycle -> Recycled"); + state = State::Recycled(attrs.clone()); + } + (State::Live(_), Transition::Tombstone(attrs)) => { + trace!("Live + Tombstone -> Tombstone"); + state = State::Tombstone(attrs.clone()); + } + (State::Recycled(attrs), Transition::Revive) => { + trace!("Recycled + Revive -> Live"); + state = State::Live(attrs.clone()); + } + (State::Recycled(ref mut attrs), Transition::ModifyPurge(attr)) => { + trace!("Recycled + ModifyPurge({}) -> Recycled", attr); + attrs.remove(attr); + } + (State::Recycled(attrs), Transition::ModifyRemoved(attr, value)) => { + trace!("Recycled + ModifyRemoved({}) -> Recycled", attr); + let rm = if let Some(vs) = attrs.get_mut(attr) { + vs.remove(value); + vs.is_empty() + } else { + false + }; + if rm { + attrs.remove(attr); + }; + } + (State::Recycled(_), Transition::Tombstone(attrs)) => { + trace!("Recycled + Tombstone -> Tombstone"); + state = State::Tombstone(attrs.clone()); + } + + // ============================== + // Invalid States + /* + (State::NonExistent, Transition::ModifyPurge(_)) + | (State::NonExistent, Transition::ModifyPresent(_, _)) + | (State::NonExistent, Transition::ModifyRemoved(_, _)) + | (State::NonExistent, Transition::Recycle) + | (State::NonExistent, Transition::Revive) + | (State::NonExistent, Transition::Tombstone(_)) + | (State::Live(_), Transition::Create(_)) + | (State::Live(_), Transition::Revive) + | (State::Recycled(_), Transition::Create(_)) + | (State::Recycled(_), Transition::Recycle) + | (State::Recycled(_), Transition::ModifyPresent(_, _)) + | (State::Tombstone(_), _) + */ + (s, t) => { + warn!("{} + {} -> REJECTING", s, t); + return Err(state); + } + }; + } + // Everything must have applied, all good then. + trace!(?state, "applied changes"); + Ok(state) + } +} + +impl EntryChangelog { + pub fn new(cid: Cid, attrs: Eattrs, _schema: &dyn SchemaTransaction) -> Self { + // I think we need to reduce the attrs based on what is / is not replicated.? + + let anchors = btreemap![(cid.clone(), State::NonExistent)]; + let changes = btreemap![( + cid, + Change { + s: vec![Transition::Create(attrs)] + } + )]; + + EntryChangelog { anchors, changes } + } + + // TODO: work out if the below comment about uncommenting is still valid + // Uncomment this once we have a real on-disk storage of the changelog + pub fn new_without_schema(cid: Cid, attrs: Eattrs) -> Self { + // I think we need to reduce the attrs based on what is / is not replicated.? + + // We need to pick a state that reflects the current state WRT to tombstone + // or recycled! 
+ let class = attrs.get("class"); + + let (anchors, changes) = + if class + .as_ref() + .map(|c| c.contains(&PVCLASS_TOMBSTONE as &PartialValue)) + .unwrap_or(false) + { + (btreemap![(cid, State::Tombstone(attrs))], BTreeMap::new()) + } else if class + .as_ref() + .map(|c| c.contains(&PVCLASS_RECYCLED as &PartialValue)) + .unwrap_or(false) + { + (btreemap![(cid, State::Recycled(attrs))], BTreeMap::new()) + } else { + ( + btreemap![(cid.clone(), State::NonExistent)], + btreemap![( + cid, + Change { + s: vec![Transition::Create(attrs)] + } + )], + ) + }; + + EntryChangelog { anchors, changes } + } + + pub fn add_ava_iter(&mut self, cid: &Cid, attr: &str, viter: T) + where + T: IntoIterator, + { + if !self.changes.contains_key(cid) { + self.changes.insert(cid.clone(), Change { s: Vec::new() }); + } + + #[allow(clippy::expect_used)] + let change = self + .changes + .get_mut(cid) + .expect("Memory corruption, change must exist"); + + viter + .into_iter() + .map(|v| Transition::ModifyPresent(AttrString::from(attr), Box::new(v))) + .for_each(|t| change.s.push(t)); + } + + pub fn remove_ava_iter(&mut self, cid: &Cid, attr: &str, viter: T) + where + T: IntoIterator, + { + if !self.changes.contains_key(cid) { + self.changes.insert(cid.clone(), Change { s: Vec::new() }); + } + + #[allow(clippy::expect_used)] + let change = self + .changes + .get_mut(cid) + .expect("Memory corruption, change must exist"); + + viter + .into_iter() + .map(|v| Transition::ModifyRemoved(AttrString::from(attr), Box::new(v))) + .for_each(|t| change.s.push(t)); + } + + pub fn assert_ava(&mut self, cid: &Cid, attr: &str, value: PartialValue) { + if !self.changes.contains_key(cid) { + self.changes.insert(cid.clone(), Change { s: Vec::new() }); + } + + #[allow(clippy::expect_used)] + let change = self + .changes + .get_mut(cid) + .expect("Memory corruption, change must exist"); + + change.s.push(Transition::ModifyAssert( + AttrString::from(attr), + Box::new(value), + )) + } + + pub fn purge_ava(&mut self, cid: &Cid, attr: &str) { + if !self.changes.contains_key(cid) { + self.changes.insert(cid.clone(), Change { s: Vec::new() }); + } + + #[allow(clippy::expect_used)] + let change = self + .changes + .get_mut(cid) + .expect("Memory corruption, change must exist"); + change + .s + .push(Transition::ModifyPurge(AttrString::from(attr))); + } + + pub fn recycled(&mut self, cid: &Cid) { + if !self.changes.contains_key(cid) { + self.changes.insert(cid.clone(), Change { s: Vec::new() }); + } + + #[allow(clippy::expect_used)] + let change = self + .changes + .get_mut(cid) + .expect("Memory corruption, change must exist"); + change.s.push(Transition::Recycle); + } + + pub fn revive(&mut self, cid: &Cid) { + if !self.changes.contains_key(cid) { + self.changes.insert(cid.clone(), Change { s: Vec::new() }); + } + + #[allow(clippy::expect_used)] + let change = self + .changes + .get_mut(cid) + .expect("Memory corruption, change must exist"); + change.s.push(Transition::Revive); + } + + pub fn tombstone(&mut self, cid: &Cid, attrs: Eattrs) { + if !self.changes.contains_key(cid) { + self.changes.insert(cid.clone(), Change { s: Vec::new() }); + } + + #[allow(clippy::expect_used)] + let change = self + .changes + .get_mut(cid) + .expect("Memory corruption, change must exist"); + change.s.push(Transition::Tombstone(attrs)); + } + + /// Replay our changes from and including the replay Cid, up to the latest point + /// in time. We also return a vector of *rejected* Cid's showing what is in the + /// change log that is considered invalid. 
+ fn replay( + &self, + from_cid: Bound<&Cid>, + to_cid: Bound<&Cid>, + ) -> Result<(State, Vec), OperationError> { + // Select the anchor_cid that is *earlier* or *equals* to the replay_cid. + + // if not found, we are *unable to* perform this replay which indicates a problem! + let (anchor_cid, anchor) = if matches!(from_cid, Unbounded) { + // If the from is unbounded, and to is unbounded, we want + // the earliest anchor possible. + + // If from is unbounded and to is bounded, we want the earliest + // possible. + self.anchors.iter().next() + } else { + // If from has a bound, we want an anchor "earlier than" from, regardless + // of the to bound state. + self.anchors.range((Unbounded, from_cid)).next_back() + } + .ok_or_else(|| { + admin_error!( + ?from_cid, + ?to_cid, + "Failed to locate anchor in replay range" + ); + OperationError::ReplReplayFailure + })?; + + trace!(?anchor_cid, ?anchor); + + // Load the entry attribute state at that time. + let mut replay_state = anchor.clone(); + let mut rejected_cid = Vec::new(); + + // For each change + for (change_cid, change) in self.changes.range((Included(anchor_cid), to_cid)) { + // Apply the change. + trace!(?change_cid, ?change); + + replay_state = match replay_state.apply_change(change) { + Ok(mut new_state) => { + // Indicate that this was the highest CID so far. + match &mut new_state { + State::NonExistent => { + trace!("pass"); + } + State::Live(ref mut attrs) + | State::Recycled(ref mut attrs) + | State::Tombstone(ref mut attrs) => { + let cv = vs_cid![change_cid.clone()]; + let _ = attrs.insert(AttrString::from("last_modified_cid"), cv); + } + }; + new_state + } + Err(previous_state) => { + warn!("rejecting invalid change {:?}", change_cid); + rejected_cid.push(change_cid.clone()); + previous_state + } + }; + } + + // Return the eattrs state. + Ok((replay_state, rejected_cid)) + } + + #[instrument( + level = "trace", + name = "verify", + skip(self, _schema, expected_attrs, results) + )] + pub fn verify( + &self, + _schema: &dyn SchemaTransaction, + expected_attrs: &Eattrs, + entry_id: u64, + results: &mut Vec>, + ) { + // We need to be able to take any anchor entry, and replay that when all changes + // are applied we get the *same entry* as the current state. + debug_assert!(results.is_empty()); + + // For each anchor (we only needs it's change id.) + for cid in self.anchors.keys() { + match self.replay(Included(cid), Unbounded) { + Ok((entry_state, rejected)) => { + trace!(?rejected); + + match entry_state { + State::Live(attrs) | State::Recycled(attrs) | State::Tombstone(attrs) => { + if compare_attrs(&attrs, expected_attrs) { + // valid + trace!("changelog is synchronised"); + } else { + // ruh-roh. + warn!("changelog has desynchronised!"); + debug!(?attrs); + debug!(?expected_attrs); + debug_assert!(false); + results + .push(Err(ConsistencyError::ChangelogDesynchronised(entry_id))); + } + } + State::NonExistent => { + warn!("entry does not exist - changelog is corrupted?!"); + results.push(Err(ConsistencyError::ChangelogDesynchronised(entry_id))) + } + } + } + Err(e) => { + error!(?e); + } + } + } + + debug_assert!(results.is_empty()); + } + + pub fn contains_tail_cid(&self, cid: &Cid) -> bool { + if let Some(tail_cid) = self.changes.keys().next_back() { + if tail_cid == cid { + return true; + } + }; + false + } + + pub fn can_delete(&self) -> bool { + // Changelog should be empty. + // should have a current anchor state of tombstone. 
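+        // That is: every change has been trimmed away into an anchor, and the
+        // most recent anchor shows the entry reached its terminal Tombstone
+        // state - only then is it safe to physically remove.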
+ self.changes.is_empty() + && matches!(self.anchors.values().next_back(), Some(State::Tombstone(_))) + } + + pub fn is_live(&self) -> bool { + !matches!(self.anchors.values().next_back(), Some(State::Tombstone(_))) + } + + pub fn cid_iter(&self) -> Keys { + self.changes.keys() + } + + /* + fn insert_anchor(&mut self, cid: Cid, entry_state: State) { + // When we insert an anchor, we have to remove all subsequent anchors (but not + // the preceding ones.) + let _ = self.anchors.split_off(&cid); + self.anchors.insert(cid.clone(), entry_state); + } + */ + + pub fn trim_up_to(&mut self, cid: &Cid) -> Result<(), OperationError> { + // Build a new anchor that is equal or less than this cid. + // In other words, the cid we are trimming to, should be remaining + // in the CL, and we should have an anchor that precedes it. + let (entry_state, rejected) = self.replay(Unbounded, Excluded(cid)).map_err(|e| { + error!(?e); + e + })?; + trace!(?rejected); + // Add the entry_state as an anchor. Use the CID we just + // trimmed to. + + // insert_anchor will remove anything to the right, we also need to + // remove everything to the left, so just clear. + self.anchors.clear(); + self.anchors.insert(cid.clone(), entry_state); + + // And now split the CL. + let mut right = self.changes.split_off(cid); + std::mem::swap(&mut right, &mut self.changes); + // We can trace what we drop later? + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use crate::entry::Eattrs; + // use crate::prelude::*; + use crate::repl::cid::Cid; + use crate::repl::entry::{Change, EntryChangelog, State, Transition}; + use crate::schema::{Schema, SchemaTransaction}; + + #[cfg(test)] + macro_rules! run_entrychangelog_test { + ($test_fn:expr) => {{ + let _ = sketching::test_init(); + let schema_outer = Schema::new().expect("Failed to init schema"); + + let schema_txn = schema_outer.read(); + + $test_fn(&schema_txn) + }}; + } + + #[test] + fn test_entrychangelog_basic() { + run_entrychangelog_test!(|schema: &dyn SchemaTransaction| { + let cid = Cid::new_random_s_d(Duration::from_secs(1)); + let eattrs = Eattrs::new(); + let eclog = EntryChangelog::new(cid, eattrs, schema); + trace!(?eclog); + }) + } + + #[test] + fn test_entrychangelog_state_transitions() { + // Test that all our transitions are defined and work as + // expected. + assert!(State::NonExistent + .apply_change(&Change { s: vec![] }) + .is_ok()); + assert!(State::NonExistent + .apply_change(&Change { + s: vec![Transition::Create(Eattrs::new())] + }) + .is_ok()); + + assert!(State::Live(Eattrs::new()) + .apply_change(&Change { s: vec![] }) + .is_ok()); + assert!(State::Live(Eattrs::new()) + .apply_change(&Change { + s: vec![Transition::Create(Eattrs::new())] + }) + .is_err()); + } +} diff --git a/kanidmd/lib/src/repl/entry.rs b/kanidmd/lib/src/repl/entry.rs index fa9f80af4..74f9857c5 100644 --- a/kanidmd/lib/src/repl/entry.rs +++ b/kanidmd/lib/src/repl/entry.rs @@ -1,588 +1,210 @@ -use std::collections::btree_map::Keys; -use std::collections::BTreeMap; -use std::fmt; -use std::ops::Bound; -use std::ops::Bound::*; - -use kanidm_proto::v1::ConsistencyError; - use super::cid::Cid; -use crate::entry::{compare_attrs, Eattrs}; +use crate::entry::Eattrs; use crate::prelude::*; use crate::schema::SchemaTransaction; -use crate::valueset; +// use crate::valueset; + +use std::collections::BTreeMap; #[derive(Debug, Clone)] -pub struct EntryChangelog { - /// The set of "entries as they existed at a point in time". 
This allows us to rewind - /// to a point-in-time, and then to start to "replay" applying all changes again. - /// - /// A subtle and important piece of information is that an anchor can be considered - /// as the "state as existing between two Cid's". This means for Cid X, this state is - /// the "moment before X". This is important, as for a create we define the initial anchor - /// as "nothing". It's means for the anchor at time X, that changes that occurred at time - /// X have NOT been replayed and applied! - anchors: BTreeMap, - changes: BTreeMap, -} - -/* -impl fmt::Display for EntryChangelog { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f - } -} -*/ - -/// A change defines the transitions that occurred within this Cid (transaction). A change is applied -/// as a whole, or rejected during the replay process. -#[derive(Debug, Clone)] -pub struct Change { - s: Vec, +pub enum State { + Live { changes: BTreeMap }, + Tombstone { at: Cid }, } #[derive(Debug, Clone)] -enum State { - NonExistent, - Live(Eattrs), - Recycled(Eattrs), - Tombstone(Eattrs), +pub struct EntryChangeState { + pub(super) st: State, } -impl fmt::Display for State { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match &self { - State::NonExistent => write!(f, "NonExistent"), - State::Live(_) => write!(f, "Live"), - State::Recycled(_) => write!(f, "Recycled"), - State::Tombstone(_) => write!(f, "Tombstone"), - } - } -} +impl EntryChangeState { + pub fn new(cid: &Cid, attrs: &Eattrs, _schema: &dyn SchemaTransaction) -> Self { + let changes = attrs + .keys() + .cloned() + .map(|attr| (attr, cid.clone())) + .collect(); -#[derive(Debug, Clone)] -enum Transition { - Create(Eattrs), - ModifyPurge(AttrString), - ModifyPresent(AttrString, Box), - ModifyRemoved(AttrString, Box), - ModifyAssert(AttrString, Box), - Recycle, - Revive, - Tombstone(Eattrs), -} + let st = State::Live { changes }; -impl fmt::Display for Transition { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match &self { - Transition::Create(_) => write!(f, "Create"), - Transition::ModifyPurge(a) => write!(f, "ModifyPurge({})", a), - Transition::ModifyPresent(a, _) => write!(f, "ModifyPresent({})", a), - Transition::ModifyRemoved(a, _) => write!(f, "ModifyRemoved({})", a), - Transition::ModifyAssert(a, _) => write!(f, "ModifyAssert({})", a), - Transition::Recycle => write!(f, "Recycle"), - Transition::Revive => write!(f, "Revive"), - Transition::Tombstone(_) => write!(f, "Tombstone"), - } - } -} - -impl State { - fn apply_change(self, change: &Change) -> Result { - let mut state = self; - for transition in change.s.iter() { - match (&mut state, transition) { - (State::NonExistent, Transition::Create(attrs)) => { - trace!("NonExistent + Create -> Live"); - state = State::Live(attrs.clone()); - } - (State::Live(ref mut attrs), Transition::ModifyPurge(attr)) => { - trace!("Live + ModifyPurge({}) -> Live", attr); - attrs.remove(attr); - } - (State::Live(ref mut attrs), Transition::ModifyPresent(attr, value)) => { - trace!("Live + ModifyPresent({}) -> Live", attr); - if let Some(vs) = attrs.get_mut(attr) { - let r = vs.insert_checked(value.as_ref().clone()); - assert!(r.is_ok()); - // Reject if it fails? 
- } else { - #[allow(clippy::expect_used)] - let vs = valueset::from_value_iter(std::iter::once(value.as_ref().clone())) - .expect("Unable to fail - always single value, and only one type!"); - attrs.insert(attr.clone(), vs); - } - } - (State::Live(ref mut attrs), Transition::ModifyRemoved(attr, value)) => { - trace!("Live + ModifyRemoved({}) -> Live", attr); - let rm = if let Some(vs) = attrs.get_mut(attr) { - vs.remove(value); - vs.is_empty() - } else { - false - }; - if rm { - attrs.remove(attr); - }; - } - (State::Live(ref mut attrs), Transition::ModifyAssert(attr, value)) => { - trace!("Live + ModifyAssert({}) -> Live", attr); - - if attrs - .get(attr) - .map(|vs| vs.contains(value)) - .unwrap_or(false) - { - // Valid - } else { - warn!("{} + {:?} -> Assertion not met - REJECTING", attr, value); - return Err(state); - } - } - (State::Live(attrs), Transition::Recycle) => { - trace!("Live + Recycle -> Recycled"); - state = State::Recycled(attrs.clone()); - } - (State::Live(_), Transition::Tombstone(attrs)) => { - trace!("Live + Tombstone -> Tombstone"); - state = State::Tombstone(attrs.clone()); - } - (State::Recycled(attrs), Transition::Revive) => { - trace!("Recycled + Revive -> Live"); - state = State::Live(attrs.clone()); - } - (State::Recycled(ref mut attrs), Transition::ModifyPurge(attr)) => { - trace!("Recycled + ModifyPurge({}) -> Recycled", attr); - attrs.remove(attr); - } - (State::Recycled(attrs), Transition::ModifyRemoved(attr, value)) => { - trace!("Recycled + ModifyRemoved({}) -> Recycled", attr); - let rm = if let Some(vs) = attrs.get_mut(attr) { - vs.remove(value); - vs.is_empty() - } else { - false - }; - if rm { - attrs.remove(attr); - }; - } - (State::Recycled(_), Transition::Tombstone(attrs)) => { - trace!("Recycled + Tombstone -> Tombstone"); - state = State::Tombstone(attrs.clone()); - } - - // ============================== - // Invalid States - /* - (State::NonExistent, Transition::ModifyPurge(_)) - | (State::NonExistent, Transition::ModifyPresent(_, _)) - | (State::NonExistent, Transition::ModifyRemoved(_, _)) - | (State::NonExistent, Transition::Recycle) - | (State::NonExistent, Transition::Revive) - | (State::NonExistent, Transition::Tombstone(_)) - | (State::Live(_), Transition::Create(_)) - | (State::Live(_), Transition::Revive) - | (State::Recycled(_), Transition::Create(_)) - | (State::Recycled(_), Transition::Recycle) - | (State::Recycled(_), Transition::ModifyPresent(_, _)) - | (State::Tombstone(_), _) - */ - (s, t) => { - warn!("{} + {} -> REJECTING", s, t); - return Err(state); - } - }; - } - // Everything must have applied, all good then. - trace!(?state, "applied changes"); - Ok(state) - } -} - -impl EntryChangelog { - pub fn new(cid: Cid, attrs: Eattrs, _schema: &dyn SchemaTransaction) -> Self { - // I think we need to reduce the attrs based on what is / is not replicated.? - - let anchors = btreemap![(cid.clone(), State::NonExistent)]; - let changes = btreemap![( - cid, - Change { - s: vec![Transition::Create(attrs)] - } - )]; - - EntryChangelog { anchors, changes } + EntryChangeState { st } } - // TODO: work out if the below comment about uncommenting is still valid - // Uncomment this once we have a real on-disk storage of the changelog - pub fn new_without_schema(cid: Cid, attrs: Eattrs) -> Self { - // I think we need to reduce the attrs based on what is / is not replicated.? - - // We need to pick a state that reflects the current state WRT to tombstone - // or recycled! 
+ pub fn new_without_schema(cid: &Cid, attrs: &Eattrs) -> Self { let class = attrs.get("class"); - - let (anchors, changes) = if class + let st = if class .as_ref() .map(|c| c.contains(&PVCLASS_TOMBSTONE as &PartialValue)) .unwrap_or(false) { - (btreemap![(cid, State::Tombstone(attrs))], BTreeMap::new()) - } else if class - .as_ref() - .map(|c| c.contains(&PVCLASS_RECYCLED as &PartialValue)) - .unwrap_or(false) - { - (btreemap![(cid, State::Recycled(attrs))], BTreeMap::new()) + State::Tombstone { at: cid.clone() } } else { - ( - btreemap![(cid.clone(), State::NonExistent)], - btreemap![( - cid, - Change { - s: vec![Transition::Create(attrs)] - } - )], - ) + let changes = attrs + .keys() + .cloned() + .map(|attr| (attr, cid.clone())) + .collect(); + + State::Live { changes } }; - EntryChangelog { anchors, changes } + EntryChangeState { st } } - pub fn add_ava_iter(&mut self, cid: &Cid, attr: &str, viter: T) - where - T: IntoIterator, - { - if !self.changes.contains_key(cid) { - self.changes.insert(cid.clone(), Change { s: Vec::new() }); - } - - #[allow(clippy::expect_used)] - let change = self - .changes - .get_mut(cid) - .expect("Memory corruption, change must exist"); - - viter - .into_iter() - .map(|v| Transition::ModifyPresent(AttrString::from(attr), Box::new(v))) - .for_each(|t| change.s.push(t)); + pub fn current(&self) -> &State { + &self.st } - pub fn remove_ava_iter(&mut self, cid: &Cid, attr: &str, viter: T) - where - T: IntoIterator, - { - if !self.changes.contains_key(cid) { - self.changes.insert(cid.clone(), Change { s: Vec::new() }); - } - - #[allow(clippy::expect_used)] - let change = self - .changes - .get_mut(cid) - .expect("Memory corruption, change must exist"); - - viter - .into_iter() - .map(|v| Transition::ModifyRemoved(AttrString::from(attr), Box::new(v))) - .for_each(|t| change.s.push(t)); - } - - pub fn assert_ava(&mut self, cid: &Cid, attr: &str, value: PartialValue) { - if !self.changes.contains_key(cid) { - self.changes.insert(cid.clone(), Change { s: Vec::new() }); - } - - #[allow(clippy::expect_used)] - let change = self - .changes - .get_mut(cid) - .expect("Memory corruption, change must exist"); - - change.s.push(Transition::ModifyAssert( - AttrString::from(attr), - Box::new(value), - )) - } - - pub fn purge_ava(&mut self, cid: &Cid, attr: &str) { - if !self.changes.contains_key(cid) { - self.changes.insert(cid.clone(), Change { s: Vec::new() }); - } - - #[allow(clippy::expect_used)] - let change = self - .changes - .get_mut(cid) - .expect("Memory corruption, change must exist"); - change - .s - .push(Transition::ModifyPurge(AttrString::from(attr))); - } - - pub fn recycled(&mut self, cid: &Cid) { - if !self.changes.contains_key(cid) { - self.changes.insert(cid.clone(), Change { s: Vec::new() }); - } - - #[allow(clippy::expect_used)] - let change = self - .changes - .get_mut(cid) - .expect("Memory corruption, change must exist"); - change.s.push(Transition::Recycle); - } - - pub fn revive(&mut self, cid: &Cid) { - if !self.changes.contains_key(cid) { - self.changes.insert(cid.clone(), Change { s: Vec::new() }); - } - - #[allow(clippy::expect_used)] - let change = self - .changes - .get_mut(cid) - .expect("Memory corruption, change must exist"); - change.s.push(Transition::Revive); - } - - pub fn tombstone(&mut self, cid: &Cid, attrs: Eattrs) { - if !self.changes.contains_key(cid) { - self.changes.insert(cid.clone(), Change { s: Vec::new() }); - } - - #[allow(clippy::expect_used)] - let change = self - .changes - .get_mut(cid) - .expect("Memory corruption, 
change must exist"); - change.s.push(Transition::Tombstone(attrs)); - } - - /// Replay our changes from and including the replay Cid, up to the latest point - /// in time. We also return a vector of *rejected* Cid's showing what is in the - /// change log that is considered invalid. - fn replay( - &self, - from_cid: Bound<&Cid>, - to_cid: Bound<&Cid>, - ) -> Result<(State, Vec), OperationError> { - // Select the anchor_cid that is *earlier* or *equals* to the replay_cid. - - // if not found, we are *unable to* perform this replay which indicates a problem! - let (anchor_cid, anchor) = if matches!(from_cid, Unbounded) { - // If the from is unbounded, and to is unbounded, we want - // the earliest anchor possible. - - // If from is unbounded and to is bounded, we want the earliest - // possible. - self.anchors.iter().next() - } else { - // If from has a bound, we want an anchor "earlier than" from, regardless - // of the to bound state. - self.anchors.range((Unbounded, from_cid)).next_back() - } - .ok_or_else(|| { - admin_error!( - ?from_cid, - ?to_cid, - "Failed to locate anchor in replay range" - ); - OperationError::ReplReplayFailure - })?; - - trace!(?anchor_cid, ?anchor); - - // Load the entry attribute state at that time. - let mut replay_state = anchor.clone(); - let mut rejected_cid = Vec::new(); - - // For each change - for (change_cid, change) in self.changes.range((Included(anchor_cid), to_cid)) { - // Apply the change. - trace!(?change_cid, ?change); - - replay_state = match replay_state.apply_change(change) { - Ok(mut new_state) => { - // Indicate that this was the highest CID so far. - match &mut new_state { - State::NonExistent => { - trace!("pass"); - } - State::Live(ref mut attrs) - | State::Recycled(ref mut attrs) - | State::Tombstone(ref mut attrs) => { - let cv = vs_cid![change_cid.clone()]; - let _ = attrs.insert(AttrString::from("last_modified_cid"), cv); - } - }; - new_state + pub fn change_ava(&mut self, cid: &Cid, attr: &str) { + match &mut self.st { + State::Live { ref mut changes } => { + if let Some(change) = changes.get_mut(attr) { + // Update the cid. + if change != cid { + *change = cid.clone() + } + } else { + changes.insert(attr.into(), cid.clone()); } - Err(previous_state) => { - warn!("rejecting invalid change {:?}", change_cid); - rejected_cid.push(change_cid.clone()); - previous_state - } - }; + } + State::Tombstone { .. } => { + assert!(false) + } } - - // Return the eattrs state. - Ok((replay_state, rejected_cid)) } - #[instrument( - level = "trace", - name = "verify", - skip(self, _schema, expected_attrs, results) - )] + pub fn tombstone(&mut self, cid: &Cid) { + match &mut self.st { + State::Live { changes: _ } => self.st = State::Tombstone { at: cid.clone() }, + State::Tombstone { .. } => {} // no-op + }; + } + + pub fn can_delete(&self, cid: &Cid) -> bool { + match &self.st { + State::Live { .. } => false, + State::Tombstone { at } => at < cid, + } + } + + pub fn is_live(&self) -> bool { + match &self.st { + State::Live { .. } => true, + State::Tombstone { .. } => false, + } + } + + pub fn contains_tail_cid(&self, cid: &Cid) -> bool { + // This is slow? Is it needed? 
+        match &self.st {
+            State::Live { changes } => changes.values().any(|change| change == cid),
+            State::Tombstone { at } => at == cid,
+        }
+    }
+
+    pub fn cid_iter(&self) -> Vec<&Cid> {
+        match &self.st {
+            State::Live { changes } => {
+                let mut v: Vec<_> = changes.values().collect();
+                v.sort_unstable();
+                v.dedup();
+                v
+            }
+            State::Tombstone { at } => vec![at],
+        }
+    }
+
+    pub fn retain<F>(&mut self, f: F)
+    where
+        F: FnMut(&AttrString, &mut Cid) -> bool,
+    {
+        match &mut self.st {
+            State::Live { changes } => changes.retain(f),
+            State::Tombstone { .. } => {}
+        }
+    }
+
+    #[instrument(level = "trace", name = "verify", skip_all)]
     pub fn verify(
         &self,
-        _schema: &dyn SchemaTransaction,
+        schema: &dyn SchemaTransaction,
         expected_attrs: &Eattrs,
         entry_id: u64,
         results: &mut Vec<Result<(), ConsistencyError>>,
     ) {
-        // We need to be able to take any anchor entry, and replay that when all changes
-        // are applied we get the *same entry* as the current state.
-        debug_assert!(results.is_empty());
+        let class = expected_attrs.get("class");
+        let is_ts = class
+            .as_ref()
+            .map(|c| c.contains(&PVCLASS_TOMBSTONE as &PartialValue))
+            .unwrap_or(false);
 
-        // For each anchor (we only needs it's change id.)
-        for cid in self.anchors.keys() {
-            match self.replay(Included(cid), Unbounded) {
-                Ok((entry_state, rejected)) => {
-                    trace!(?rejected);
+        match (&self.st, is_ts) {
+            (State::Live { changes }, false) => {
+                // Check that all attrs from expected, have a value in our changes.
+                let inconsistent: Vec<_> = expected_attrs
+                    .keys()
+                    .filter(|attr| {
+                        /*
+                         * If the attribute is a replicated attribute, and it is NOT present
+                         * in the change state then we are in a desync state.
+                         *
+                         * However, we don't check the inverse - if an entry is in the change state
+                         * but is NOT replicated by schema. This is because there is a way to
+                         * delete an attribute in schema which will then prevent future replications
+                         * of that value. However the value, while not being updated, will retain
+                         * a state entry in the change state.
+                         *
+                         * For the entry to then be replicated once more, it would require its schema
+                         * attributes to be re-added and then the replication will resume from whatever
+                         * receives the changes first. Generally there are lots of desync and edge
+                         * cases here, which is why we pretty much don't allow schema to be deleted,
+                         * but we have to handle it here due to a test case that simulates this.
+                         */
+                        let desync = schema.is_replicated(attr) && !changes.contains_key(*attr);
+                        if desync {
+                            debug!(%entry_id, %attr, %desync);
+                        }
+                        desync
+                    })
+                    .collect();
 
-                    match entry_state {
-                        State::Live(attrs) | State::Recycled(attrs) | State::Tombstone(attrs) => {
-                            if compare_attrs(&attrs, expected_attrs) {
-                                // valid
-                                trace!("changelog is synchronised");
-                            } else {
-                                // ruh-roh.
-                                warn!("changelog has desynchronised!");
-                                debug!(?attrs);
-                                debug!(?expected_attrs);
-                                debug_assert!(false);
-                                results
-                                    .push(Err(ConsistencyError::ChangelogDesynchronised(entry_id)));
-                            }
-                        }
-                        State::NonExistent => {
-                            warn!("entry does not exist - changelog is corrupted?!");
-                            results.push(Err(ConsistencyError::ChangelogDesynchronised(entry_id)))
-                        }
-                    }
-                }
-                Err(e) => {
-                    error!(?e);
+                if inconsistent.is_empty() {
+                    trace!("changestate is synchronised");
+                } else {
+                    warn!("changestate has desynchronised! Missing state attrs {inconsistent:?}");
+                    results.push(Err(ConsistencyError::ChangeStateDesynchronised(entry_id)));
+                }
+            }
+            (State::Tombstone { .. }, true) => {
+                trace!("changestate is synchronised");
+            }
+            (State::Live { ..
}, true) => { + warn!("changestate has desynchronised! State Live when tombstone is true"); + results.push(Err(ConsistencyError::ChangeStateDesynchronised(entry_id))); + } + (State::Tombstone { .. }, false) => { + warn!("changestate has desynchronised! State Tombstone when tombstone is false"); + results.push(Err(ConsistencyError::ChangeStateDesynchronised(entry_id))); + } } - - debug_assert!(results.is_empty()); } +} - pub fn contains_tail_cid(&self, cid: &Cid) -> bool { - if let Some(tail_cid) = self.changes.keys().next_back() { - if tail_cid == cid { - return true; +impl PartialEq for EntryChangeState { + fn eq(&self, rhs: &Self) -> bool { + match (&self.st, &rhs.st) { + ( + State::Live { + changes: changes_left, + }, + State::Live { + changes: changes_right, + }, + ) => changes_left.eq(changes_right), + (State::Tombstone { at: at_left }, State::Tombstone { at: at_right }) => { + at_left.eq(at_right) } - }; - false - } - - pub fn can_delete(&self) -> bool { - // Changelog should be empty. - // should have a current anchor state of tombstone. - self.changes.is_empty() - && matches!(self.anchors.values().next_back(), Some(State::Tombstone(_))) - } - - pub fn is_live(&self) -> bool { - !matches!(self.anchors.values().next_back(), Some(State::Tombstone(_))) - } - - pub fn cid_iter(&self) -> Keys { - self.changes.keys() - } - - /* - fn insert_anchor(&mut self, cid: Cid, entry_state: State) { - // When we insert an anchor, we have to remove all subsequent anchors (but not - // the preceding ones.) - let _ = self.anchors.split_off(&cid); - self.anchors.insert(cid.clone(), entry_state); - } - */ - - pub fn trim_up_to(&mut self, cid: &Cid) -> Result<(), OperationError> { - // Build a new anchor that is equal or less than this cid. - // In other words, the cid we are trimming to, should be remaining - // in the CL, and we should have an anchor that precedes it. - let (entry_state, rejected) = self.replay(Unbounded, Excluded(cid)).map_err(|e| { - error!(?e); - e - })?; - trace!(?rejected); - // Add the entry_state as an anchor. Use the CID we just - // trimmed to. - - // insert_anchor will remove anything to the right, we also need to - // remove everything to the left, so just clear. - self.anchors.clear(); - self.anchors.insert(cid.clone(), entry_state); - - // And now split the CL. - let mut right = self.changes.split_off(cid); - std::mem::swap(&mut right, &mut self.changes); - // We can trace what we drop later? - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use std::time::Duration; - - use crate::entry::Eattrs; - // use crate::prelude::*; - use crate::repl::cid::Cid; - use crate::repl::entry::{Change, EntryChangelog, State, Transition}; - use crate::schema::{Schema, SchemaTransaction}; - - #[test] - fn test_entrychangelog_basic() { - run_entrychangelog_test!(|schema: &dyn SchemaTransaction| { - let cid = Cid::new_random_s_d(Duration::from_secs(1)); - let eattrs = Eattrs::new(); - let eclog = EntryChangelog::new(cid, eattrs, schema); - trace!(?eclog); - }) - } - - #[test] - fn test_entrychangelog_state_transitions() { - // Test that all our transitions are defined and work as - // expected. 
-        assert!(State::NonExistent
-            .apply_change(&Change { s: vec![] })
-            .is_ok());
-        assert!(State::NonExistent
-            .apply_change(&Change {
-                s: vec![Transition::Create(Eattrs::new())]
-            })
-            .is_ok());
-
-        assert!(State::Live(Eattrs::new())
-            .apply_change(&Change { s: vec![] })
-            .is_ok());
-        assert!(State::Live(Eattrs::new())
-            .apply_change(&Change {
-                s: vec![Transition::Create(Eattrs::new())]
-            })
-            .is_err());
-    }
-}
+            }
+            (_, _) => false,
+        }
+    }
+}
diff --git a/kanidmd/lib/src/repl/mod.rs b/kanidmd/lib/src/repl/mod.rs
index a4ed2e850..31e56acb7 100644
--- a/kanidmd/lib/src/repl/mod.rs
+++ b/kanidmd/lib/src/repl/mod.rs
@@ -2,5 +2,9 @@ pub mod cid;
 pub mod entry;
 pub mod ruv;
 
+pub mod consumer;
+pub mod proto;
+pub mod supplier;
+
 #[cfg(test)]
 mod tests;
diff --git a/kanidmd/lib/src/repl/proto.rs b/kanidmd/lib/src/repl/proto.rs
new file mode 100644
index 000000000..eb7fa8893
--- /dev/null
+++ b/kanidmd/lib/src/repl/proto.rs
@@ -0,0 +1,497 @@
+use super::cid::Cid;
+use super::entry::EntryChangeState;
+use super::entry::State;
+use crate::entry::Eattrs;
+use crate::prelude::*;
+use crate::schema::{SchemaReadTransaction, SchemaTransaction};
+use crate::valueset;
+use base64urlsafedata::Base64UrlSafeData;
+use serde::{Deserialize, Serialize};
+use std::collections::{BTreeMap, BTreeSet};
+
+use webauthn_rs::prelude::{
+    DeviceKey as DeviceKeyV4, Passkey as PasskeyV4, SecurityKey as SecurityKeyV4,
+};
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub struct ReplCidV1 {
+    #[serde(rename = "t")]
+    pub ts: Duration,
+    #[serde(rename = "s")]
+    pub s_uuid: Uuid,
+}
+
+// From / Into CID
+impl From<&Cid> for ReplCidV1 {
+    fn from(cid: &Cid) -> Self {
+        ReplCidV1 {
+            ts: cid.ts,
+            s_uuid: cid.s_uuid,
+        }
+    }
+}
+
+impl From<ReplCidV1> for Cid {
+    fn from(cid: ReplCidV1) -> Self {
+        Cid {
+            ts: cid.ts,
+            s_uuid: cid.s_uuid,
+        }
+    }
+}
+
+impl From<&ReplCidV1> for Cid {
+    fn from(cid: &ReplCidV1) -> Self {
+        Cid {
+            ts: cid.ts,
+            s_uuid: cid.s_uuid,
+        }
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
+pub struct ReplAddressV1 {
+    #[serde(rename = "f")]
+    pub formatted: String,
+    #[serde(rename = "s")]
+    pub street_address: String,
+    #[serde(rename = "l")]
+    pub locality: String,
+    #[serde(rename = "r")]
+    pub region: String,
+    #[serde(rename = "p")]
+    pub postal_code: String,
+    #[serde(rename = "c")]
+    pub country: String,
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub enum ReplTotpAlgoV1 {
+    S1,
+    S256,
+    S512,
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub struct ReplTotpV1 {
+    pub key: Base64UrlSafeData,
+    pub step: u64,
+    pub algo: ReplTotpAlgoV1,
+    pub digits: u8,
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+#[allow(non_camel_case_types)]
+pub enum ReplPasswordV1 {
+    PBKDF2 {
+        cost: usize,
+        salt: Base64UrlSafeData,
+        hash: Base64UrlSafeData,
+    },
+    PBKDF2_SHA1 {
+        cost: usize,
+        salt: Base64UrlSafeData,
+        hash: Base64UrlSafeData,
+    },
+    PBKDF2_SHA512 {
+        cost: usize,
+        salt: Base64UrlSafeData,
+        hash: Base64UrlSafeData,
+    },
+    SSHA512 {
+        salt: Base64UrlSafeData,
+        hash: Base64UrlSafeData,
+    },
+    NT_MD4 {
+        hash: Base64UrlSafeData,
+    },
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub struct ReplBackupCodeV1 {
+    pub codes: BTreeSet<String>,
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub enum ReplCredV1 {
+    TmpWn {
+        tag: String,
+        set: Vec<ReplPasskeyV4V1>,
+    },
+    Password {
+        tag: String,
+        password: ReplPasswordV1,
+        uuid: Uuid,
+    },
+    GenPassword {
+        tag: String,
+        password: ReplPasswordV1,
+        uuid: Uuid,
+    },
+    PasswordMfa {
+        tag: String,
+        password: ReplPasswordV1,
+        totp: Vec<(String, ReplTotpV1)>,
+        backup_code: Option<ReplBackupCodeV1>,
+        webauthn: Vec<ReplSecurityKeyV4V1>,
+        uuid: Uuid,
+    },
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub enum ReplIntentTokenV1 {
+    Valid {
+        token_id: String,
+        max_ttl: Duration,
+    },
+    InProgress {
+        token_id: String,
+        max_ttl: Duration,
+        session_id: Uuid,
+        session_ttl: Duration,
+    },
+    Consumed {
+        token_id: String,
+        max_ttl: Duration,
+    },
+}
+
+#[derive(Serialize, Deserialize, Debug)]
+pub struct ReplSecurityKeyV4V1 {
+    pub tag: String,
+    pub key: SecurityKeyV4,
+}
+
+impl Eq for ReplSecurityKeyV4V1 {}
+
+impl PartialEq for ReplSecurityKeyV4V1 {
+    fn eq(&self, other: &Self) -> bool {
+        self.key.cred_id() == other.key.cred_id()
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct ReplPasskeyV4V1 {
+    pub uuid: Uuid,
+    pub tag: String,
+    pub key: PasskeyV4,
+}
+
+impl Eq for ReplPasskeyV4V1 {}
+
+impl PartialEq for ReplPasskeyV4V1 {
+    fn eq(&self, other: &Self) -> bool {
+        self.uuid == other.uuid && self.key.cred_id() == other.key.cred_id()
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct ReplDeviceKeyV4V1 {
+    pub uuid: Uuid,
+    pub tag: String,
+    pub key: DeviceKeyV4,
+}
+
+impl Eq for ReplDeviceKeyV4V1 {}
+
+impl PartialEq for ReplDeviceKeyV4V1 {
+    fn eq(&self, other: &Self) -> bool {
+        self.uuid == other.uuid && self.key.cred_id() == other.key.cred_id()
+    }
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub struct ReplOauthScopeMapV1 {
+    pub refer: Uuid,
+    pub data: BTreeSet<String>,
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub struct ReplOauth2SessionV1 {
+    pub refer: Uuid,
+    pub parent: Uuid,
+    pub expiry: Option<String>,
+    pub issued_at: String,
+    pub rs_uuid: Uuid,
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Default)]
+pub enum ReplAccessScopeV1 {
+    IdentityOnly,
+    #[default]
+    ReadOnly,
+    ReadWrite,
+    Synchronise,
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub enum ReplIdentityIdV1 {
+    Internal,
+    Uuid(Uuid),
+    Synch(Uuid),
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub struct ReplSessionV1 {
+    pub refer: Uuid,
+    pub label: String,
+    pub expiry: Option<String>,
+    pub issued_at: String,
+    pub issued_by: ReplIdentityIdV1,
+    pub scope: ReplAccessScopeV1,
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub enum ReplAttrV1 {
+    Address {
+        set: Vec<ReplAddressV1>,
+    },
+    EmailAddress {
+        primary: String,
+        set: Vec<String>,
+    },
+    PublicBinary {
+        set: Vec<(String, Base64UrlSafeData)>,
+    },
+    PrivateBinary {
+        set: Vec<Base64UrlSafeData>,
+    },
+    Bool {
+        set: Vec<bool>,
+    },
+    Cid {
+        set: Vec<ReplCidV1>,
+    },
+    Credential {
+        set: Vec<ReplCredV1>,
+    },
+    IntentToken {
+        set: Vec<ReplIntentTokenV1>,
+    },
+    Passkey {
+        set: Vec<ReplPasskeyV4V1>,
+    },
+    DeviceKey {
+        set: Vec<ReplDeviceKeyV4V1>,
+    },
+    DateTime {
+        set: Vec<String>,
+    },
+    Iname {
+        set: Vec<String>,
+    },
+    IndexType {
+        set: Vec<u16>,
+    },
+    Iutf8 {
+        set: Vec<String>,
+    },
+    JsonFilter {
+        set: Vec<String>,
+    },
+    JwsKeyEs256 {
+        set: Vec<Base64UrlSafeData>,
+    },
+    JwsKeyRs256 {
+        set: Vec<Base64UrlSafeData>,
+    },
+    NsUniqueId {
+        set: Vec<String>,
+    },
+    SecretValue {
+        set: Vec<String>,
+    },
+    RestrictedString {
+        set: Vec<String>,
+    },
+    Uint32 {
+        set: Vec<u32>,
+    },
+    Url {
+        set: Vec<Url>,
+    },
+    Utf8 {
+        set: Vec<String>,
+    },
+    Uuid {
+        set: Vec<Uuid>,
+    },
+    Reference {
+        set: Vec<Uuid>,
+    },
+    SyntaxType {
+        set: Vec<u16>,
+    },
+    Spn {
+        set: Vec<(String, String)>,
+    },
+    UiHint {
+        set: Vec<u16>,
+    },
+    SshKey {
+        set: Vec<(String, String)>,
+    },
+    OauthScope {
+        set: Vec<String>,
+    },
+    OauthScopeMap {
+        set: Vec<ReplOauthScopeMapV1>,
+    },
+    Oauth2Session {
+        set: Vec<ReplOauth2SessionV1>,
+    },
+    Session {
+        set: Vec<ReplSessionV1>,
+    },
+    TotpSecret {
+        set: Vec<(String, ReplTotpV1)>,
+    },
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub struct ReplAttrStateV1 {
+    cid: ReplCidV1,
+    attr: Option<ReplAttrV1>,
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+pub enum ReplStateV1 {
+    Live {
+        attrs: BTreeMap<String, ReplAttrStateV1>,
+    },
+    Tombstone {
+        at: ReplCidV1,
+    },
+}
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+#[serde(rename_all = "lowercase")]
+// I think partial entries should be separate? This clearly implies a refresh.
+pub struct ReplEntryV1 {
+    uuid: Uuid,
+    // Change State
+    st: ReplStateV1,
+}
+
+impl ReplEntryV1 {
+    pub fn new(entry: &EntrySealedCommitted, schema: &SchemaReadTransaction) -> ReplEntryV1 {
+        let cs = entry.get_changestate();
+        let uuid = entry.get_uuid();
+
+        let st = match cs.current() {
+            State::Live { changes } => {
+                let live_attrs = entry.get_ava();
+
+                let attrs = changes
+                    .iter()
+                    .filter_map(|(attr_name, cid)| {
+                        if schema.is_replicated(attr_name) {
+                            let live_attr = live_attrs.get(attr_name.as_str());
+
+                            let cid = cid.into();
+                            let attr = live_attr.and_then(|maybe|
+                                // There is a quirk in the way we currently handle certain
+                                // types of adds/deletes which means it may be possible to have
+                                // an empty value set still in memory on a supplier. In the future
+                                // we may make it so in-memory valuesets can be empty and sent,
+                                // but for now, if it's an empty set in any capacity, we map
+                                // to None and just send the Cid, since both have the same result
+                                // on how the entry/attr state looks at each end.
+                                if maybe.len() > 0 {
+                                    Some(maybe.to_repl_v1())
+                                } else {
+                                    None
+                                }
+                            );
+
+                            Some((attr_name.to_string(), ReplAttrStateV1 { cid, attr }))
+                        } else {
+                            None
+                        }
+                    })
+                    .collect();
+
+                ReplStateV1::Live { attrs }
+            }
+            State::Tombstone { at } => ReplStateV1::Tombstone { at: at.into() },
+        };
+
+        ReplEntryV1 { uuid, st }
+    }
+
+    pub fn rehydrate(&self) -> Result<(EntryChangeState, Eattrs), OperationError> {
+        match &self.st {
+            ReplStateV1::Live { attrs } => {
+                trace!("{:#?}", attrs);
+                // We need to build two sets, one for the Entry Change States, and one for the
+                // Eattrs.
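+                // To be precise: `changes` is the attr -> Cid map that becomes the
+                // new EntryChangeState, and `eattrs` is the attr -> ValueSet map that
+                // becomes the entry content. An attribute that arrived with a cid but
+                // no value (the empty value set quirk described in new() above) is
+                // recorded in `changes` only.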
+                let mut changes = BTreeMap::default();
+                let mut eattrs = Eattrs::default();
+
+                for (attr_name, ReplAttrStateV1 { cid, attr }) in attrs.iter() {
+                    let astring: AttrString = attr_name.as_str().into();
+                    let cid: Cid = cid.into();
+
+                    if let Some(attr_value) = attr {
+                        let v = valueset::from_repl_v1(attr_value).map_err(|e| {
+                            error!("Unable to restore valueset for {}", attr_name);
+                            e
+                        })?;
+                        if eattrs.insert(astring.clone(), v).is_some() {
+                            error!(
+                                "Impossible eattrs state, attribute {} appears to be duplicated!",
+                                attr_name
+                            );
+                            return Err(OperationError::InvalidEntryState);
+                        }
+                    }
+
+                    if changes.insert(astring, cid).is_some() {
+                        error!(
+                            "Impossible changes state, attribute {} appears to be duplicated!",
+                            attr_name
+                        );
+                        return Err(OperationError::InvalidEntryState);
+                    }
+                }
+
+                let ecstate = EntryChangeState {
+                    st: State::Live { changes },
+                };
+                Ok((ecstate, eattrs))
+            }
+            ReplStateV1::Tombstone { at } => {
+                let at: Cid = at.into();
+
+                let mut eattrs = Eattrs::default();
+
+                let class_ava = vs_iutf8!["object", "tombstone"];
+                let last_mod_ava = vs_cid![at.clone()];
+
+                eattrs.insert(AttrString::from("uuid"), vs_uuid![self.uuid]);
+                eattrs.insert(AttrString::from("class"), class_ava);
+                eattrs.insert(AttrString::from("last_modified_cid"), last_mod_ava);
+
+                let ecstate = EntryChangeState {
+                    st: State::Tombstone { at },
+                };
+
+                Ok((ecstate, eattrs))
+            }
+        }
+    }
+}
+
+// From / Into Entry
+
+#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
+#[serde(rename_all = "lowercase")]
+pub enum ReplRefreshContext {
+    V1 {
+        domain_version: DomainVersion,
+        domain_uuid: Uuid,
+        schema_entries: Vec<ReplEntryV1>,
+        meta_entries: Vec<ReplEntryV1>,
+        entries: Vec<ReplEntryV1>,
+    },
+}
diff --git a/kanidmd/lib/src/repl/ruv.rs b/kanidmd/lib/src/repl/ruv.rs
index d66769d6f..09f2a9d92 100644
--- a/kanidmd/lib/src/repl/ruv.rs
+++ b/kanidmd/lib/src/repl/ruv.rs
@@ -9,6 +9,7 @@ use kanidm_proto::v1::ConsistencyError;
 
 use crate::prelude::*;
 use crate::repl::cid::Cid;
+use std::fmt;
 
 pub struct ReplicationUpdateVector {
     // This sorts by time. Should we look up by IDL or by UUID?
@@ -42,6 +43,15 @@ pub struct ReplicationUpdateVectorWriteTransaction<'a> {
     data: BptreeMapWriteTxn<'a, Cid, IDLBitRange>,
 }
 
+impl<'a> fmt::Debug for ReplicationUpdateVectorWriteTransaction<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        writeln!(f, "RUV DUMP")?;
+        self.data
+            .iter()
+            .try_for_each(|(cid, idl)| writeln!(f, "* [{cid} {idl:?}]"))
+    }
+}
+
 pub struct ReplicationUpdateVectorReadTransaction<'a> {
     data: BptreeMapReadTxn<'a, Cid, IDLBitRange>,
 }
@@ -59,10 +69,10 @@ pub trait ReplicationUpdateVectorTransaction {
         for entry in entries {
             // The DB id we need.
             let eid = entry.get_id();
-            let eclog = entry.get_changelog();
+            let ecstate = entry.get_changestate();
             // We don't need the details of the change - only the cid of the
             // change that this entry was involved in.
-            for cid in eclog.cid_iter() {
+            for cid in ecstate.cid_iter() {
                 if let Some(idl) = check_ruv.get_mut(cid) {
                     // We can't guarantee id order, so we have to do this properly.
                     idl.insert_id(eid);
@@ -91,7 +101,15 @@ pub trait ReplicationUpdateVectorTransaction {
         while let (Some((ck, cv)), Some((sk, sv))) = (&check_next, &snap_next) {
             match ck.cmp(sk) {
                 Ordering::Equal => {
-                    if cv == sv {
+                    // Counter-intuitive, but here we check that the check set is a *subset*
+                    // of the ruv snapshot. This is because when we have an entry that is
+                    // tombstoned, all its CID interactions are "lost" and its cid becomes
So the "rebuilt" ruv will miss that + // entry. + // + // In the future the RUV concept may be ditched entirely anyway, thoughts needed. + let intersect = *cv & *sv; + if *cv == &intersect { trace!("{:?} is consistent!", ck); } else { admin_warn!("{:?} is NOT consistent! IDL's differ", ck); @@ -102,15 +120,17 @@ pub trait ReplicationUpdateVectorTransaction { snap_next = snap_iter.next(); } Ordering::Less => { + // Due to deletes, it can be that the check ruv is missing whole entries + // in a rebuild. admin_warn!("{:?} is NOT consistent! CID missing from RUV", ck); - debug_assert!(false); - results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string()))); + // debug_assert!(false); + // results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string()))); check_next = check_iter.next(); } Ordering::Greater => { admin_warn!("{:?} is NOT consistent! CID should not exist in RUV", sk); - debug_assert!(false); - results.push(Err(ConsistencyError::RuvInconsistent(sk.to_string()))); + // debug_assert!(false); + // results.push(Err(ConsistencyError::RuvInconsistent(sk.to_string()))); snap_next = snap_iter.next(); } } @@ -118,15 +138,15 @@ pub trait ReplicationUpdateVectorTransaction { while let Some((ck, _cv)) = &check_next { admin_warn!("{:?} is NOT consistent! CID missing from RUV", ck); - debug_assert!(false); - results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string()))); + // debug_assert!(false); + // results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string()))); check_next = check_iter.next(); } while let Some((sk, _sv)) = &snap_next { admin_warn!("{:?} is NOT consistent! CID should not exist in RUV", sk); - debug_assert!(false); - results.push(Err(ConsistencyError::RuvInconsistent(sk.to_string()))); + // debug_assert!(false); + // results.push(Err(ConsistencyError::RuvInconsistent(sk.to_string()))); snap_next = snap_iter.next(); } @@ -162,10 +182,10 @@ impl<'a> ReplicationUpdateVectorWriteTransaction<'a> { for entry in entries { // The DB id we need. let eid = entry.get_id(); - let eclog = entry.get_changelog(); + let ecstate = entry.get_changestate(); // We don't need the details of the change - only the cid of the // change that this entry was involved in. - for cid in eclog.cid_iter() { + for cid in ecstate.cid_iter() { if let Some(idl) = rebuild_ruv.get_mut(cid) { // We can't guarantee id order, so we have to do this properly. idl.insert_id(eid); diff --git a/kanidmd/lib/src/repl/supplier.rs b/kanidmd/lib/src/repl/supplier.rs new file mode 100644 index 000000000..29469fcf2 --- /dev/null +++ b/kanidmd/lib/src/repl/supplier.rs @@ -0,0 +1,101 @@ +use super::proto::{ReplEntryV1, ReplRefreshContext}; +use crate::prelude::*; + +impl<'a> QueryServerReadTransaction<'a> { + // Given a consumers state, calculate the differential of changes they + // need to be sent to bring them to the equivalent state. + + // We use the RUV or Cookie to determine if: + // * The consumer requires a full-reinit. + // * Which entry attr-states need to be sent, if any + + #[instrument(level = "debug", skip_all)] + pub fn supplier_provide_changes(&mut self) -> Result<(), OperationError> { + Ok(()) + } + + #[instrument(level = "debug", skip_all)] + pub fn supplier_provide_refresh(&mut self) -> Result { + // Get the current schema. We use this for attribute and entry filtering. 
+ let schema = self.get_schema(); + + // A refresh must provide + // + // * the current domain version + let domain_version = self.d_info.d_vers; + let domain_uuid = self.d_info.d_uuid; + + // * the domain uuid + // * the set of schema entries + // * the set of non-schema entries + // - We must exclude certain entries and attributes! + // * schema defines what we exclude! + + let schema_filter = filter!(f_or!([ + f_eq("class", PVCLASS_ATTRIBUTETYPE.clone()), + f_eq("class", PVCLASS_CLASSTYPE.clone()), + ])); + + let meta_filter = filter!(f_or!([ + f_eq("uuid", PVUUID_DOMAIN_INFO.clone()), + f_eq("uuid", PVUUID_SYSTEM_INFO.clone()), + f_eq("uuid", PVUUID_SYSTEM_CONFIG.clone()), + ])); + + let entry_filter = filter!(f_and!([ + f_pres("class"), + f_andnot(f_or(vec![ + // These are from above! + f_eq("class", PVCLASS_ATTRIBUTETYPE.clone()), + f_eq("class", PVCLASS_CLASSTYPE.clone()), + f_eq("uuid", PVUUID_DOMAIN_INFO.clone()), + f_eq("uuid", PVUUID_SYSTEM_INFO.clone()), + f_eq("uuid", PVUUID_SYSTEM_CONFIG.clone()), + ])), + ])); + + let schema_entries = self + .internal_search(schema_filter) + .map(|ent| { + ent.into_iter() + .map(|e| ReplEntryV1::new(e.as_ref(), schema)) + .collect() + }) + .map_err(|e| { + error!("Failed to access schema entries"); + e + })?; + + let meta_entries = self + .internal_search(meta_filter) + .map(|ent| { + ent.into_iter() + .map(|e| ReplEntryV1::new(e.as_ref(), schema)) + .collect() + }) + .map_err(|e| { + error!("Failed to access meta entries"); + e + })?; + + let entries = self + .internal_search(entry_filter) + .map(|ent| { + ent.into_iter() + .map(|e| ReplEntryV1::new(e.as_ref(), schema)) + .collect() + }) + .map_err(|e| { + error!("Failed to access entries"); + e + })?; + + Ok(ReplRefreshContext::V1 { + domain_version, + domain_uuid, + schema_entries, + meta_entries, + entries, + }) + } +} diff --git a/kanidmd/lib/src/repl/tests.rs b/kanidmd/lib/src/repl/tests.rs index eda69f426..2c6aa4e58 100644 --- a/kanidmd/lib/src/repl/tests.rs +++ b/kanidmd/lib/src/repl/tests.rs @@ -1,6 +1,99 @@ -// use crate::prelude::*; +use crate::prelude::*; +use std::collections::BTreeMap; -#[tokio::test] -async fn multiple_qs_setup() { - assert!(true); +#[qs_pair_test] +async fn test_repl_refresh_basic(server_a: &QueryServer, server_b: &QueryServer) { + // Rebuild / refresh the content of server a with the content from b. + + // To ensure we have a spectrum of content, we do some setup here such as creating + // tombstones. + + let mut server_a_txn = server_a.write(duration_from_epoch_now()).await; + + let mut server_b_txn = server_b.read().await; + + // First, build the refresh context. + let refresh_context = server_b_txn + .supplier_provide_refresh() + .expect("Failed to build refresh"); + + // Verify content of the refresh + // eprintln!("{:#?}", refresh_context); + + // Apply it to the server + assert!(server_a_txn + .consumer_apply_refresh(&refresh_context) + .and_then(|_| server_a_txn.commit()) + .is_ok()); + + // Verify the content of server_a and server_b are identical. + let mut server_a_txn = server_a.read().await; + + // Need same d_uuid + assert_eq!( + server_a_txn.get_domain_uuid(), + server_b_txn.get_domain_uuid() + ); + + let domain_entry_a = server_a_txn + .internal_search_uuid(UUID_DOMAIN_INFO) + .expect("Failed to access domain info"); + + let domain_entry_b = server_b_txn + .internal_search_uuid(UUID_DOMAIN_INFO) + .expect("Failed to access domain info"); + + // Same d_vers / domain info. 
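+    // (The equality assert below covers the entries' attribute content; the
+    // change state carries the replication metadata, so it is asserted
+    // separately just after.)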
+    assert_eq!(domain_entry_a, domain_entry_b);
+
+    trace!("a {:#?}", domain_entry_a.get_changestate());
+    trace!("b {:#?}", domain_entry_b.get_changestate());
+
+    // Compare that their change states are identical too.
+    assert_eq!(
+        domain_entry_a.get_changestate(),
+        domain_entry_b.get_changestate()
+    );
+
+    // There is some metadata here we should also consider testing such as key
+    // reloads? These are done at the IDM level, but this is QS level, so do we need to change
+    // these tests? Or should they be separate repl tests later?
+    assert_eq!(*server_a_txn.d_info, *server_b_txn.d_info);
+
+    // Now assert everything else in the db matches.
+
+    let entries_a = server_a_txn
+        .internal_search(filter_all!(f_pres("class")))
+        .map(|ents| {
+            ents.into_iter()
+                .map(|e| (e.get_uuid(), e))
+                .collect::<BTreeMap<_, _>>()
+        })
+        .expect("Failed to access all entries");
+
+    let entries_b = server_b_txn
+        .internal_search(filter_all!(f_pres("class")))
+        .map(|ents| {
+            ents.into_iter()
+                .map(|e| (e.get_uuid(), e))
+                .collect::<BTreeMap<_, _>>()
+        })
+        .expect("Failed to access all entries");
+
+    // Basically do a select * then put into btreemaps and compare them all.
+
+    // Need to have the same length!
+    assert_eq!(entries_a.len(), entries_b.len());
+
+    // We don't use the uuid-keys here since these are compared internally; they are
+    // just to sort the two sets.
+    std::iter::zip(entries_a.values(), entries_b.values()).for_each(|(ent_a, ent_b)| {
+        assert_eq!(ent_a, ent_b);
+        assert_eq!(ent_a.get_changestate(), ent_b.get_changestate());
+    });
+
+    // Done! The entry contents are identical, as is their replication metadata. We are good
+    // to go!
+
+    // Both servers will be post-test validated.
 }
diff --git a/kanidmd/lib/src/schema.rs b/kanidmd/lib/src/schema.rs
index 6370285a1..e79ba4c3b 100644
--- a/kanidmd/lib/src/schema.rs
+++ b/kanidmd/lib/src/schema.rs
@@ -90,6 +90,7 @@ pub struct SchemaAttribute {
     pub unique: bool,
     pub phantom: bool,
     pub sync_allowed: bool,
+    pub replicated: bool,
     pub index: Vec<IndexType>,
     pub syntax: SyntaxType,
 }
@@ -136,10 +137,15 @@ impl SchemaAttribute {
             admin_error!("missing unique - {}", name);
             OperationError::InvalidSchemaState("missing unique".to_string())
         })?;
+        let phantom = value.get_ava_single_bool("phantom").unwrap_or(false);
 
         let sync_allowed = value.get_ava_single_bool("sync_allowed").unwrap_or(false);
+
+        // By default, all attributes are replicated unless you opt in for them to NOT be.
+        // Generally this is internal to the server only, so we don't advertise it.
+        let replicated = value.get_ava_single_bool("replicated").unwrap_or(true);
+
         // index vec
         // even if empty, it SHOULD be present ... (is that valid to put an empty set?)
         // The get_ava_opt_index handles the optional case for us :)
@@ -161,6 +167,7 @@ impl SchemaAttribute {
             unique,
             phantom,
             sync_allowed,
+            replicated,
             index,
             syntax,
         })
@@ -486,6 +493,22 @@ pub trait SchemaTransaction {
         res
     }
 
+    fn is_replicated(&self, attr: &str) -> bool {
+        match self.get_attributes().get(attr) {
+            Some(a_schema) => {
+                // We'll likely add more conditions here later.
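+                // A phantom attribute is never persisted on an entry, so there is
+                // nothing of it to replicate, and replicated: false is the explicit
+                // opt-out used by server-local attributes - either condition denies
+                // replication, hence the expression below.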
+                !(a_schema.phantom || !a_schema.replicated)
+            }
+            None => {
+                warn!(
+                    "Attribute {} was not found in schema during replication request",
+                    attr
+                );
+                false
+            }
+        }
+    }
+
     fn is_multivalue(&self, attr: &str) -> Result<bool, OperationError> {
         match self.get_attributes().get(attr) {
             Some(a_schema) => Ok(a_schema.multivalue),
@@ -672,6 +695,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![IndexType::Equality, IndexType::Presence],
                 syntax: SyntaxType::Utf8StringInsensitive,
             },
@@ -688,6 +712,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![IndexType::Equality, IndexType::Presence],
                 syntax: SyntaxType::Uuid,
             },
@@ -704,6 +729,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![],
                 syntax: SyntaxType::Cid,
             },
@@ -718,6 +744,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: true,
                 phantom: false,
                 sync_allowed: true,
+                replicated: true,
                 index: vec![IndexType::Equality, IndexType::Presence],
                 syntax: SyntaxType::Utf8StringIname,
             },
@@ -734,6 +761,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: true,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![IndexType::Equality],
                 syntax: SyntaxType::SecurityPrincipalName,
             },
@@ -748,6 +776,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: true,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![IndexType::Equality],
                 syntax: SyntaxType::Utf8StringInsensitive,
             },
@@ -762,6 +791,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: true,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![IndexType::Equality],
                 syntax: SyntaxType::Utf8StringInsensitive,
             },
@@ -776,6 +806,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
                 sync_allowed: true,
+                replicated: true,
                 index: vec![],
                 syntax: SyntaxType::Utf8String,
             },
@@ -788,6 +819,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![],
                 syntax: SyntaxType::Boolean,
             });
@@ -799,6 +831,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![],
                 syntax: SyntaxType::Boolean,
             });
@@ -810,6 +843,19 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
+                index: vec![],
+                syntax: SyntaxType::Boolean,
+            });
+        self.attributes.insert(AttrString::from("replicated"), SchemaAttribute {
+            name: AttrString::from("replicated"),
+            uuid: UUID_SCHEMA_ATTR_REPLICATED,
+            description: String::from("If true, this attribute or class can be replicated between nodes in the topology"),
+            multivalue: false,
+            unique: false,
+            phantom: false,
+            sync_allowed: false,
+            replicated: true,
             index: vec![],
             syntax: SyntaxType::Boolean,
         });
@@ -825,6 +871,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![],
                 syntax: SyntaxType::Boolean,
             },
@@ -841,6 +888,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![],
                 syntax: SyntaxType::IndexId,
             },
@@ -857,6 +905,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
                 sync_allowed: false,
+                replicated: true,
                 index: vec![IndexType::Equality],
                 syntax: SyntaxType::SyntaxId,
             },
@@ -873,6 +922,7 @@ impl<'a> SchemaWriteTransaction<'a> {
                 unique: false,
                 phantom: false,
sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -889,6 +939,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -905,6 +956,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -921,6 +973,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -936,7 +989,8 @@ impl<'a> SchemaWriteTransaction<'a> { multivalue: true, unique: false, phantom: false, - sync_allowed: false, + sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -952,7 +1006,8 @@ impl<'a> SchemaWriteTransaction<'a> { multivalue: true, unique: false, phantom: false, - sync_allowed: false, + sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -969,6 +1024,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -984,7 +1040,8 @@ impl<'a> SchemaWriteTransaction<'a> { multivalue: true, unique: false, phantom: false, - sync_allowed: false, + sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1001,7 +1058,8 @@ impl<'a> SchemaWriteTransaction<'a> { multivalue: false, unique: false, phantom: false, - sync_allowed: false, + sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::Boolean, }, @@ -1019,6 +1077,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality, IndexType::SubString], syntax: SyntaxType::JsonFilter, }, @@ -1035,6 +1094,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::ReferenceUuid, }, @@ -1052,6 +1112,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality, IndexType::SubString], syntax: SyntaxType::JsonFilter, }, @@ -1068,6 +1129,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1082,6 +1144,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1098,6 +1161,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1115,6 +1179,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1131,6 +1196,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1144,7 +1210,8 @@ impl<'a> 
SchemaWriteTransaction<'a> { multivalue: true, unique: false, phantom: false, - sync_allowed: false, + sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1160,6 +1227,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: false, index: vec![IndexType::Equality], syntax: SyntaxType::ReferenceUuid, }, @@ -1174,6 +1242,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: false, index: vec![IndexType::Equality], syntax: SyntaxType::ReferenceUuid, }, @@ -1188,6 +1257,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: true, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::ReferenceUuid, }, @@ -1205,6 +1275,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Uint32, }, @@ -1220,6 +1291,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::Utf8StringIname, }, @@ -1236,6 +1308,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1252,6 +1325,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1270,6 +1344,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: true, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1286,6 +1361,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![IndexType::Equality], syntax: SyntaxType::ReferenceUuid, }, @@ -1300,6 +1376,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: false, sync_allowed: false, + replicated: true, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1315,6 +1392,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: true, + replicated: false, index: vec![], syntax: SyntaxType::Utf8String, }, @@ -1330,6 +1408,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: true, + replicated: false, index: vec![], syntax: SyntaxType::TotpSecret, }, @@ -1346,6 +1425,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1360,6 +1440,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1374,6 +1455,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::Uuid, }, @@ -1388,6 +1470,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::Utf8StringInsensitive, }, @@ -1402,6 +1485,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::Utf8StringIname, }, @@ -1416,6 +1500,7 @@ impl<'a> 
SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::SshKey, }, @@ -1430,6 +1515,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::SshKey, }, @@ -1444,6 +1530,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::EmailAddress, }, @@ -1458,6 +1545,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::EmailAddress, }, @@ -1472,6 +1560,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::EmailAddress, }, @@ -1486,6 +1575,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::EmailAddress, }, @@ -1500,6 +1590,7 @@ impl<'a> SchemaWriteTransaction<'a> { unique: false, phantom: true, sync_allowed: false, + replicated: false, index: vec![], syntax: SyntaxType::Uint32, }, @@ -1513,6 +1604,7 @@ impl<'a> SchemaWriteTransaction<'a> { uuid: UUID_SCHEMA_CLASS_ATTRIBUTETYPE, description: String::from("Definition of a schema attribute"), systemmay: vec![ + AttrString::from("replicated"), AttrString::from("phantom"), AttrString::from("sync_allowed"), AttrString::from("index"), diff --git a/kanidmd/lib/src/server/access/mod.rs b/kanidmd/lib/src/server/access/mod.rs index 2214f9fca..8181fcea6 100644 --- a/kanidmd/lib/src/server/access/mod.rs +++ b/kanidmd/lib/src/server/access/mod.rs @@ -2429,7 +2429,7 @@ mod tests { // Test allowed to create test_acp_create!(&ce_admin, vec![acp.clone()], &r1_set, true); // Test Fails due to protected from sync object - test_acp_create!(&ce_admin, vec![acp.clone()], &r2_set, false); + test_acp_create!(&ce_admin, vec![acp], &r2_set, false); } #[test] @@ -2601,6 +2601,6 @@ mod tests { // Test reject rem test_acp_modify!(&me_rem, vec![acp_allow.clone()], &r2_set, false); // Test reject purge - test_acp_modify!(&me_purge, vec![acp_allow.clone()], &r2_set, false); + test_acp_modify!(&me_purge, vec![acp_allow], &r2_set, false); } } diff --git a/kanidmd/lib/src/server/create.rs b/kanidmd/lib/src/server/create.rs index 75496dbde..1fb008375 100644 --- a/kanidmd/lib/src/server/create.rs +++ b/kanidmd/lib/src/server/create.rs @@ -67,7 +67,7 @@ impl<'a> QueryServerWriteTransaction<'a> { // Now, normalise AND validate! - let res: Result>, OperationError> = candidates + let norm_cand = candidates .into_iter() .map(|e| { e.validate(&self.schema) @@ -80,9 +80,7 @@ impl<'a> QueryServerWriteTransaction<'a> { e.seal(&self.schema) }) }) - .collect(); - - let norm_cand: Vec> = res?; + .collect::, _>>()?; // Run any pre-create plugins now with schema validated entries. // This is important for normalisation of certain types IE class diff --git a/kanidmd/lib/src/server/migrations.rs b/kanidmd/lib/src/server/migrations.rs index 0e8790c66..d815faffa 100644 --- a/kanidmd/lib/src/server/migrations.rs +++ b/kanidmd/lib/src/server/migrations.rs @@ -79,8 +79,8 @@ impl QueryServer { admin_debug!(?system_info_version); if system_info_version > 0 { - if system_info_version <= 6 { - error!("Your instance of Kanidm is version 1.1.0-alpha.9 or lower, and you are trying to perform a skip upgrade. 
This will not work."); + if system_info_version <= 9 { + error!("Your instance of Kanidm is version 1.1.0-alpha.10 or lower, and you are trying to perform a skip upgrade. This will not work."); error!("You need to upgrade one version at a time to ensure upgrade migrations are performed in the correct order."); return Err(OperationError::InvalidState); } @@ -103,6 +103,10 @@ impl QueryServer { ts_write_3.commit() })?; + // Here is where in the future we will need to apply domain version increments. + // The actually migrations are done in a transaction though, this just needs to + // bump the version in it's own transaction. + admin_debug!("Database version check and migrations success! ☀️ "); Ok(()) } diff --git a/kanidmd/lib/src/server/mod.rs b/kanidmd/lib/src/server/mod.rs index 121a795e3..089d585ff 100644 --- a/kanidmd/lib/src/server/mod.rs +++ b/kanidmd/lib/src/server/mod.rs @@ -55,18 +55,19 @@ enum ServerPhase { Running, } -#[derive(Debug, Clone)] -struct DomainInfo { - d_uuid: Uuid, - d_name: String, - d_display: String, +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DomainInfo { + pub(crate) d_uuid: Uuid, + pub(crate) d_name: String, + pub(crate) d_display: String, + pub(crate) d_vers: DomainVersion, } #[derive(Clone)] pub struct QueryServer { phase: Arc>, s_uuid: Uuid, - d_info: Arc>, + pub(crate) d_info: Arc>, be: Backend, schema: Arc, accesscontrols: Arc, @@ -81,7 +82,7 @@ pub struct QueryServerReadTransaction<'a> { be_txn: BackendReadTransaction<'a>, // Anything else? In the future, we'll need to have a schema transaction // type, maybe others? - d_info: CowCellReadTxn, + pub(crate) d_info: CowCellReadTxn, schema: SchemaReadTransaction, accesscontrols: AccessControlsReadTransaction<'a>, _db_ticket: SemaphorePermit<'a>, @@ -99,18 +100,18 @@ pub struct QueryServerWriteTransaction<'a> { d_info: CowCellWriteTxn<'a, DomainInfo>, curtime: Duration, cid: Cid, - be_txn: BackendWriteTransaction<'a>, - schema: SchemaWriteTransaction<'a>, + pub(crate) be_txn: BackendWriteTransaction<'a>, + pub(crate) schema: SchemaWriteTransaction<'a>, accesscontrols: AccessControlsWriteTransaction<'a>, // We store a set of flags that indicate we need a reload of // schema or acp, which is tested by checking the classes of the // changing content. - changed_schema: bool, - changed_acp: bool, - changed_oauth2: bool, - changed_domain: bool, + pub(crate) changed_schema: bool, + pub(crate) changed_acp: bool, + pub(crate) changed_oauth2: bool, + pub(crate) changed_domain: bool, // Store the list of changed uuids for other invalidation needs? - changed_uuid: HashSet, + pub(crate) changed_uuid: HashSet, _db_ticket: SemaphorePermit<'a>, _write_ticket: SemaphorePermit<'a>, resolve_filter_cache: @@ -642,7 +643,7 @@ pub trait QueryServerTransaction<'a> { Some(v) => v.to_proto_string_clone(), None => uuid_to_proto_string(*u), }; - Ok(format!("{}: {:?}", u, m)) + Ok(format!("{u}: {m:?}")) }) .collect(); v @@ -663,7 +664,7 @@ pub trait QueryServerTransaction<'a> { .copied() .map(|ur| { let rdn = self.uuid_to_rdn(ur)?; - Ok(format!("{},{}", rdn, basedn).into_bytes()) + Ok(format!("{rdn},{basedn}").into_bytes()) }) .collect(); v @@ -926,6 +927,9 @@ impl QueryServer { let d_info = Arc::new(CowCell::new(DomainInfo { d_uuid, + // Start with our minimum supported level. 
+ // This will be reloaded from the DB shortly :) + d_vers: DOMAIN_MIN_LEVEL, d_name: domain_name.clone(), // we set the domain_display_name to the configuration file's domain_name // here because the database is not started, so we cannot pull it from there. @@ -1009,7 +1013,7 @@ impl QueryServer { let ts_max = be_txn .get_db_ts_max(curtime) .expect("Unable to get db_ts_max"); - let cid = Cid::new_lamport(self.s_uuid, d_info.d_uuid, curtime, &ts_max); + let cid = Cid::new_lamport(self.s_uuid, curtime, &ts_max); QueryServerWriteTransaction { // I think this is *not* needed, because commit is mut self which should @@ -1054,7 +1058,7 @@ impl<'a> QueryServerWriteTransaction<'a> { } #[instrument(level = "debug", name = "reload_schema", skip(self))] - fn reload_schema(&mut self) -> Result<(), OperationError> { + pub(crate) fn reload_schema(&mut self) -> Result<(), OperationError> { // supply entries to the writable schema to reload from. // find all attributes. let filt = filter!(f_eq("class", PVCLASS_ATTRIBUTETYPE.clone())); @@ -1266,10 +1270,19 @@ impl<'a> QueryServerWriteTransaction<'a> { /// Pulls the domain name from the database and updates the DomainInfo data in memory #[instrument(level = "debug", skip_all)] - fn reload_domain_info(&mut self) -> Result<(), OperationError> { + pub(crate) fn reload_domain_info(&mut self) -> Result<(), OperationError> { let domain_name = self.get_db_domain_name()?; let display_name = self.get_db_domain_display_name()?; + let domain_uuid = self.be_txn.get_db_d_uuid(); let mut_d_info = self.d_info.get_mut(); + if mut_d_info.d_uuid != domain_uuid { + admin_warn!( + "Using domain uuid from the database {} - was {} in memory", + domain_uuid, + mut_d_info.d_uuid, + ); + mut_d_info.d_uuid = domain_uuid; + } if mut_d_info.d_name != domain_name { admin_warn!( "Using domain name from the database {} - was {} in memory", @@ -1401,23 +1414,18 @@ impl<'a> QueryServerWriteTransaction<'a> { // Write the cid to the db. If this fails, we can't assume replication // will be stable, so return if it fails. be_txn.set_db_ts_max(cid.ts)?; - // Validate the schema as we just loaded it. - let r = schema.validate(); - if r.is_empty() { - // Schema has been validated, so we can go ahead and commit it with the be - // because both are consistent. - schema - .commit() - .map(|_| d_info.commit()) - .map(|_| phase.commit()) - .map(|_| dyngroup_cache.commit()) - .and_then(|_| accesscontrols.commit()) - .and_then(|_| be_txn.commit()) - } else { - Err(OperationError::ConsistencyError(r)) - } - // Audit done + // Point of no return - everything has been validated and reloaded. 
+ // + // = Let's commit = + + schema + .commit() + .map(|_| d_info.commit()) + .map(|_| phase.commit()) + .map(|_| dyngroup_cache.commit()) + .and_then(|_| accesscontrols.commit()) + .and_then(|_| be_txn.commit()) } } @@ -1510,7 +1518,7 @@ mod tests { assert!(r1 == Ok(None)); // Name does exist let r3 = server_txn.uuid_to_spn(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930")); - println!("{:?}", r3); + println!("{r3:?}"); assert!(r3.unwrap().unwrap() == Value::new_spn_str("testperson1", "example.com")); // Name is not syntax normalised (but exists) let r4 = server_txn.uuid_to_spn(uuid!("CC8E95B4-C24F-4D68-BA54-8BED76F63930")); @@ -1543,7 +1551,7 @@ mod tests { assert!(r1.unwrap() == "uuid=bae3f507-e6c3-44ba-ad01-f8ff1083534a"); // Name does exist let r3 = server_txn.uuid_to_rdn(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930")); - println!("{:?}", r3); + println!("{r3:?}"); assert!(r3.unwrap() == "spn=testperson1@example.com"); // Uuid is not syntax normalised (but exists) let r4 = server_txn.uuid_to_rdn(uuid!("CC8E95B4-C24F-4D68-BA54-8BED76F63930")); diff --git a/kanidmd/lib/src/value.rs b/kanidmd/lib/src/value.rs index 8f55bfe49..2d598c1dc 100644 --- a/kanidmd/lib/src/value.rs +++ b/kanidmd/lib/src/value.rs @@ -684,7 +684,7 @@ impl PartialValue { | PartialValue::SshKey(tag) => tag.to_string(), // This will never match as we never index radius creds! See generate_idx_eq_keys PartialValue::SecretValue | PartialValue::PrivateBinary => "_".to_string(), - PartialValue::Spn(name, realm) => format!("{}@{}", name, realm), + PartialValue::Spn(name, realm) => format!("{name}@{realm}"), PartialValue::Uint32(u) => u.to_string(), // This will never work, we don't allow equality searching on Cid's PartialValue::Cid(_) => "_".to_string(), @@ -1503,10 +1503,10 @@ impl Value { let fp = spk.fingerprint(); format!("{}: {}", tag, fp.hash) } - Err(_) => format!("{}: corrupted ssh public key", tag), + Err(_) => format!("{tag}: corrupted ssh public key"), } } - Value::Spn(n, r) => format!("{}@{}", n, r), + Value::Spn(n, r) => format!("{n}@{r}"), _ => unreachable!(), } } diff --git a/kanidmd/lib/src/valueset/address.rs b/kanidmd/lib/src/valueset/address.rs index 67ecf5eb2..a6b2abc88 100644 --- a/kanidmd/lib/src/valueset/address.rs +++ b/kanidmd/lib/src/valueset/address.rs @@ -4,6 +4,7 @@ use smolset::SmolSet; use crate::be::dbvalue::DbValueAddressV1; use crate::prelude::*; +use crate::repl::proto::{ReplAddressV1, ReplAttrV1}; use crate::schema::SchemaAttribute; use crate::value::Address; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -49,6 +50,33 @@ impl ValueSetAddress { .collect(); Ok(Box::new(ValueSetAddress { set })) } + + pub fn from_repl_v1(data: &[ReplAddressV1]) -> Result<ValueSet, OperationError> { + let set = data + .iter() + .cloned() + .map( + |ReplAddressV1 { + formatted, + street_address, + locality, + region, + postal_code, + country, + }| { + Address { + formatted, + street_address, + locality, + region, + postal_code, + country, + } + }, + ) + .collect(); + Ok(Box::new(ValueSetAddress { set })) + } } impl FromIterator
for Option> { @@ -142,6 +170,23 @@ impl ValueSetT for ValueSetAddress { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Address { + set: self + .set + .iter() + .map(|a| ReplAddressV1 { + formatted: a.formatted.clone(), + street_address: a.street_address.clone(), + locality: a.locality.clone(), + region: a.region.clone(), + postal_code: a.postal_code.clone(), + country: a.country.clone(), + }) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new( self.set @@ -217,6 +262,19 @@ impl ValueSetEmailAddress { } } + pub fn from_repl_v1(primary: &String, data: &[String]) -> Result { + let set: BTreeSet<_> = data.iter().cloned().collect(); + + if set.contains(primary) { + Ok(Box::new(ValueSetEmailAddress { + primary: primary.clone(), + set, + })) + } else { + Err(OperationError::InvalidValueState) + } + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. #[allow(clippy::should_implement_trait)] @@ -337,6 +395,13 @@ impl ValueSetT for ValueSetEmailAddress { DbValueSetV2::EmailAddress(self.primary.clone(), self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::EmailAddress { + primary: self.primary.clone(), + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().cloned().map(PartialValue::EmailAddress)) } diff --git a/kanidmd/lib/src/valueset/binary.rs b/kanidmd/lib/src/valueset/binary.rs index 8e31ad374..d52a80bdf 100644 --- a/kanidmd/lib/src/valueset/binary.rs +++ b/kanidmd/lib/src/valueset/binary.rs @@ -1,9 +1,11 @@ +use base64urlsafedata::Base64UrlSafeData; use std::collections::btree_map::Entry as BTreeEntry; use std::collections::BTreeMap; use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -28,6 +30,11 @@ impl ValueSetPrivateBinary { Ok(Box::new(ValueSetPrivateBinary { set })) } + pub fn from_repl_v1(data: &[Base64UrlSafeData]) -> Result { + let set = data.iter().map(|b| b.0.clone()).collect(); + Ok(Box::new(ValueSetPrivateBinary { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and vec is foreign #[allow(clippy::should_implement_trait)] @@ -95,6 +102,12 @@ impl ValueSetT for ValueSetPrivateBinary { DbValueSetV2::PrivateBinary(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::PrivateBinary { + set: self.set.iter().cloned().map(|b| b.into()).collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new( self.set @@ -160,6 +173,11 @@ impl ValueSetPublicBinary { Ok(Box::new(ValueSetPublicBinary { map })) } + pub fn from_repl_v1(data: &[(String, Base64UrlSafeData)]) -> Result { + let map = data.iter().map(|(k, v)| (k.clone(), v.0.clone())).collect(); + Ok(Box::new(ValueSetPublicBinary { map })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. 
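Every valueset in this patch gains a from_repl_v1 constructor that mirrors its to_repl_v1 serialiser, and the pair must round-trip losslessly or replicas will diverge. A minimal standalone sketch of that invariant, using simplified two-field stand-ins for Address and ReplAddressV1 (the real types carry six fields and sit behind the ValueSet trait):

    use std::collections::BTreeSet;

    #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
    struct Address {
        formatted: String,
        country: String,
    }

    // Wire form, standing in for ReplAddressV1.
    #[derive(Debug, Clone)]
    struct ReplAddressV1 {
        formatted: String,
        country: String,
    }

    fn to_repl_v1(set: &BTreeSet<Address>) -> Vec<ReplAddressV1> {
        set.iter()
            .map(|a| ReplAddressV1 {
                formatted: a.formatted.clone(),
                country: a.country.clone(),
            })
            .collect()
    }

    fn from_repl_v1(data: &[ReplAddressV1]) -> BTreeSet<Address> {
        data.iter()
            .cloned()
            .map(|ReplAddressV1 { formatted, country }| Address { formatted, country })
            .collect()
    }

    fn main() {
        let mut set = BTreeSet::new();
        set.insert(Address {
            formatted: "1 Example St".to_string(),
            country: "AU".to_string(),
        });
        // The supplier serialises, the consumer reconstructs: nothing may be lost in transit.
        assert_eq!(set, from_repl_v1(&to_repl_v1(&set)));
    }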
#[allow(clippy::should_implement_trait)] @@ -243,6 +261,16 @@ impl ValueSetT for ValueSetPublicBinary { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::PublicBinary { + set: self + .map + .iter() + .map(|(tag, bin)| (tag.clone(), bin.clone().into())) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.map.keys().cloned().map(PartialValue::PublicBinary)) } diff --git a/kanidmd/lib/src/valueset/bool.rs b/kanidmd/lib/src/valueset/bool.rs index b5e8ba7a5..bea23a119 100644 --- a/kanidmd/lib/src/valueset/bool.rs +++ b/kanidmd/lib/src/valueset/bool.rs @@ -1,6 +1,7 @@ use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -25,6 +26,11 @@ impl ValueSetBool { Ok(Box::new(ValueSetBool { set })) } + pub fn from_repl_v1(data: &[bool]) -> Result { + let set = data.iter().copied().collect(); + Ok(Box::new(ValueSetBool { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and bool is foreign. #[allow(clippy::should_implement_trait)] @@ -101,6 +107,12 @@ impl ValueSetT for ValueSetBool { DbValueSetV2::Bool(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Bool { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().copied().map(PartialValue::new_bool)) } diff --git a/kanidmd/lib/src/valueset/cid.rs b/kanidmd/lib/src/valueset/cid.rs index 36aedacc9..40eac7e3a 100644 --- a/kanidmd/lib/src/valueset/cid.rs +++ b/kanidmd/lib/src/valueset/cid.rs @@ -3,6 +3,7 @@ use smolset::SmolSet; use crate::be::dbvalue::DbCidV1; use crate::prelude::*; use crate::repl::cid::Cid; +use crate::repl::proto::{ReplAttrV1, ReplCidV1}; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -26,13 +27,17 @@ impl ValueSetCid { let set = data .into_iter() .map(|dc| Cid { - d_uuid: dc.domain_id, s_uuid: dc.server_id, ts: dc.timestamp, }) .collect(); Ok(Box::new(ValueSetCid { set })) } + + pub fn from_repl_v1(data: &[ReplCidV1]) -> Result { + let set = data.iter().map(|dc| dc.into()).collect(); + Ok(Box::new(ValueSetCid { set })) + } } impl FromIterator for Option> { @@ -105,11 +110,7 @@ impl ValueSetT for ValueSetCid { } fn to_proto_string_clone_iter(&self) -> Box + '_> { - Box::new( - self.set - .iter() - .map(|c| format!("{:?}_{}_{}", c.ts, c.d_uuid, c.s_uuid)), - ) + Box::new(self.set.iter().map(|c| format!("{:?}_{}", c.ts, c.s_uuid))) } fn to_db_valueset_v2(&self) -> DbValueSetV2 { @@ -117,7 +118,6 @@ impl ValueSetT for ValueSetCid { self.set .iter() .map(|c| DbCidV1 { - domain_id: c.d_uuid, server_id: c.s_uuid, timestamp: c.ts, }) @@ -125,6 +125,12 @@ impl ValueSetT for ValueSetCid { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Cid { + set: self.set.iter().map(|c| c.into()).collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().cloned().map(PartialValue::new_cid)) } diff --git a/kanidmd/lib/src/valueset/cred.rs b/kanidmd/lib/src/valueset/cred.rs index a22f0b108..50a319be0 100644 --- a/kanidmd/lib/src/valueset/cred.rs +++ b/kanidmd/lib/src/valueset/cred.rs @@ -8,6 +8,9 @@ use crate::be::dbvalue::{ }; use crate::credential::Credential; use crate::prelude::*; +use crate::repl::proto::{ + ReplAttrV1, ReplCredV1, ReplDeviceKeyV4V1, ReplIntentTokenV1, ReplPasskeyV4V1, +}; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, 
IntentTokenState, ValueSet}; @@ -40,6 +43,16 @@ impl ValueSetCredential { Ok(Box::new(ValueSetCredential { map })) } + pub fn from_repl_v1(data: &[ReplCredV1]) -> Result { + let map = data + .iter() + .map(|dc| { + Credential::try_from_repl_v1(dc).map_err(|()| OperationError::InvalidValueState) + }) + .collect::>()?; + Ok(Box::new(ValueSetCredential { map })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. #[allow(clippy::should_implement_trait)] @@ -125,6 +138,16 @@ impl ValueSetT for ValueSetCredential { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Credential { + set: self + .map + .iter() + .map(|(tag, cred)| cred.to_repl_v1(tag.clone())) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.map.keys().cloned().map(PartialValue::Cred)) } @@ -214,6 +237,36 @@ impl ValueSetIntentToken { Ok(Box::new(ValueSetIntentToken { map })) } + pub fn from_repl_v1(data: &[ReplIntentTokenV1]) -> Result { + let map = data + .iter() + .map(|dits| match dits { + ReplIntentTokenV1::Valid { token_id, max_ttl } => ( + token_id.clone(), + IntentTokenState::Valid { max_ttl: *max_ttl }, + ), + ReplIntentTokenV1::InProgress { + token_id, + max_ttl, + session_id, + session_ttl, + } => ( + token_id.clone(), + IntentTokenState::InProgress { + max_ttl: *max_ttl, + session_id: *session_id, + session_ttl: *session_ttl, + }, + ), + ReplIntentTokenV1::Consumed { token_id, max_ttl } => ( + token_id.clone(), + IntentTokenState::Consumed { max_ttl: *max_ttl }, + ), + }) + .collect(); + Ok(Box::new(ValueSetIntentToken { map })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. #[allow(clippy::should_implement_trait)] @@ -317,6 +370,35 @@ impl ValueSetT for ValueSetIntentToken { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::IntentToken { + set: self + .map + .iter() + .map(|(u, s)| match s { + IntentTokenState::Valid { max_ttl } => ReplIntentTokenV1::Valid { + token_id: u.clone(), + max_ttl: *max_ttl, + }, + IntentTokenState::InProgress { + max_ttl, + session_id, + session_ttl, + } => ReplIntentTokenV1::InProgress { + token_id: u.clone(), + max_ttl: *max_ttl, + session_id: *session_id, + session_ttl: *session_ttl, + }, + IntentTokenState::Consumed { max_ttl } => ReplIntentTokenV1::Consumed { + token_id: u.clone(), + max_ttl: *max_ttl, + }, + }) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.map.keys().cloned().map(PartialValue::IntentToken)) } @@ -378,6 +460,17 @@ impl ValueSetPasskey { Ok(Box::new(ValueSetPasskey { map })) } + pub fn from_repl_v1(data: &[ReplPasskeyV4V1]) -> Result { + let map = data + .iter() + .cloned() + .map(|k| match k { + ReplPasskeyV4V1 { uuid, tag, key } => Ok((uuid, (tag, key))), + }) + .collect::>()?; + Ok(Box::new(ValueSetPasskey { map })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. 
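The intent-token conversion above is a pure state-for-state mapping: each IntentTokenState variant has a wire twin in ReplIntentTokenV1 that additionally carries the token id, since the wire form flattens the map into a list. The same shape compressed to two states, with simplified hypothetical types:

    use std::time::Duration;

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    enum IntentTokenState {
        Valid { max_ttl: Duration },
        Consumed { max_ttl: Duration },
    }

    // Wire twin: the map key (token id) is folded into every variant.
    #[derive(Debug, Clone)]
    enum ReplIntentTokenV1 {
        Valid { token_id: String, max_ttl: Duration },
        Consumed { token_id: String, max_ttl: Duration },
    }

    fn to_repl(token_id: &str, s: IntentTokenState) -> ReplIntentTokenV1 {
        match s {
            IntentTokenState::Valid { max_ttl } => ReplIntentTokenV1::Valid {
                token_id: token_id.to_string(),
                max_ttl,
            },
            IntentTokenState::Consumed { max_ttl } => ReplIntentTokenV1::Consumed {
                token_id: token_id.to_string(),
                max_ttl,
            },
        }
    }

    fn from_repl(r: &ReplIntentTokenV1) -> (String, IntentTokenState) {
        match r {
            ReplIntentTokenV1::Valid { token_id, max_ttl } => {
                (token_id.clone(), IntentTokenState::Valid { max_ttl: *max_ttl })
            }
            ReplIntentTokenV1::Consumed { token_id, max_ttl } => {
                (token_id.clone(), IntentTokenState::Consumed { max_ttl: *max_ttl })
            }
        }
    }

    fn main() {
        let s = IntentTokenState::Valid { max_ttl: Duration::from_secs(300) };
        let (id, back) = from_repl(&to_repl("token-1", s));
        assert_eq!(id, "token-1");
        assert_eq!(back, s);
    }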
#[allow(clippy::should_implement_trait)] @@ -467,6 +560,20 @@ impl ValueSetT for ValueSetPasskey { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Passkey { + set: self + .map + .iter() + .map(|(u, (t, k))| ReplPasskeyV4V1 { + uuid: *u, + tag: t.clone(), + key: k.clone(), + }) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.map.keys().cloned().map(PartialValue::Passkey)) } @@ -537,6 +644,17 @@ impl ValueSetDeviceKey { Ok(Box::new(ValueSetDeviceKey { map })) } + pub fn from_repl_v1(data: &[ReplDeviceKeyV4V1]) -> Result { + let map = data + .iter() + .cloned() + .map(|k| match k { + ReplDeviceKeyV4V1 { uuid, tag, key } => Ok((uuid, (tag, key))), + }) + .collect::>()?; + Ok(Box::new(ValueSetDeviceKey { map })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. #[allow(clippy::should_implement_trait)] @@ -626,6 +744,20 @@ impl ValueSetT for ValueSetDeviceKey { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::DeviceKey { + set: self + .map + .iter() + .map(|(u, (t, k))| ReplDeviceKeyV4V1 { + uuid: *u, + tag: t.clone(), + key: k.clone(), + }) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.map.keys().copied().map(PartialValue::DeviceKey)) } diff --git a/kanidmd/lib/src/valueset/datetime.rs b/kanidmd/lib/src/valueset/datetime.rs index 4741fdf02..4a6bfaa55 100644 --- a/kanidmd/lib/src/valueset/datetime.rs +++ b/kanidmd/lib/src/valueset/datetime.rs @@ -2,6 +2,7 @@ use smolset::SmolSet; use time::OffsetDateTime; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -33,6 +34,18 @@ impl ValueSetDateTime { Ok(Box::new(ValueSetDateTime { set })) } + pub fn from_repl_v1(data: &[String]) -> Result { + let set = data + .iter() + .map(|s| { + OffsetDateTime::parse(s, time::Format::Rfc3339) + .map(|odt| odt.to_offset(time::UtcOffset::UTC)) + .map_err(|_| OperationError::InvalidValueState) + }) + .collect::>()?; + Ok(Box::new(ValueSetDateTime { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and offset date time is foreign #[allow(clippy::should_implement_trait)] @@ -123,6 +136,19 @@ impl ValueSetT for ValueSetDateTime { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::DateTime { + set: self + .set + .iter() + .map(|odt| { + debug_assert!(odt.offset() == time::UtcOffset::UTC); + odt.format(time::Format::Rfc3339) + }) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().cloned().map(PartialValue::DateTime)) } diff --git a/kanidmd/lib/src/valueset/iname.rs b/kanidmd/lib/src/valueset/iname.rs index 56a7eeccc..2369828f2 100644 --- a/kanidmd/lib/src/valueset/iname.rs +++ b/kanidmd/lib/src/valueset/iname.rs @@ -1,6 +1,7 @@ use std::collections::BTreeSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -25,6 +26,11 @@ impl ValueSetIname { Ok(Box::new(ValueSetIname { set })) } + pub fn from_repl_v1(data: &[String]) -> Result { + let set = data.iter().cloned().collect(); + Ok(Box::new(ValueSetIname { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and str is foreign #[allow(clippy::should_implement_trait)] @@ -107,6 +113,12 @@ impl ValueSetT for ValueSetIname { 
DbValueSetV2::Iname(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Iname { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().map(|i| PartialValue::new_iname(i.as_str()))) } diff --git a/kanidmd/lib/src/valueset/index.rs b/kanidmd/lib/src/valueset/index.rs index 25b57e911..bc228e3f8 100644 --- a/kanidmd/lib/src/valueset/index.rs +++ b/kanidmd/lib/src/valueset/index.rs @@ -1,6 +1,7 @@ use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -26,6 +27,12 @@ impl ValueSetIndex { Ok(Box::new(ValueSetIndex { set })) } + pub fn from_repl_v1(data: &[u16]) -> Result { + let set: Result<_, _> = data.iter().copied().map(IndexType::try_from).collect(); + let set = set.map_err(|_| OperationError::InvalidValueState)?; + Ok(Box::new(ValueSetIndex { set })) + } + // We need to allow this, because there seems to be a bug using it fromiterator in entry.rs #[allow(clippy::should_implement_trait)] pub fn from_iter(iter: T) -> Option> @@ -101,6 +108,12 @@ impl ValueSetT for ValueSetIndex { DbValueSetV2::IndexType(self.set.iter().map(|s| *s as u16).collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::IndexType { + set: self.set.iter().map(|s| *s as u16).collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().copied().map(PartialValue::Index)) } diff --git a/kanidmd/lib/src/valueset/iutf8.rs b/kanidmd/lib/src/valueset/iutf8.rs index 8fd50d56e..d714928d2 100644 --- a/kanidmd/lib/src/valueset/iutf8.rs +++ b/kanidmd/lib/src/valueset/iutf8.rs @@ -2,6 +2,7 @@ use std::collections::BTreeSet; use super::iname::ValueSetIname; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -26,6 +27,11 @@ impl ValueSetIutf8 { Ok(Box::new(ValueSetIutf8 { set })) } + pub fn from_repl_v1(data: &[String]) -> Result { + let set = data.iter().cloned().collect(); + Ok(Box::new(ValueSetIutf8 { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and str is foreign. 
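The datetime valueset earlier in this diff only ever stores RFC3339 strings normalised to UTC, which is why its to_repl_v1 can debug_assert the offset before formatting. A sketch of that normalise-on-parse cycle with the same time 0.2 API the patch calls (assuming time = "0.2" in Cargo.toml; time 0.3 renamed these items):

    use time::OffsetDateTime;

    fn parse_utc(s: &str) -> Result<OffsetDateTime, time::ParseError> {
        // Normalise any incoming offset to UTC so every replica stores one canonical form.
        OffsetDateTime::parse(s, time::Format::Rfc3339)
            .map(|odt| odt.to_offset(time::UtcOffset::UTC))
    }

    fn emit(odt: &OffsetDateTime) -> String {
        // Stored values are UTC by construction, so emitting is offset-free.
        debug_assert!(odt.offset() == time::UtcOffset::UTC);
        odt.format(time::Format::Rfc3339)
    }

    fn main() -> Result<(), time::ParseError> {
        let odt = parse_utc("2023-02-15T10:25:51+10:00")?;
        // Re-parsing what we emit must land on the same instant.
        assert_eq!(parse_utc(&emit(&odt))?, odt);
        Ok(())
    }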
#[allow(clippy::should_implement_trait)] @@ -108,6 +114,12 @@ impl ValueSetT for ValueSetIutf8 { DbValueSetV2::Iutf8(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Iutf8 { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().map(|i| PartialValue::new_iutf8(i.as_str()))) } diff --git a/kanidmd/lib/src/valueset/json.rs b/kanidmd/lib/src/valueset/json.rs index 09449f302..488615c21 100644 --- a/kanidmd/lib/src/valueset/json.rs +++ b/kanidmd/lib/src/valueset/json.rs @@ -2,6 +2,7 @@ use kanidm_proto::v1::Filter as ProtoFilter; use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -21,10 +22,18 @@ impl ValueSetJsonFilter { self.set.insert(b) } - pub fn from_dbvs2(data: Vec) -> Result { + pub fn from_dbvs2(data: &[String]) -> Result { let set = data - .into_iter() - .map(|s| serde_json::from_str(&s).map_err(|_| OperationError::SerdeJsonError)) + .iter() + .map(|s| serde_json::from_str(s).map_err(|_| OperationError::SerdeJsonError)) + .collect::>()?; + Ok(Box::new(ValueSetJsonFilter { set })) + } + + pub fn from_repl_v1(data: &[String]) -> Result { + let set = data + .iter() + .map(|s| serde_json::from_str(s).map_err(|_| OperationError::SerdeJsonError)) .collect::>()?; Ok(Box::new(ValueSetJsonFilter { set })) } @@ -123,6 +132,20 @@ impl ValueSetT for ValueSetJsonFilter { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::JsonFilter { + set: self + .set + .iter() + .map(|s| { + #[allow(clippy::expect_used)] + serde_json::to_string(s) + .expect("A json filter value was corrupted during run-time") + }) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().cloned().map(PartialValue::JsonFilt)) } diff --git a/kanidmd/lib/src/valueset/jws.rs b/kanidmd/lib/src/valueset/jws.rs index 0fa035e91..d204d86a4 100644 --- a/kanidmd/lib/src/valueset/jws.rs +++ b/kanidmd/lib/src/valueset/jws.rs @@ -1,7 +1,9 @@ +use base64urlsafedata::Base64UrlSafeData; use compact_jwt::{JwaAlg, JwsSigner}; use hashbrown::HashSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -36,6 +38,19 @@ impl ValueSetJwsKeyEs256 { Ok(Box::new(ValueSetJwsKeyEs256 { set })) } + pub fn from_repl_v1(data: &[Base64UrlSafeData]) -> Result { + let set = data + .iter() + .map(|b| { + JwsSigner::from_es256_der(b.0.as_slice()).map_err(|e| { + debug!(?e, "Error occurred parsing ES256 DER"); + OperationError::InvalidValueState + }) + }) + .collect::, _>>()?; + Ok(Box::new(ValueSetJwsKeyEs256 { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and jwssigner is foreign #[allow(clippy::should_implement_trait)] @@ -109,12 +124,24 @@ impl ValueSetT for ValueSetJwsKeyEs256 { fn to_db_valueset_v2(&self) -> DbValueSetV2 { DbValueSetV2::JwsKeyEs256(self.set.iter() - .filter_map(|k| k.private_key_to_der() - .map_err(|e| { - error!(?e, "Unable to process private key to der, likely corrupted - this key will be LOST"); - }) - .ok()) - .collect()) + .map(|k| { + #[allow(clippy::expect_used)] + k.private_key_to_der() + .expect("Unable to process private key to der, likely corrupted. 
You must restore from backup.") + }) + .collect()) + } + + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::JwsKeyEs256 { set: self.set.iter() + .map(|k| { + #[allow(clippy::expect_used)] + k.private_key_to_der() + .expect("Unable to process private key to der, likely corrupted. You must restore from backup.") + }) + .map(|b| b.into()) + .collect() + } } fn to_partialvalue_iter(&self) -> Box + '_> { @@ -192,6 +219,19 @@ impl ValueSetJwsKeyRs256 { Ok(Box::new(ValueSetJwsKeyRs256 { set })) } + pub fn from_repl_v1(data: &[Base64UrlSafeData]) -> Result { + let set = data + .iter() + .map(|b| { + JwsSigner::from_rs256_der(b.0.as_slice()).map_err(|e| { + debug!(?e, "Error occurred parsing RS256 DER"); + OperationError::InvalidValueState + }) + }) + .collect::, _>>()?; + Ok(Box::new(ValueSetJwsKeyRs256 { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and jwssigner is foreign #[allow(clippy::should_implement_trait)] @@ -265,12 +305,24 @@ impl ValueSetT for ValueSetJwsKeyRs256 { fn to_db_valueset_v2(&self) -> DbValueSetV2 { DbValueSetV2::JwsKeyRs256(self.set.iter() - .filter_map(|k| k.private_key_to_der() - .map_err(|e| { - error!(?e, "Unable to process private key to der, likely corrupted - this key will be LOST"); - }) - .ok()) - .collect()) + .map(|k| { + #[allow(clippy::expect_used)] + k.private_key_to_der() + .expect("Unable to process private key to der, likely corrupted. You must restore from backup.") + }) + .collect()) + } + + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::JwsKeyRs256 { set: self.set.iter() + .map(|k| { + #[allow(clippy::expect_used)] + k.private_key_to_der() + .expect("Unable to process private key to der, likely corrupted. You must restore from backup.") + }) + .map(|b| b.into()) + .collect() + } } fn to_partialvalue_iter(&self) -> Box + '_> { diff --git a/kanidmd/lib/src/valueset/mod.rs b/kanidmd/lib/src/valueset/mod.rs index 1735d4f2b..b183dad11 100644 --- a/kanidmd/lib/src/valueset/mod.rs +++ b/kanidmd/lib/src/valueset/mod.rs @@ -14,7 +14,7 @@ use webauthn_rs::prelude::Passkey as PasskeyV4; use crate::be::dbvalue::DbValueSetV2; use crate::credential::{totp::Totp, Credential}; use crate::prelude::*; -use crate::repl::cid::Cid; +use crate::repl::{cid::Cid, proto::ReplAttrV1}; use crate::schema::SchemaAttribute; use crate::value::{Address, IntentTokenState, Oauth2Session, Session}; @@ -107,6 +107,8 @@ pub trait ValueSetT: std::fmt::Debug + DynClone { fn to_db_valueset_v2(&self) -> DbValueSetV2; + fn to_repl_v1(&self) -> ReplAttrV1; + fn to_partialvalue_iter(&self) -> Box + '_>; fn to_value_iter(&self) -> Box + '_>; @@ -659,7 +661,7 @@ pub fn from_db_valueset_v2(dbvs: DbValueSetV2) -> Result ValueSetRestricted::from_dbvs2(set), DbValueSetV2::Spn(set) => ValueSetSpn::from_dbvs2(set), DbValueSetV2::Cid(set) => ValueSetCid::from_dbvs2(set), - DbValueSetV2::JsonFilter(set) => ValueSetJsonFilter::from_dbvs2(set), + DbValueSetV2::JsonFilter(set) => ValueSetJsonFilter::from_dbvs2(&set), DbValueSetV2::NsUniqueId(set) => ValueSetNsUniqueId::from_dbvs2(set), DbValueSetV2::Url(set) => ValueSetUrl::from_dbvs2(set), DbValueSetV2::DateTime(set) => ValueSetDateTime::from_dbvs2(set), @@ -686,3 +688,44 @@ pub fn from_db_valueset_v2(dbvs: DbValueSetV2) -> Result Result { + match rv1 { + ReplAttrV1::Iutf8 { set } => ValueSetIutf8::from_repl_v1(set), + ReplAttrV1::Utf8 { set } => ValueSetUtf8::from_repl_v1(set), + ReplAttrV1::IndexType { set } => ValueSetIndex::from_repl_v1(set), + ReplAttrV1::SyntaxType { set } => 
ValueSetSyntax::from_repl_v1(set), + ReplAttrV1::Cid { set } => ValueSetCid::from_repl_v1(set), + ReplAttrV1::Bool { set } => ValueSetBool::from_repl_v1(set), + ReplAttrV1::Uuid { set } => ValueSetUuid::from_repl_v1(set), + ReplAttrV1::Uint32 { set } => ValueSetUint32::from_repl_v1(set), + ReplAttrV1::Iname { set } => ValueSetIname::from_repl_v1(set), + ReplAttrV1::PrivateBinary { set } => ValueSetPrivateBinary::from_repl_v1(set), + ReplAttrV1::SecretValue { set } => ValueSetSecret::from_repl_v1(set), + ReplAttrV1::Reference { set } => ValueSetRefer::from_repl_v1(set), + ReplAttrV1::JwsKeyEs256 { set } => ValueSetJwsKeyEs256::from_repl_v1(set), + ReplAttrV1::JwsKeyRs256 { set } => ValueSetJwsKeyRs256::from_repl_v1(set), + ReplAttrV1::Spn { set } => ValueSetSpn::from_repl_v1(set), + ReplAttrV1::JsonFilter { set } => ValueSetJsonFilter::from_repl_v1(set), + ReplAttrV1::UiHint { set } => ValueSetUiHint::from_repl_v1(set), + ReplAttrV1::Address { set } => ValueSetAddress::from_repl_v1(set), + ReplAttrV1::EmailAddress { primary, set } => { + ValueSetEmailAddress::from_repl_v1(primary, set) + } + ReplAttrV1::PublicBinary { set } => ValueSetPublicBinary::from_repl_v1(set), + ReplAttrV1::Credential { set } => ValueSetCredential::from_repl_v1(set), + ReplAttrV1::IntentToken { set } => ValueSetIntentToken::from_repl_v1(set), + ReplAttrV1::Passkey { set } => ValueSetPasskey::from_repl_v1(set), + ReplAttrV1::DeviceKey { set } => ValueSetDeviceKey::from_repl_v1(set), + ReplAttrV1::DateTime { set } => ValueSetDateTime::from_repl_v1(set), + ReplAttrV1::Url { set } => ValueSetUrl::from_repl_v1(set), + ReplAttrV1::NsUniqueId { set } => ValueSetNsUniqueId::from_repl_v1(set), + ReplAttrV1::RestrictedString { set } => ValueSetRestricted::from_repl_v1(set), + ReplAttrV1::SshKey { set } => ValueSetSshKey::from_repl_v1(set), + ReplAttrV1::OauthScope { set } => ValueSetOauthScope::from_repl_v1(set), + ReplAttrV1::OauthScopeMap { set } => ValueSetOauthScopeMap::from_repl_v1(set), + ReplAttrV1::Oauth2Session { set } => ValueSetOauth2Session::from_repl_v1(set), + ReplAttrV1::Session { set } => ValueSetSession::from_repl_v1(set), + ReplAttrV1::TotpSecret { set } => ValueSetTotpSecret::from_repl_v1(set), + } +} diff --git a/kanidmd/lib/src/valueset/nsuniqueid.rs b/kanidmd/lib/src/valueset/nsuniqueid.rs index b2f06d403..17384a24c 100644 --- a/kanidmd/lib/src/valueset/nsuniqueid.rs +++ b/kanidmd/lib/src/valueset/nsuniqueid.rs @@ -1,6 +1,7 @@ use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::value::NSUNIQUEID_RE; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -26,6 +27,11 @@ impl ValueSetNsUniqueId { Ok(Box::new(ValueSetNsUniqueId { set })) } + pub fn from_repl_v1(data: &[String]) -> Result { + let set = data.iter().cloned().collect(); + Ok(Box::new(ValueSetNsUniqueId { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and String is foreign. 
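The from_repl_v1 dispatcher above matches every ReplAttrV1 variant with no `_` wildcard arm. That is a deliberate design choice: adding a new replicated attribute type becomes a compile error at this match until a corresponding from_repl_v1 exists, so no variant can ever be silently dropped on the consumer side. The guarantee in miniature:

    enum ReplAttr {
        Bool { set: Vec<bool> },
        Uint32 { set: Vec<u32> },
        // Adding `Url { set: Vec<String> }` here makes `decode` below fail to
        // compile (non-exhaustive match, E0004) until an arm handles it.
    }

    fn decode(attr: &ReplAttr) -> String {
        match attr {
            ReplAttr::Bool { set } => format!("bool x{}", set.len()),
            ReplAttr::Uint32 { set } => format!("u32 x{}", set.len()),
        }
    }

    fn main() {
        println!("{}", decode(&ReplAttr::Bool { set: vec![true] }));
        println!("{}", decode(&ReplAttr::Uint32 { set: vec![1, 2, 3] }));
    }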
#[allow(clippy::should_implement_trait)] @@ -102,6 +108,12 @@ impl ValueSetT for ValueSetNsUniqueId { DbValueSetV2::NsUniqueId(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::NsUniqueId { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().cloned().map(PartialValue::Nsuniqueid)) } diff --git a/kanidmd/lib/src/valueset/oauth.rs b/kanidmd/lib/src/valueset/oauth.rs index a11310b4b..c01f27d1f 100644 --- a/kanidmd/lib/src/valueset/oauth.rs +++ b/kanidmd/lib/src/valueset/oauth.rs @@ -3,6 +3,7 @@ use std::collections::{BTreeMap, BTreeSet}; use crate::be::dbvalue::DbValueOauthScopeMapV1; use crate::prelude::*; +use crate::repl::proto::{ReplAttrV1, ReplOauthScopeMapV1}; use crate::schema::SchemaAttribute; use crate::value::OAUTHSCOPE_RE; use crate::valueset::{uuid_to_proto_string, DbValueSetV2, ValueSet}; @@ -28,6 +29,11 @@ impl ValueSetOauthScope { Ok(Box::new(ValueSetOauthScope { set })) } + pub fn from_repl_v1(data: &[String]) -> Result { + let set = data.iter().cloned().collect(); + Ok(Box::new(ValueSetOauthScope { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and String is foreign. #[allow(clippy::should_implement_trait)] @@ -104,6 +110,12 @@ impl ValueSetT for ValueSetOauthScope { DbValueSetV2::OauthScope(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::OauthScope { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().cloned().map(PartialValue::OauthScope)) } @@ -168,11 +180,15 @@ impl ValueSetOauthScopeMap { pub fn from_dbvs2(data: Vec) -> Result { let map = data .into_iter() - .map(|dbv| { - let u = dbv.refer; - let m = dbv.data.into_iter().collect(); - (u, m) - }) + .map(|DbValueOauthScopeMapV1 { refer, data }| (refer, data.into_iter().collect())) + .collect(); + Ok(Box::new(ValueSetOauthScopeMap { map })) + } + + pub fn from_repl_v1(data: &[ReplOauthScopeMapV1]) -> Result { + let map = data + .iter() + .map(|ReplOauthScopeMapV1 { refer, data }| (*refer, data.clone())) .collect(); Ok(Box::new(ValueSetOauthScopeMap { map })) } @@ -281,6 +297,19 @@ impl ValueSetT for ValueSetOauthScopeMap { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::OauthScopeMap { + set: self + .map + .iter() + .map(|(u, m)| ReplOauthScopeMapV1 { + refer: *u, + data: m.iter().cloned().collect(), + }) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.map.keys().cloned().map(PartialValue::Refer)) } diff --git a/kanidmd/lib/src/valueset/restricted.rs b/kanidmd/lib/src/valueset/restricted.rs index 3f680977d..18c39a9f2 100644 --- a/kanidmd/lib/src/valueset/restricted.rs +++ b/kanidmd/lib/src/valueset/restricted.rs @@ -1,6 +1,7 @@ use std::collections::BTreeSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -25,6 +26,11 @@ impl ValueSetRestricted { Ok(Box::new(ValueSetRestricted { set })) } + pub fn from_repl_v1(data: &[String]) -> Result { + let set = data.iter().cloned().collect(); + Ok(Box::new(ValueSetRestricted { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and String is foreign. 
#[allow(clippy::should_implement_trait)] @@ -111,6 +117,12 @@ impl ValueSetT for ValueSetRestricted { DbValueSetV2::RestrictedString(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::RestrictedString { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().cloned().map(PartialValue::RestrictedString)) } diff --git a/kanidmd/lib/src/valueset/secret.rs b/kanidmd/lib/src/valueset/secret.rs index c906eeab2..53ec12f4d 100644 --- a/kanidmd/lib/src/valueset/secret.rs +++ b/kanidmd/lib/src/valueset/secret.rs @@ -1,6 +1,7 @@ use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -25,6 +26,11 @@ impl ValueSetSecret { Ok(Box::new(ValueSetSecret { set })) } + pub fn from_repl_v1(data: &[String]) -> Result { + let set = data.iter().cloned().collect(); + Ok(Box::new(ValueSetSecret { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and String is foreign. #[allow(clippy::should_implement_trait)] @@ -92,6 +98,12 @@ impl ValueSetT for ValueSetSecret { DbValueSetV2::SecretValue(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::SecretValue { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().map(|_| PartialValue::SecretValue)) } diff --git a/kanidmd/lib/src/valueset/session.rs b/kanidmd/lib/src/valueset/session.rs index f359021ea..9dd8c8f31 100644 --- a/kanidmd/lib/src/valueset/session.rs +++ b/kanidmd/lib/src/valueset/session.rs @@ -7,6 +7,9 @@ use crate::be::dbvalue::{ DbValueAccessScopeV1, DbValueIdentityId, DbValueOauth2Session, DbValueSession, }; use crate::prelude::*; +use crate::repl::proto::{ + ReplAccessScopeV1, ReplAttrV1, ReplIdentityIdV1, ReplOauth2SessionV1, ReplSessionV1, +}; use crate::schema::SchemaAttribute; use crate::value::{Oauth2Session, Session}; use crate::valueset::{uuid_to_proto_string, DbValueSetV2, ValueSet}; @@ -105,6 +108,83 @@ impl ValueSetSession { Ok(Box::new(ValueSetSession { map })) } + pub fn from_repl_v1(data: &[ReplSessionV1]) -> Result { + let map = data + .iter() + .filter_map( + |ReplSessionV1 { + refer, + label, + expiry, + issued_at, + issued_by, + scope, + }| { + // Convert things. + let issued_at = OffsetDateTime::parse(issued_at, time::Format::Rfc3339) + .map(|odt| odt.to_offset(time::UtcOffset::UTC)) + .map_err(|e| { + admin_error!( + ?e, + "Invalidating session {} due to invalid issued_at timestamp", + refer + ) + }) + .ok()?; + + // This is a bit annoying. In the case we can't parse the optional + // expiry, we need to NOT return the session so that it's immediately + // invalidated. To do this we have to invert some of the options involved + // here. 
+ let expiry = expiry + .as_ref() + .map(|e_inner| { + OffsetDateTime::parse(e_inner, time::Format::Rfc3339) + .map(|odt| odt.to_offset(time::UtcOffset::UTC)) + // We now have an + // Option> + }) + .transpose() + // Result, _> + .map_err(|e| { + admin_error!( + ?e, + "Invalidating session {} due to invalid expiry timestamp", + refer + ) + }) + // Option> + .ok()?; + + let issued_by = match issued_by { + ReplIdentityIdV1::Internal => IdentityId::Internal, + ReplIdentityIdV1::Uuid(u) => IdentityId::User(*u), + ReplIdentityIdV1::Synch(u) => IdentityId::Synch(*u), + }; + + let scope = match scope { + ReplAccessScopeV1::IdentityOnly => AccessScope::IdentityOnly, + ReplAccessScopeV1::ReadOnly => AccessScope::ReadOnly, + ReplAccessScopeV1::ReadWrite => AccessScope::ReadWrite, + ReplAccessScopeV1::Synchronise => AccessScope::Synchronise, + }; + + Some(( + *refer, + Session { + label: label.to_string(), + expiry, + issued_at, + issued_by, + scope, + }, + )) + }, + ) + .collect(); + Ok(Box::new(ValueSetSession { map })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. #[allow(clippy::should_implement_trait)] @@ -216,6 +296,38 @@ impl ValueSetT for ValueSetSession { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Session { + set: self + .map + .iter() + .map(|(u, m)| ReplSessionV1 { + refer: *u, + label: m.label.clone(), + expiry: m.expiry.map(|odt| { + debug_assert!(odt.offset() == time::UtcOffset::UTC); + odt.format(time::Format::Rfc3339) + }), + issued_at: { + debug_assert!(m.issued_at.offset() == time::UtcOffset::UTC); + m.issued_at.format(time::Format::Rfc3339) + }, + issued_by: match m.issued_by { + IdentityId::Internal => ReplIdentityIdV1::Internal, + IdentityId::User(u) => ReplIdentityIdV1::Uuid(u), + IdentityId::Synch(u) => ReplIdentityIdV1::Synch(u), + }, + scope: match m.scope { + AccessScope::IdentityOnly => ReplAccessScopeV1::IdentityOnly, + AccessScope::ReadOnly => ReplAccessScopeV1::ReadOnly, + AccessScope::ReadWrite => ReplAccessScopeV1::ReadWrite, + AccessScope::Synchronise => ReplAccessScopeV1::Synchronise, + }, + }) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.map.keys().cloned().map(PartialValue::Refer)) } @@ -345,6 +457,71 @@ impl ValueSetOauth2Session { Ok(Box::new(ValueSetOauth2Session { map, rs_filter })) } + pub fn from_repl_v1(data: &[ReplOauth2SessionV1]) -> Result { + let mut rs_filter = BTreeSet::new(); + let map = data + .iter() + .filter_map( + |ReplOauth2SessionV1 { + refer, + parent, + expiry, + issued_at, + rs_uuid, + }| { + // Convert things. + let issued_at = OffsetDateTime::parse(issued_at, time::Format::Rfc3339) + .map(|odt| odt.to_offset(time::UtcOffset::UTC)) + .map_err(|e| { + admin_error!( + ?e, + "Invalidating session {} due to invalid issued_at timestamp", + refer + ) + }) + .ok()?; + + // This is a bit annoying. In the case we can't parse the optional + // expiry, we need to NOT return the session so that it's immediately + // invalidated. To do this we have to invert some of the options involved + // here. + let expiry = expiry + .as_ref() + .map(|e_inner| { + OffsetDateTime::parse(e_inner, time::Format::Rfc3339) + .map(|odt| odt.to_offset(time::UtcOffset::UTC)) + // We now have an + // Option> + }) + .transpose() + // Result, _> + .map_err(|e| { + admin_error!( + ?e, + "Invalidating session {} due to invalid expiry timestamp", + refer + ) + }) + // Option> + .ok()?; + + // Insert to the rs_filter. 
+ rs_filter.insert(*rs_uuid); + Some(( + *refer, + Oauth2Session { + parent: *parent, + expiry, + issued_at, + rs_uuid: *rs_uuid, + }, + )) + }, + ) + .collect(); + Ok(Box::new(ValueSetOauth2Session { rs_filter, map })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. #[allow(clippy::should_implement_trait)] @@ -475,6 +652,28 @@ impl ValueSetT for ValueSetOauth2Session { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Oauth2Session { + set: self + .map + .iter() + .map(|(u, m)| ReplOauth2SessionV1 { + refer: *u, + parent: m.parent, + expiry: m.expiry.map(|odt| { + debug_assert!(odt.offset() == time::UtcOffset::UTC); + odt.format(time::Format::Rfc3339) + }), + issued_at: { + debug_assert!(m.issued_at.offset() == time::UtcOffset::UTC); + m.issued_at.format(time::Format::Rfc3339) + }, + rs_uuid: m.rs_uuid, + }) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.map.keys().cloned().map(PartialValue::Refer)) } diff --git a/kanidmd/lib/src/valueset/spn.rs b/kanidmd/lib/src/valueset/spn.rs index 6b001a71b..8e8a36590 100644 --- a/kanidmd/lib/src/valueset/spn.rs +++ b/kanidmd/lib/src/valueset/spn.rs @@ -1,6 +1,7 @@ use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -25,6 +26,11 @@ impl ValueSetSpn { Ok(Box::new(ValueSetSpn { set })) } + pub fn from_repl_v1(data: &[(String, String)]) -> Result { + let set = data.iter().map(|(a, b)| (a.clone(), b.clone())).collect(); + Ok(Box::new(ValueSetSpn { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. #[allow(clippy::should_implement_trait)] @@ -82,10 +88,7 @@ impl ValueSetT for ValueSetSpn { } fn generate_idx_eq_keys(&self) -> Vec { - self.set - .iter() - .map(|(n, d)| format!("{}@{}", n, d)) - .collect() + self.set.iter().map(|(n, d)| format!("{n}@{d}")).collect() } fn syntax(&self) -> SyntaxType { @@ -97,13 +100,19 @@ impl ValueSetT for ValueSetSpn { } fn to_proto_string_clone_iter(&self) -> Box + '_> { - Box::new(self.set.iter().map(|(n, d)| format!("{}@{}", n, d))) + Box::new(self.set.iter().map(|(n, d)| format!("{n}@{d}"))) } fn to_db_valueset_v2(&self) -> DbValueSetV2 { DbValueSetV2::Spn(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Spn { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new( self.set diff --git a/kanidmd/lib/src/valueset/ssh.rs b/kanidmd/lib/src/valueset/ssh.rs index eab58aad8..f8edfb3a7 100644 --- a/kanidmd/lib/src/valueset/ssh.rs +++ b/kanidmd/lib/src/valueset/ssh.rs @@ -3,6 +3,7 @@ use std::collections::BTreeMap; use crate::be::dbvalue::DbValueTaggedStringV1; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -27,6 +28,14 @@ impl ValueSetSshKey { Ok(Box::new(ValueSetSshKey { map })) } + pub fn from_repl_v1(data: &[(String, String)]) -> Result { + let map = data + .iter() + .map(|(tag, data)| (tag.clone(), data.clone())) + .collect(); + Ok(Box::new(ValueSetSshKey { map })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. 
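The "invert some of the options" dance in both session valuesets above is Option::transpose: an absent expiry is fine, but a present-yet-unparseable one must fail the whole record so the session is invalidated rather than kept alive without its expiry. The same control flow on a toy parser:

    fn parse_record(expiry: Option<&str>) -> Option<Option<u64>> {
        expiry
            .map(str::parse::<u64>) // Option<Result<u64, _>>
            .transpose() // Result<Option<u64>, _> - flips the nesting
            .map_err(|e| eprintln!("invalidating record: {e}"))
            .ok() // None here drops the record entirely
    }

    fn main() {
        assert_eq!(parse_record(None), Some(None)); // no expiry: record kept
        assert_eq!(parse_record(Some("300")), Some(Some(300))); // valid expiry: kept
        assert_eq!(parse_record(Some("zzz")), None); // corrupt expiry: record dropped
    }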
#[allow(clippy::should_implement_trait)] @@ -112,6 +121,16 @@ impl ValueSetT for ValueSetSshKey { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::SshKey { + set: self + .map + .iter() + .map(|(tag, key)| (tag.clone(), key.clone())) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.map.keys().cloned().map(PartialValue::SshKey)) } diff --git a/kanidmd/lib/src/valueset/syntax.rs b/kanidmd/lib/src/valueset/syntax.rs index 8d3ea061d..10ddb06ee 100644 --- a/kanidmd/lib/src/valueset/syntax.rs +++ b/kanidmd/lib/src/valueset/syntax.rs @@ -1,6 +1,7 @@ use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -25,6 +26,12 @@ impl ValueSetSyntax { let set = set.map_err(|_| OperationError::InvalidValueState)?; Ok(Box::new(ValueSetSyntax { set })) } + + pub fn from_repl_v1(data: &[u16]) -> Result { + let set: Result<_, _> = data.iter().copied().map(SyntaxType::try_from).collect(); + let set = set.map_err(|_| OperationError::InvalidValueState)?; + Ok(Box::new(ValueSetSyntax { set })) + } } impl FromIterator for Option> { @@ -101,6 +108,12 @@ impl ValueSetT for ValueSetSyntax { DbValueSetV2::SyntaxType(self.set.iter().map(|s| *s as u16).collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::SyntaxType { + set: self.set.iter().map(|s| *s as u16).collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().copied().map(PartialValue::Syntax)) } diff --git a/kanidmd/lib/src/valueset/totp.rs b/kanidmd/lib/src/valueset/totp.rs index 4439c4ffc..4ed3e7434 100644 --- a/kanidmd/lib/src/valueset/totp.rs +++ b/kanidmd/lib/src/valueset/totp.rs @@ -5,6 +5,7 @@ use crate::credential::totp::Totp; use crate::prelude::*; use crate::be::dbvalue::DbTotpV1; +use crate::repl::proto::{ReplAttrV1, ReplTotpV1}; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -36,6 +37,18 @@ impl ValueSetTotpSecret { Ok(Box::new(ValueSetTotpSecret { map })) } + pub fn from_repl_v1(data: &[(String, ReplTotpV1)]) -> Result { + let map = data + .iter() + .map(|(l, data)| { + Totp::try_from(data) + .map_err(|()| OperationError::InvalidValueState) + .map(|t| (l.clone(), t)) + }) + .collect::>()?; + Ok(Box::new(ValueSetTotpSecret { map })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and tuples are always foreign. 
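IndexType, SyntaxType and UiHint all cross the replication boundary as bare u16 discriminants: `*s as u16` outbound, TryFrom<u16> inbound with InvalidValueState on anything unknown. The pattern, self-contained with a stand-in enum (variant names follow the IndexType shown in this diff; discriminant values are illustrative):

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    #[repr(u16)]
    enum IndexType {
        Equality = 0,
        Presence = 1,
        SubString = 2,
    }

    impl TryFrom<u16> for IndexType {
        type Error = ();
        fn try_from(v: u16) -> Result<Self, Self::Error> {
            match v {
                0 => Ok(IndexType::Equality),
                1 => Ok(IndexType::Presence),
                2 => Ok(IndexType::SubString),
                // An unknown discriminant from a newer or corrupt peer is an
                // error, never a guess.
                _ => Err(()),
            }
        }
    }

    fn main() {
        let out: Vec<u16> = [IndexType::Presence, IndexType::SubString]
            .iter()
            .map(|s| *s as u16)
            .collect();
        let back: Result<Vec<IndexType>, ()> =
            out.iter().copied().map(IndexType::try_from).collect();
        assert_eq!(back.unwrap(), vec![IndexType::Presence, IndexType::SubString]);
        assert!(IndexType::try_from(999).is_err());
    }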
#[allow(clippy::should_implement_trait)] @@ -118,6 +131,16 @@ impl ValueSetT for ValueSetTotpSecret { ) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::TotpSecret { + set: self + .map + .iter() + .map(|(label, totp)| (label.clone(), totp.to_repl_v1())) + .collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.map.keys().cloned().map(PartialValue::Utf8)) } diff --git a/kanidmd/lib/src/valueset/uihint.rs b/kanidmd/lib/src/valueset/uihint.rs index 6ba29d14c..27d1f8cb8 100644 --- a/kanidmd/lib/src/valueset/uihint.rs +++ b/kanidmd/lib/src/valueset/uihint.rs @@ -1,6 +1,7 @@ use std::collections::BTreeSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -27,6 +28,12 @@ impl ValueSetUiHint { let set = set.map_err(|_| OperationError::InvalidValueState)?; Ok(Box::new(ValueSetUiHint { set })) } + + pub fn from_repl_v1(data: &[u16]) -> Result { + let set: Result<_, _> = data.iter().copied().map(UiHint::try_from).collect(); + let set = set.map_err(|_| OperationError::InvalidValueState)?; + Ok(Box::new(ValueSetUiHint { set })) + } } impl ValueSetT for ValueSetUiHint { @@ -90,6 +97,12 @@ impl ValueSetT for ValueSetUiHint { DbValueSetV2::UiHint(self.set.iter().map(|u| *u as u16).collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::UiHint { + set: self.set.iter().map(|u| *u as u16).collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().copied().map(PartialValue::UiHint)) } diff --git a/kanidmd/lib/src/valueset/uint32.rs b/kanidmd/lib/src/valueset/uint32.rs index f82a31b28..059174293 100644 --- a/kanidmd/lib/src/valueset/uint32.rs +++ b/kanidmd/lib/src/valueset/uint32.rs @@ -1,6 +1,7 @@ use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -25,6 +26,11 @@ impl ValueSetUint32 { Ok(Box::new(ValueSetUint32 { set })) } + pub fn from_repl_v1(data: &[u32]) -> Result { + let set = data.iter().copied().collect(); + Ok(Box::new(ValueSetUint32 { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and u32 is foreign. #[allow(clippy::should_implement_trait)] @@ -104,6 +110,12 @@ impl ValueSetT for ValueSetUint32 { DbValueSetV2::Uint32(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Uint32 { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().copied().map(PartialValue::new_uint32)) } diff --git a/kanidmd/lib/src/valueset/url.rs b/kanidmd/lib/src/valueset/url.rs index b011425f8..1f7c246a0 100644 --- a/kanidmd/lib/src/valueset/url.rs +++ b/kanidmd/lib/src/valueset/url.rs @@ -1,6 +1,7 @@ use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -25,6 +26,11 @@ impl ValueSetUrl { Ok(Box::new(ValueSetUrl { set })) } + pub fn from_repl_v1(data: &[Url]) -> Result { + let set = data.iter().cloned().collect(); + Ok(Box::new(ValueSetUrl { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and Url is foreign. 
#[allow(clippy::should_implement_trait)] @@ -98,6 +104,12 @@ impl ValueSetT for ValueSetUrl { DbValueSetV2::Url(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Url { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().cloned().map(PartialValue::Url)) } diff --git a/kanidmd/lib/src/valueset/utf8.rs b/kanidmd/lib/src/valueset/utf8.rs index 6d04ae262..19916a07d 100644 --- a/kanidmd/lib/src/valueset/utf8.rs +++ b/kanidmd/lib/src/valueset/utf8.rs @@ -1,6 +1,7 @@ use std::collections::BTreeSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{DbValueSetV2, ValueSet}; @@ -24,6 +25,11 @@ impl ValueSetUtf8 { let set = data.into_iter().collect(); Ok(Box::new(ValueSetUtf8 { set })) } + + pub fn from_repl_v1(data: &[String]) -> Result { + let set = data.iter().cloned().collect(); + Ok(Box::new(ValueSetUtf8 { set })) + } } impl ValueSetT for ValueSetUtf8 { @@ -93,6 +99,12 @@ impl ValueSetT for ValueSetUtf8 { DbValueSetV2::Utf8(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Utf8 { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().map(|i| PartialValue::new_utf8s(i.as_str()))) } diff --git a/kanidmd/lib/src/valueset/uuid.rs b/kanidmd/lib/src/valueset/uuid.rs index ec3b870df..03a67307e 100644 --- a/kanidmd/lib/src/valueset/uuid.rs +++ b/kanidmd/lib/src/valueset/uuid.rs @@ -3,6 +3,7 @@ use std::collections::BTreeSet; use smolset::SmolSet; use crate::prelude::*; +use crate::repl::proto::ReplAttrV1; use crate::schema::SchemaAttribute; use crate::valueset::{uuid_to_proto_string, DbValueSetV2, ValueSet}; @@ -27,6 +28,11 @@ impl ValueSetUuid { Ok(Box::new(ValueSetUuid { set })) } + pub fn from_repl_v1(data: &[Uuid]) -> Result { + let set = data.iter().copied().collect(); + Ok(Box::new(ValueSetUuid { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and uuid is foreign. #[allow(clippy::should_implement_trait)] @@ -106,6 +112,12 @@ impl ValueSetT for ValueSetUuid { DbValueSetV2::Uuid(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Uuid { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().copied().map(PartialValue::Uuid)) } @@ -172,6 +184,11 @@ impl ValueSetRefer { Ok(Box::new(ValueSetRefer { set })) } + pub fn from_repl_v1(data: &[Uuid]) -> Result { + let set = data.iter().copied().collect(); + Ok(Box::new(ValueSetRefer { set })) + } + // We need to allow this, because rust doesn't allow us to impl FromIterator on foreign // types, and uuid is foreign. #[allow(clippy::should_implement_trait)] @@ -255,6 +272,12 @@ impl ValueSetT for ValueSetRefer { DbValueSetV2::Reference(self.set.iter().cloned().collect()) } + fn to_repl_v1(&self) -> ReplAttrV1 { + ReplAttrV1::Reference { + set: self.set.iter().cloned().collect(), + } + } + fn to_partialvalue_iter(&self) -> Box + '_> { Box::new(self.set.iter().copied().map(PartialValue::Refer)) }
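Finally, a note on the Cid changes threaded through this diff: the domain uuid is dropped from Cid, and write transactions now call Cid::new_lamport(self.s_uuid, curtime, &ts_max). The constructor body is not shown in these hunks; a plausible minimal sketch of the usual Lamport rule it implies (never issue a timestamp at or below the highest one already written, even if the wall clock stepped backwards) would be:

    use std::time::Duration;

    // Stand-in for the real Cid; the server uuid is reduced to a u64 for brevity.
    #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
    struct Cid {
        ts: Duration,
        s_uuid: u64,
    }

    impl Cid {
        // Hypothetical body - the real one lives in kanidmd/lib/src/repl/cid.rs.
        fn new_lamport(s_uuid: u64, curtime: Duration, ts_max: &Duration) -> Self {
            let ts = if curtime > *ts_max {
                curtime
            } else {
                // Clock skew or same-instant writes: step strictly past the
                // last timestamp persisted via set_db_ts_max.
                *ts_max + Duration::from_nanos(1)
            };
            Cid { ts, s_uuid }
        }
    }

    fn main() {
        let ts_max = Duration::from_secs(100);
        // Healthy clock: just use the current time.
        let a = Cid::new_lamport(1, Duration::from_secs(200), &ts_max);
        assert_eq!(a.ts, Duration::from_secs(200));
        // Skewed clock: still strictly advance past everything already written.
        let b = Cid::new_lamport(1, Duration::from_secs(50), &ts_max);
        assert!(b.ts > ts_max);
        assert_eq!(a.s_uuid, b.s_uuid);
    }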