20230130 hackweek replication (#1358)

Add initial support for refreshing the content of a new server in a replication topology. This is embedded in test cases only for now.
This commit is contained in:
Firstyear 2023-02-15 10:25:51 +10:00 committed by GitHub
parent 876aef9a6e
commit fe24056fdc
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
66 changed files with 3627 additions and 1189 deletions

View file

@ -63,6 +63,7 @@ pub enum ConsistencyError {
BackendAllIdsSync,
BackendIndexSync,
ChangelogDesynchronised(u64),
ChangeStateDesynchronised(u64),
RuvInconsistent(String),
}
@ -249,6 +250,7 @@ pub enum OperationError {
ReplReplayFailure,
ReplEntryNotChanged,
ReplInvalidRUVState,
ReplDomainLevelUnsatisfiable,
}
impl PartialEq for OperationError {

View file

@ -21,7 +21,7 @@ pub fn scaling_user_create_single(c: &mut Criterion) {
group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
b.iter_custom(|iters| {
let mut elapsed = Duration::from_secs(0);
println!("iters, size -> {:?}, {:?}", iters, size);
println!("iters, size -> {iters:?}, {size:?}");
for _i in 0..iters {
let mut rt = tokio::runtime::Builder::new_current_thread();
@ -37,7 +37,7 @@ pub fn scaling_user_create_single(c: &mut Criterion) {
let start = Instant::now();
for counter in 0..size {
let mut idms_prox_write = idms.proxy_write(ct).await;
let name = format!("testperson_{}", counter);
let name = format!("testperson_{counter}");
let e1 = entry_init!(
("class", Value::new_class("object")),
("class", Value::new_class("person")),
@ -74,12 +74,12 @@ pub fn scaling_user_create_batched(c: &mut Criterion) {
group.bench_with_input(BenchmarkId::from_parameter(size), size, |b, &size| {
b.iter_custom(|iters| {
let mut elapsed = Duration::from_secs(0);
println!("iters, size -> {:?}, {:?}", iters, size);
println!("iters, size -> {iters:?}, {size:?}");
let data: Vec<_> = (0..size)
.into_iter()
.map(|i| {
let name = format!("testperson_{}", i);
let name = format!("testperson_{i}");
entry_init!(
("class", Value::new_class("object")),
("class", Value::new_class("person")),

View file

@ -437,9 +437,9 @@ impl std::fmt::Debug for DbEntry {
DbEntryVers::V1(dbe_v1) => {
write!(f, "v1 - {{ ")?;
for (k, vs) in dbe_v1.attrs.iter() {
write!(f, "{} - [", k)?;
write!(f, "{k} - [")?;
for v in vs {
write!(f, "{:?}, ", v)?;
write!(f, "{v:?}, ")?;
}
write!(f, "], ")?;
}
@ -448,8 +448,8 @@ impl std::fmt::Debug for DbEntry {
DbEntryVers::V2(dbe_v2) => {
write!(f, "v2 - {{ ")?;
for (k, vs) in dbe_v2.attrs.iter() {
write!(f, "{} - [", k)?;
write!(f, "{:?}, ", vs)?;
write!(f, "{k} - [")?;
write!(f, "{vs:?}, ")?;
write!(f, "], ")?;
}
write!(f, "}}")
@ -466,24 +466,24 @@ impl std::fmt::Display for DbEntry {
match dbe_v1.attrs.get("uuid") {
Some(uuids) => {
for uuid in uuids {
write!(f, "{:?}, ", uuid)?;
write!(f, "{uuid:?}, ")?;
}
}
None => write!(f, "Uuid(INVALID), ")?,
};
if let Some(names) = dbe_v1.attrs.get("name") {
for name in names {
write!(f, "{:?}, ", name)?;
write!(f, "{name:?}, ")?;
}
}
if let Some(names) = dbe_v1.attrs.get("attributename") {
for name in names {
write!(f, "{:?}, ", name)?;
write!(f, "{name:?}, ")?;
}
}
if let Some(names) = dbe_v1.attrs.get("classname") {
for name in names {
write!(f, "{:?}, ", name)?;
write!(f, "{name:?}, ")?;
}
}
write!(f, "}}")
@ -492,18 +492,18 @@ impl std::fmt::Display for DbEntry {
write!(f, "v2 - {{ ")?;
match dbe_v2.attrs.get("uuid") {
Some(uuids) => {
write!(f, "{:?}, ", uuids)?;
write!(f, "{uuids:?}, ")?;
}
None => write!(f, "Uuid(INVALID), ")?,
};
if let Some(names) = dbe_v2.attrs.get("name") {
write!(f, "{:?}, ", names)?;
write!(f, "{names:?}, ")?;
}
if let Some(names) = dbe_v2.attrs.get("attributename") {
write!(f, "{:?}, ", names)?;
write!(f, "{names:?}, ")?;
}
if let Some(names) = dbe_v2.attrs.get("classname") {
write!(f, "{:?}, ", names)?;
write!(f, "{names:?}, ")?;
}
write!(f, "}}")
}

View file

@ -12,8 +12,6 @@ use webauthn_rs_core::proto::{COSEKey, UserVerificationPolicy};
#[derive(Serialize, Deserialize, Debug)]
pub struct DbCidV1 {
#[serde(rename = "d")]
pub domain_id: Uuid,
#[serde(rename = "s")]
pub server_id: Uuid,
#[serde(rename = "t")]
@ -276,8 +274,8 @@ impl fmt::Display for DbCred {
DbCred::TmpWn { webauthn, uuid } => {
write!(f, "TmpWn ( w {}, u {} )", webauthn.len(), uuid)
}
DbCred::V2Password { password: _, uuid } => write!(f, "V2Pw ( u {} )", uuid),
DbCred::V2GenPassword { password: _, uuid } => write!(f, "V2GPw ( u {} )", uuid),
DbCred::V2Password { password: _, uuid } => write!(f, "V2Pw ( u {uuid} )"),
DbCred::V2GenPassword { password: _, uuid } => write!(f, "V2GPw ( u {uuid} )"),
DbCred::V2PasswordMfa {
password: _,
totp,
@ -688,7 +686,7 @@ mod tests {
let x = vec![dbcred];
let json = serde_json::to_string(&x).unwrap();
eprintln!("{}", json);
eprintln!("{json}");
let _e_dbcred: Vec<DbCred> = serde_json::from_str(&json).unwrap();

View file

@ -379,7 +379,7 @@ pub trait IdlSqliteTransaction {
.or_else(|e| serde_cbor::from_slice(d.as_slice()).map_err(|_| e))
.map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: Serde CBOR Error");
eprintln!("CRITICAL: Serde CBOR Error -> {:?}", e);
eprintln!("CRITICAL: Serde CBOR Error -> {e:?}");
OperationError::SerdeCborError
})?,
),
@ -410,7 +410,7 @@ pub trait IdlSqliteTransaction {
.or_else(|e| serde_cbor::from_slice(d.as_slice()).map_err(|_| e))
.map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: Serde CBOR Error");
eprintln!("CRITICAL: Serde CBOR Error -> {:?}", e);
eprintln!("CRITICAL: Serde CBOR Error -> {e:?}");
OperationError::SerdeCborError
})?,
),
@ -442,7 +442,7 @@ pub trait IdlSqliteTransaction {
.or_else(|_| serde_cbor::from_slice(d.as_slice()))
.map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: Serde JSON Error");
eprintln!("CRITICAL: Serde JSON Error -> {:?}", e);
eprintln!("CRITICAL: Serde JSON Error -> {e:?}");
OperationError::SerdeJsonError
})?,
),
@ -500,7 +500,7 @@ pub trait IdlSqliteTransaction {
.ok_or(OperationError::InvalidEntryId)
.and_then(|data| {
data.into_dbentry()
.map(|(id, db_e)| (id, format!("{:?}", db_e)))
.map(|(id, db_e)| (id, format!("{db_e:?}")))
})
}
@ -511,7 +511,7 @@ pub trait IdlSqliteTransaction {
// TODO: Once we have slopes we can add .exists_table, and assert
// it's an idx table.
let query = format!("SELECT key, idl FROM {}", index_name);
let query = format!("SELECT key, idl FROM {index_name}");
let mut stmt = self
.get_conn()
.prepare(query.as_str())
@ -1117,7 +1117,7 @@ impl IdlSqliteWriteTransaction {
.map(|_| ())
.map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: rusqlite error");
eprintln!("CRITICAL: rusqlite error {:?}", e);
eprintln!("CRITICAL: rusqlite error {e:?}");
OperationError::SqliteError
})
})
@ -1165,7 +1165,7 @@ impl IdlSqliteWriteTransaction {
pub fn write_db_s_uuid(&self, nsid: Uuid) -> Result<(), OperationError> {
let data = serde_json::to_vec(&nsid).map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: Serde JSON Error");
eprintln!("CRITICAL: Serde JSON Error -> {:?}", e);
eprintln!("CRITICAL: Serde JSON Error -> {e:?}");
OperationError::SerdeJsonError
})?;
@ -1183,7 +1183,7 @@ impl IdlSqliteWriteTransaction {
.map(|_| ())
.map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: ruslite error");
eprintln!("CRITICAL: rusqlite error {:?}", e);
eprintln!("CRITICAL: rusqlite error {e:?}");
OperationError::SqliteError
})
}
@ -1191,7 +1191,7 @@ impl IdlSqliteWriteTransaction {
pub fn write_db_d_uuid(&self, nsid: Uuid) -> Result<(), OperationError> {
let data = serde_json::to_vec(&nsid).map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: Serde JSON Error");
eprintln!("CRITICAL: Serde JSON Error -> {:?}", e);
eprintln!("CRITICAL: Serde JSON Error -> {e:?}");
OperationError::SerdeJsonError
})?;
@ -1209,7 +1209,7 @@ impl IdlSqliteWriteTransaction {
.map(|_| ())
.map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: rusqlite error");
eprintln!("CRITICAL: rusqlite error {:?}", e);
eprintln!("CRITICAL: rusqlite error {e:?}");
OperationError::SqliteError
})
}
@ -1217,7 +1217,7 @@ impl IdlSqliteWriteTransaction {
pub fn set_db_ts_max(&self, ts: Duration) -> Result<(), OperationError> {
let data = serde_json::to_vec(&ts).map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: Serde JSON Error");
eprintln!("CRITICAL: Serde JSON Error -> {:?}", e);
eprintln!("CRITICAL: Serde JSON Error -> {e:?}");
OperationError::SerdeJsonError
})?;
@ -1235,7 +1235,7 @@ impl IdlSqliteWriteTransaction {
.map(|_| ())
.map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: rusqlite error");
eprintln!("CRITICAL: rusqlite error {:?}", e);
eprintln!("CRITICAL: rusqlite error {e:?}");
OperationError::SqliteError
})
}
@ -1280,7 +1280,7 @@ impl IdlSqliteWriteTransaction {
pub(crate) fn set_db_index_version(&self, v: i64) -> Result<(), OperationError> {
self.set_db_version_key(DBV_INDEXV, v).map_err(|e| {
admin_error!(immediate = true, ?e, "CRITICAL: rusqlite error");
eprintln!("CRITICAL: rusqlite error {:?}", e);
eprintln!("CRITICAL: rusqlite error {e:?}");
OperationError::SqliteError
})
}
@ -1515,11 +1515,10 @@ impl IdlSqlite {
.with_init(move |c| {
c.execute_batch(
format!(
"PRAGMA page_size={};
"PRAGMA page_size={fs_page_size};
PRAGMA journal_mode=WAL;
PRAGMA wal_autocheckpoint={};
PRAGMA wal_checkpoint(RESTART);",
fs_page_size, checkpoint_pages
PRAGMA wal_autocheckpoint={checkpoint_pages};
PRAGMA wal_checkpoint(RESTART);"
)
.as_str(),
)

View file

@ -965,7 +965,7 @@ impl<'a> BackendWriteTransaction<'a> {
// Check that every entry has a change associated
// that matches the cid?
entries.iter().try_for_each(|e| {
if e.get_changelog().contains_tail_cid(cid) {
if e.get_changestate().contains_tail_cid(cid) {
Ok(())
} else {
admin_error!(
@ -1004,6 +1004,42 @@ impl<'a> BackendWriteTransaction<'a> {
Ok(c_entries)
}
// NOTE(review): the tracing span below is named "be::create" although this is
// the refresh path — looks copied from `create`; confirm the intended span name.
#[instrument(level = "debug", name = "be::create", skip_all)]
/// This is similar to create, but used in the replication path as it skips the
/// modification of the RUV and the checking of CIDs since these actions are not
/// required during a replication refresh (else we'd create an infinite replication
/// loop.)
///
/// Takes ownership of the sealed, not-yet-committed entries, assigns each a
/// fresh backend id, persists them, and returns the committed forms.
///
/// # Errors
/// Returns `OperationError::EmptyRequest` for an empty batch, or propagates
/// any id-layer/indexing failure.
pub fn refresh(
&mut self,
entries: Vec<Entry<EntrySealed, EntryNew>>,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
// An empty refresh batch is a caller bug, not a no-op.
if entries.is_empty() {
admin_error!("No entries provided to BE to create, invalid server call!");
return Err(OperationError::EmptyRequest);
}
// Assign id's to all the new entries.
// Ids are allocated sequentially above the current id2entry high-water mark.
let mut id_max = self.idlayer.get_id2entry_max_id()?;
let c_entries: Vec<_> = entries
.into_iter()
.map(|e| {
id_max += 1;
e.into_sealed_committed_id(id_max)
})
.collect();
// Persist the committed entries, then record the new high-water id.
self.idlayer.write_identries(c_entries.iter())?;
self.idlayer.set_id2entry_max_id(id_max);
// Now update the indexes as required.
// (None, Some(e)) marks each entry as newly created for indexing purposes.
for e in c_entries.iter() {
self.entry_index(None, Some(e))?
}
Ok(c_entries)
}
#[instrument(level = "debug", name = "be::modify", skip_all)]
pub fn modify(
&mut self,
@ -1019,7 +1055,7 @@ impl<'a> BackendWriteTransaction<'a> {
assert!(post_entries.len() == pre_entries.len());
post_entries.iter().try_for_each(|e| {
if e.get_changelog().contains_tail_cid(cid) {
if e.get_changestate().contains_tail_cid(cid) {
Ok(())
} else {
admin_error!(
@ -1070,28 +1106,28 @@ impl<'a> BackendWriteTransaction<'a> {
// Now that we have a list of entries we need to partition them into
// two sets. The entries that are tombstoned and ready to reap_tombstones, and
// the entries that need to have their change logs trimmed.
//
// Remember, these tombstones can be reaped because they were tombstoned at time
// point 'cid', and since we are now "past" that minimum cid, then other servers
// will also be trimming these out.
//
// Note unlike a changelog impl, we don't need to trim changestates here. We
// only need the RUV trimmed so that we know if other servers are lagging behind!
// First we trim changelogs. Go through each entry, and trim the CL, and write it back.
let mut entries: Vec<_> = entries.iter().map(|er| er.as_ref().clone()).collect();
entries
.iter_mut()
.try_for_each(|e| e.get_changelog_mut().trim_up_to(cid))?;
// Write down the cl trims
self.get_idlayer().write_identries(entries.iter())?;
// What entries are tombstones and ready to be deleted?
let (tombstones, leftover): (Vec<_>, Vec<_>) = entries
.into_iter()
.partition(|e| e.get_changelog().can_delete());
.partition(|e| e.get_changestate().can_delete(cid));
let ruv_idls = self.get_ruv().ruv_idls();
// Assert that anything leftover still either is *alive* OR is a tombstone
// and has entries in the RUV!
let ruv_idls = self.get_ruv().ruv_idls();
if !leftover
.iter()
.all(|e| e.get_changelog().is_live() || ruv_idls.contains(e.get_id()))
.all(|e| e.get_changestate().is_live() || ruv_idls.contains(e.get_id()))
{
admin_error!("Left over entries may be orphaned due to missing RUV entries");
return Err(OperationError::ReplInvalidRUVState);
@ -1114,7 +1150,8 @@ impl<'a> BackendWriteTransaction<'a> {
let sz = id_list.len();
self.get_idlayer().delete_identry(id_list.into_iter())?;
// Finally, purge the indexes from the entries we removed.
// Finally, purge the indexes from the entries we removed. These still have
// indexes due to class=tombstone.
tombstones
.iter()
.try_for_each(|e| self.entry_index(Some(e), None))?;
@ -1442,11 +1479,18 @@ impl<'a> BackendWriteTransaction<'a> {
Ok(())
}
#[cfg(test)]
pub fn purge_idxs(&mut self) -> Result<(), OperationError> {
fn purge_idxs(&mut self) -> Result<(), OperationError> {
unsafe { self.get_idlayer().purge_idxs() }
}
/// Purge every entry (id2entry) and then every index from the database.
/// The "danger" prefix is deliberate: after this call the server holds no
/// local data. NOTE(review): appears intended for the replication refresh
/// path, where local content is discarded before accepting the supplier's
/// data — confirm against callers.
pub(crate) fn danger_delete_all_db_content(&mut self) -> Result<(), OperationError> {
unsafe {
self.get_idlayer()
.purge_id2entry()
// Indexes are only purged if the entry purge succeeded.
.and_then(|_| self.purge_idxs())
}
}
#[cfg(test)]
pub fn load_test_idl(
&mut self,
@ -1604,6 +1648,12 @@ impl<'a> BackendWriteTransaction<'a> {
Ok(nsid)
}
/// Manually set a new domain UUID and store it into the DB. This is used
/// as part of a replication refresh.
// Thin wrapper over the id-layer persistence call; no validation is done
// here — the caller is responsible for supplying the correct domain UUID.
pub fn set_db_d_uuid(&mut self, nsid: Uuid) -> Result<(), OperationError> {
self.get_idlayer().write_db_d_uuid(nsid)
}
/// This pulls the domain UUID from the database
pub fn get_db_d_uuid(&mut self) -> Uuid {
#[allow(clippy::expect_used)]
@ -2112,7 +2162,7 @@ mod tests {
"{}/.backup_test.json",
option_env!("OUT_DIR").unwrap_or("/tmp")
);
eprintln!(" ⚠️ {}", db_backup_file_name);
eprintln!(" ⚠️ {db_backup_file_name}");
run_test!(|be: &mut BackendWriteTransaction| {
// Important! Need db metadata setup!
be.reset_db_s_uuid().unwrap();
@ -2168,7 +2218,7 @@ mod tests {
"{}/.backup2_test.json",
option_env!("OUT_DIR").unwrap_or("/tmp")
);
eprintln!(" ⚠️ {}", db_backup_file_name);
eprintln!(" ⚠️ {db_backup_file_name}");
run_test!(|be: &mut BackendWriteTransaction| {
// Important! Need db metadata setup!
be.reset_db_s_uuid().unwrap();

View file

@ -39,13 +39,15 @@ pub const SYSTEM_INDEX_VERSION: i64 = 28;
* who don't muck with the levels, but it means that we can do mixed version
* upgrades.
*/
pub const DOMAIN_LEVEL_1: u32 = 1;
pub type DomainVersion = u32;
pub const DOMAIN_LEVEL_1: DomainVersion = 1;
// The minimum supported domain functional level
pub const DOMAIN_MIN_LEVEL: u32 = DOMAIN_LEVEL_1;
pub const DOMAIN_MIN_LEVEL: DomainVersion = DOMAIN_LEVEL_1;
// The target supported domain functional level
pub const DOMAIN_TGT_LEVEL: u32 = DOMAIN_LEVEL_1;
pub const DOMAIN_TGT_LEVEL: DomainVersion = DOMAIN_LEVEL_1;
// The maximum supported domain functional level
pub const DOMAIN_MAX_LEVEL: u32 = DOMAIN_LEVEL_1;
pub const DOMAIN_MAX_LEVEL: DomainVersion = DOMAIN_LEVEL_1;
// On test builds, define to 60 seconds
#[cfg(test)]

View file

@ -223,6 +223,7 @@ pub const UUID_SCHEMA_ATTR_SYNC_ALLOWED: Uuid = uuid!("00000000-0000-0000-0000-f
pub const UUID_SCHEMA_ATTR_EMAILPRIMARY: Uuid = uuid!("00000000-0000-0000-0000-ffff00000126");
pub const UUID_SCHEMA_ATTR_EMAILALTERNATIVE: Uuid = uuid!("00000000-0000-0000-0000-ffff00000127");
pub const UUID_SCHEMA_ATTR_TOTP_IMPORT: Uuid = uuid!("00000000-0000-0000-0000-ffff00000128");
pub const UUID_SCHEMA_ATTR_REPLICATED: Uuid = uuid!("00000000-0000-0000-0000-ffff00000129");
// System and domain infos
// I'd like to strongly criticise william of the past for making poor choices about these allocations.

View file

@ -1,4 +1,4 @@
use super::uuids::UUID_DOMAIN_INFO;
use super::uuids::{UUID_DOMAIN_INFO, UUID_SYSTEM_CONFIG, UUID_SYSTEM_INFO};
use crate::value::{PartialValue, Value};
use url::Url;
@ -37,6 +37,8 @@ lazy_static! {
pub static ref PVCLASS_SYSTEM_CONFIG: PartialValue = PartialValue::new_class("system_config");
pub static ref PVCLASS_TOMBSTONE: PartialValue = PartialValue::new_class("tombstone");
pub static ref PVUUID_DOMAIN_INFO: PartialValue = PartialValue::Uuid(UUID_DOMAIN_INFO);
pub static ref PVUUID_SYSTEM_CONFIG: PartialValue = PartialValue::Uuid(UUID_SYSTEM_CONFIG);
pub static ref PVUUID_SYSTEM_INFO: PartialValue = PartialValue::Uuid(UUID_SYSTEM_INFO);
pub static ref CLASS_ACCESS_CONTROL_PROFILE: Value = Value::new_class("access_control_profile");
pub static ref CLASS_ACCESS_CONTROL_SEARCH: Value = Value::new_class("access_control_search");
pub static ref CLASS_ACCOUNT: Value = Value::new_class("account");

View file

@ -13,6 +13,9 @@ use webauthn_rs::prelude::{AuthenticationResult, Passkey, SecurityKey};
use webauthn_rs_core::proto::{Credential as WebauthnCredential, CredentialV3};
use crate::be::dbvalue::{DbBackupCodeV1, DbCred, DbPasswordV1};
use crate::repl::proto::{
ReplBackupCodeV1, ReplCredV1, ReplPasskeyV4V1, ReplPasswordV1, ReplSecurityKeyV4V1,
};
pub mod policy;
pub mod softlock;
@ -101,6 +104,30 @@ impl TryFrom<DbPasswordV1> for Password {
}
}
// Reconstruct in-memory password KDF material from its replication wire form.
// Each wire variant maps 1:1 onto a `Kdf` variant; the `.0` accesses unwrap
// the raw byte buffers carried by the repl proto wrapper types. This is the
// inverse of `Password::to_repl_v1`. Infallible in practice (always `Ok`),
// but keeps `Error = ()` for symmetry with the other repl conversions.
impl TryFrom<&ReplPasswordV1> for Password {
type Error = ();
fn try_from(value: &ReplPasswordV1) -> Result<Self, Self::Error> {
match value {
ReplPasswordV1::PBKDF2 { cost, salt, hash } => Ok(Password {
material: Kdf::PBKDF2(*cost, salt.0.clone(), hash.0.clone()),
}),
ReplPasswordV1::PBKDF2_SHA1 { cost, salt, hash } => Ok(Password {
material: Kdf::PBKDF2_SHA1(*cost, salt.0.clone(), hash.0.clone()),
}),
ReplPasswordV1::PBKDF2_SHA512 { cost, salt, hash } => Ok(Password {
material: Kdf::PBKDF2_SHA512(*cost, salt.0.clone(), hash.0.clone()),
}),
ReplPasswordV1::SSHA512 { salt, hash } => Ok(Password {
material: Kdf::SSHA512(salt.0.clone(), hash.0.clone()),
}),
ReplPasswordV1::NT_MD4 { hash } => Ok(Password {
material: Kdf::NT_MD4(hash.0.clone()),
}),
}
}
}
// OpenLDAP based their PBKDF2 implementation on passlib from python, that uses a
// non-standard base64 altchar set and padding that is not supported by
// anything else in the world. To manage this, we only ever encode to base64 with
@ -420,6 +447,33 @@ impl Password {
}
}
/// Serialise this password's KDF material into its replication (wire) form.
/// The inverse of the `TryFrom<&ReplPasswordV1>` conversion; costs are copied,
/// salt/hash buffers are cloned into the repl wrapper types.
pub fn to_repl_v1(&self) -> ReplPasswordV1 {
match &self.material {
Kdf::PBKDF2(cost, salt, hash) => ReplPasswordV1::PBKDF2 {
cost: *cost,
salt: salt.clone().into(),
hash: hash.clone().into(),
},
Kdf::PBKDF2_SHA1(cost, salt, hash) => ReplPasswordV1::PBKDF2_SHA1 {
cost: *cost,
salt: salt.clone().into(),
hash: hash.clone().into(),
},
Kdf::PBKDF2_SHA512(cost, salt, hash) => ReplPasswordV1::PBKDF2_SHA512 {
cost: *cost,
salt: salt.clone().into(),
hash: hash.clone().into(),
},
Kdf::SSHA512(salt, hash) => ReplPasswordV1::SSHA512 {
salt: salt.clone().into(),
hash: hash.clone().into(),
},
Kdf::NT_MD4(hash) => ReplPasswordV1::NT_MD4 {
hash: hash.clone().into(),
},
}
}
pub fn requires_upgrade(&self) -> bool {
match &self.material {
Kdf::PBKDF2_SHA512(cost, salt, hash) | Kdf::PBKDF2(cost, salt, hash) => {
@ -447,6 +501,16 @@ impl TryFrom<DbBackupCodeV1> for BackupCodes {
}
}
// Rebuild the backup-code set from its replication wire form by cloning each
// code into the in-memory set. Infallible in practice (always `Ok`); `Error = ()`
// matches the other repl `TryFrom` conversions.
impl TryFrom<&ReplBackupCodeV1> for BackupCodes {
type Error = ();
fn try_from(value: &ReplBackupCodeV1) -> Result<Self, Self::Error> {
Ok(BackupCodes {
code_set: value.codes.iter().cloned().collect(),
})
}
}
impl BackupCodes {
pub fn new(code_set: HashSet<String>) -> Self {
BackupCodes { code_set }
@ -465,6 +529,12 @@ impl BackupCodes {
code_set: self.code_set.clone(),
}
}
/// Serialise the backup-code set into its replication (wire) form.
/// The inverse of `TryFrom<&ReplBackupCodeV1>`; codes are cloned out.
pub fn to_repl_v1(&self) -> ReplBackupCodeV1 {
ReplBackupCodeV1 {
codes: self.code_set.iter().cloned().collect(),
}
}
}
#[derive(Clone, Debug, PartialEq)]
@ -753,6 +823,85 @@ impl TryFrom<DbCred> for Credential {
}
impl Credential {
/// Attempt to reconstruct a credential from its replication wire form,
/// returning the `(tag, credential)` pair on success.
///
/// # Errors
/// Returns `Err(())` if any inner component fails to convert, or if the
/// reconstructed `CredentialType` does not pass `is_valid()`.
pub fn try_from_repl_v1(rc: &ReplCredV1) -> Result<(String, Self), ()> {
match rc {
ReplCredV1::TmpWn { tag, set } => {
// The credential uuid is taken from the first passkey in the set;
// an empty set therefore yields None and is rejected below.
let m_uuid: Option<Uuid> = set.get(0).map(|v| v.uuid);
let v_webauthn = set
.iter()
.map(|passkey| (passkey.tag.clone(), passkey.key.clone()))
.collect();
let type_ = CredentialType::Webauthn(v_webauthn);
// Require both a uuid source and a valid credential shape.
match (m_uuid, type_.is_valid()) {
(Some(uuid), true) => Ok((tag.clone(), Credential { type_, uuid })),
_ => Err(()),
}
}
ReplCredV1::Password {
tag,
password,
uuid,
} => {
let v_password = Password::try_from(password)?;
let type_ = CredentialType::Password(v_password);
if type_.is_valid() {
Ok((tag.clone(), Credential { type_, uuid: *uuid }))
} else {
Err(())
}
}
ReplCredV1::GenPassword {
tag,
password,
uuid,
} => {
let v_password = Password::try_from(password)?;
let type_ = CredentialType::GeneratedPassword(v_password);
if type_.is_valid() {
Ok((tag.clone(), Credential { type_, uuid: *uuid }))
} else {
Err(())
}
}
ReplCredV1::PasswordMfa {
tag,
password,
totp,
backup_code,
webauthn,
uuid,
} => {
// Convert each MFA component; any single failure aborts the whole
// credential via `?`.
let v_password = Password::try_from(password)?;
let v_totp = totp
.iter()
.map(|(l, dbt)| Totp::try_from(dbt).map(|t| (l.clone(), t)))
.collect::<Result<Map<_, _>, _>>()?;
let v_backup_code = match backup_code {
Some(rbc) => Some(BackupCodes::try_from(rbc)?),
None => None,
};
let v_webauthn = webauthn
.iter()
.map(|sk| (sk.tag.clone(), sk.key.clone()))
.collect();
let type_ =
CredentialType::PasswordMfa(v_password, v_totp, v_webauthn, v_backup_code);
if type_.is_valid() {
Ok((tag.clone(), Credential { type_, uuid: *uuid }))
} else {
Err(())
}
}
}
}
/// Create a new credential that contains a CredentialType::Password
pub fn new_password_only(
policy: &CryptoPolicy,
@ -807,8 +956,7 @@ impl Credential {
let mut nmap = map.clone();
if nmap.insert(label.clone(), cred).is_some() {
return Err(OperationError::InvalidAttribute(format!(
"Webauthn label '{:?}' already exists",
label
"Webauthn label '{label:?}' already exists"
)));
}
CredentialType::PasswordMfa(pw.clone(), totp.clone(), nmap, backup_code.clone())
@ -838,8 +986,7 @@ impl Credential {
let mut nmap = map.clone();
if nmap.remove(label).is_none() {
return Err(OperationError::InvalidAttribute(format!(
"Removing Webauthn token with label '{:?}': does not exist",
label
"Removing Webauthn token with label '{label:?}': does not exist"
)));
}
if nmap.is_empty() {
@ -973,6 +1120,51 @@ impl Credential {
}
}
/// Extract this credential into its Serialisable Replication form.
/// The inverse of `try_from_repl_v1`; the caller supplies the tag under
/// which this credential is stored, and the credential's uuid is embedded
/// into the wire form (per-passkey for the `TmpWn` variant).
pub fn to_repl_v1(&self, tag: String) -> ReplCredV1 {
let uuid = self.uuid;
match &self.type_ {
CredentialType::Password(pw) => ReplCredV1::Password {
tag,
password: pw.to_repl_v1(),
uuid,
},
CredentialType::GeneratedPassword(pw) => ReplCredV1::GenPassword {
tag,
password: pw.to_repl_v1(),
uuid,
},
CredentialType::PasswordMfa(pw, totp, map, backup_code) => ReplCredV1::PasswordMfa {
tag,
password: pw.to_repl_v1(),
// Each MFA component serialises itself; labels/tags are cloned.
totp: totp
.iter()
.map(|(l, t)| (l.clone(), t.to_repl_v1()))
.collect(),
backup_code: backup_code.as_ref().map(|b| b.to_repl_v1()),
webauthn: map
.iter()
.map(|(k, v)| ReplSecurityKeyV4V1 {
tag: k.clone(),
key: v.clone(),
})
.collect(),
uuid,
},
CredentialType::Webauthn(map) => ReplCredV1::TmpWn {
tag,
// The credential uuid is duplicated into every passkey entry so
// try_from_repl_v1 can recover it from the first element.
set: map
.iter()
.map(|(k, v)| ReplPasskeyV4V1 {
uuid,
tag: k.clone(),
key: v.clone(),
})
.collect(),
},
}
}
pub(crate) fn update_password(&self, pw: Password) -> Self {
let type_ = match &self.type_ {
CredentialType::Password(_) | CredentialType::GeneratedPassword(_) => {

View file

@ -8,6 +8,7 @@ use openssl::sign::Signer;
use rand::prelude::*;
use crate::be::dbvalue::{DbTotpAlgoV1, DbTotpV1};
use crate::repl::proto::{ReplTotpAlgoV1, ReplTotpV1};
// This is 64 bits of entropy, as the examples in https://tools.ietf.org/html/rfc6238 show.
const SECRET_SIZE_BYTES: usize = 8;
@ -115,6 +116,27 @@ impl TryFrom<DbTotpV1> for Totp {
}
}
// Reconstruct a TOTP token from its replication wire form. The algorithm and
// digit count are mapped back to their in-memory enums; the secret key bytes
// are cloned out of the wrapper. Fails (Err(())) only if the digit count is
// not representable as `TotpDigits`.
impl TryFrom<&ReplTotpV1> for Totp {
type Error = ();
fn try_from(value: &ReplTotpV1) -> Result<Self, Self::Error> {
let algo = match value.algo {
ReplTotpAlgoV1::S1 => TotpAlgo::Sha1,
ReplTotpAlgoV1::S256 => TotpAlgo::Sha256,
ReplTotpAlgoV1::S512 => TotpAlgo::Sha512,
};
let digits = TotpDigits::try_from(value.digits)?;
Ok(Totp {
secret: value.key.0.clone(),
step: value.step,
algo,
digits,
})
}
}
impl TryFrom<ProtoTotp> for Totp {
type Error = ();
@ -170,6 +192,19 @@ impl Totp {
}
}
/// Serialise this TOTP token into its replication (wire) form.
/// The inverse of `TryFrom<&ReplTotpV1>`; secret bytes are cloned and the
/// algorithm/digits enums are mapped to their wire equivalents.
pub(crate) fn to_repl_v1(&self) -> ReplTotpV1 {
ReplTotpV1 {
key: self.secret.clone().into(),
step: self.step,
algo: match self.algo {
TotpAlgo::Sha1 => ReplTotpAlgoV1::S1,
TotpAlgo::Sha256 => ReplTotpAlgoV1::S256,
TotpAlgo::Sha512 => ReplTotpAlgoV1::S512,
},
digits: self.digits.into(),
}
}
fn digest(&self, counter: u64) -> Result<u32, TotpError> {
let hmac = self.algo.digest(&self.secret, counter)?;
// Now take the hmac and encode it as hotp expects.

File diff suppressed because it is too large Load diff

View file

@ -71,7 +71,7 @@ impl fmt::Debug for MfaRegState {
MfaRegState::TotpInvalidSha1(_, _, _) => "MfaRegState::TotpInvalidSha1",
MfaRegState::Passkey(_, _) => "MfaRegState::Passkey",
};
write!(f, "{}", t)
write!(f, "{t}")
}
}
@ -168,7 +168,7 @@ impl fmt::Debug for MfaRegStateStatus {
MfaRegStateStatus::BackupCodes(_) => "MfaRegStateStatus::BackupCodes",
MfaRegStateStatus::Passkey(_) => "MfaRegStateStatus::Passkey",
};
write!(f, "{}", t)
write!(f, "{t}")
}
}

View file

@ -75,10 +75,10 @@ impl LdapServer {
let basedn = ldap_domain_to_dc(domain_name.as_str());
let dnre = Regex::new(format!("^((?P<attr>[^=]+)=(?P<val>[^=]+),)?{}$", basedn).as_str())
let dnre = Regex::new(format!("^((?P<attr>[^=]+)=(?P<val>[^=]+),)?{basedn}$").as_str())
.map_err(|_| OperationError::InvalidEntryState)?;
let binddnre = Regex::new(format!("^(([^=,]+)=)?(?P<val>[^=,]+)(,{})?$", basedn).as_str())
let binddnre = Regex::new(format!("^(([^=,]+)=)?(?P<val>[^=,]+)(,{basedn})?$").as_str())
.map_err(|_| OperationError::InvalidEntryState)?;
let rootdse = LdapSearchResultEntry {
@ -513,7 +513,7 @@ impl LdapServer {
wr.gen_success(format!("u: {}", u.spn).as_str()),
)),
None => Ok(LdapResponseState::Respond(
wr.gen_operror(format!("Unbound Connection {}", eventid).as_str()),
wr.gen_operror(format!("Unbound Connection {eventid}").as_str()),
)),
},
} // end match server op
@ -542,9 +542,9 @@ fn operationerr_to_ldapresultcode(e: OperationError) -> (LdapResultCode, String)
(LdapResultCode::InvalidAttributeSyntax, s)
}
OperationError::SchemaViolation(se) => {
(LdapResultCode::UnwillingToPerform, format!("{:?}", se))
(LdapResultCode::UnwillingToPerform, format!("{se:?}"))
}
e => (LdapResultCode::Other, format!("{:?}", e)),
e => (LdapResultCode::Other, format!("{e:?}")),
}
}
@ -685,7 +685,7 @@ mod tests {
assert!(admin_t.effective_session == LdapSession::UnixBind(UUID_ADMIN));
let admin_t = task::block_on(ldaps.do_bind(
idms,
format!("uuid={},dc=example,dc=com", STR_UUID_ADMIN).as_str(),
format!("uuid={STR_UUID_ADMIN},dc=example,dc=com").as_str(),
TEST_PASSWORD,
))
.unwrap()
@ -703,7 +703,7 @@ mod tests {
assert!(admin_t.effective_session == LdapSession::UnixBind(UUID_ADMIN));
let admin_t = task::block_on(ldaps.do_bind(
idms,
format!("uuid={}", STR_UUID_ADMIN).as_str(),
format!("uuid={STR_UUID_ADMIN}").as_str(),
TEST_PASSWORD,
))
.unwrap()
@ -725,7 +725,7 @@ mod tests {
assert!(admin_t.effective_session == LdapSession::UnixBind(UUID_ADMIN));
let admin_t = task::block_on(ldaps.do_bind(
idms,
format!("{},dc=example,dc=com", STR_UUID_ADMIN).as_str(),
format!("{STR_UUID_ADMIN},dc=example,dc=com").as_str(),
TEST_PASSWORD,
))
.unwrap()

View file

@ -32,10 +32,10 @@ pub enum AuthState {
impl fmt::Debug for AuthState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
AuthState::Choose(mechs) => write!(f, "AuthState::Choose({:?})", mechs),
AuthState::Continue(allow) => write!(f, "AuthState::Continue({:?})", allow),
AuthState::Denied(reason) => write!(f, "AuthState::Denied({:?})", reason),
AuthState::Success(_token, issue) => write!(f, "AuthState::Success({:?})", issue),
AuthState::Choose(mechs) => write!(f, "AuthState::Choose({mechs:?})"),
AuthState::Continue(allow) => write!(f, "AuthState::Continue({allow:?})"),
AuthState::Denied(reason) => write!(f, "AuthState::Denied({reason:?})"),
AuthState::Success(_token, issue) => write!(f, "AuthState::Success({issue:?})"),
}
}
}

View file

@ -143,10 +143,10 @@ impl fmt::Display for Oauth2TokenType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Oauth2TokenType::Access { session_id, .. } => {
write!(f, "access_token ({}) ", session_id)
write!(f, "access_token ({session_id}) ")
}
Oauth2TokenType::Refresh { session_id, .. } => {
write!(f, "refresh_token ({}) ", session_id)
write!(f, "refresh_token ({session_id}) ")
}
}
}
@ -389,13 +389,13 @@ impl<'a> Oauth2ResourceServersWriteTransaction<'a> {
token_endpoint.set_path("/oauth2/token");
let mut userinfo_endpoint = self.inner.origin.clone();
userinfo_endpoint.set_path(&format!("/oauth2/openid/{}/userinfo", name));
userinfo_endpoint.set_path(&format!("/oauth2/openid/{name}/userinfo"));
let mut jwks_uri = self.inner.origin.clone();
jwks_uri.set_path(&format!("/oauth2/openid/{}/public_key.jwk", name));
jwks_uri.set_path(&format!("/oauth2/openid/{name}/public_key.jwk"));
let mut iss = self.inner.origin.clone();
iss.set_path(&format!("/oauth2/openid/{}", name));
iss.set_path(&format!("/oauth2/openid/{name}"));
let scopes_supported: BTreeSet<String> =
scope_maps
@ -2193,7 +2193,7 @@ mod tests {
);
// * doesn't have :
let client_authz = Some(base64::encode(format!("test_resource_server {}", secret)));
let client_authz = Some(base64::encode(format!("test_resource_server {secret}")));
assert!(
idms_prox_read
.check_oauth2_token_exchange(client_authz.as_deref(), &token_req, ct)
@ -2202,7 +2202,7 @@ mod tests {
);
// * invalid client_id
let client_authz = Some(base64::encode(format!("NOT A REAL SERVER:{}", secret)));
let client_authz = Some(base64::encode(format!("NOT A REAL SERVER:{secret}")));
assert!(
idms_prox_read
.check_oauth2_token_exchange(client_authz.as_deref(), &token_req, ct)
@ -2220,7 +2220,7 @@ mod tests {
);
// ✅ Now the valid client_authz is in place.
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));
let client_authz = Some(base64::encode(format!("test_resource_server:{secret}")));
// * expired exchange code (took too long)
assert!(
idms_prox_read
@ -2304,7 +2304,7 @@ mod tests {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let (secret, uat, ident, _) =
setup_oauth2_resource_server(idms, ct, true, false, false);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));
let client_authz = Some(base64::encode(format!("test_resource_server:{secret}")));
let mut idms_prox_read = task::block_on(idms.proxy_read());
@ -2370,7 +2370,7 @@ mod tests {
)
.expect("Failed to inspect token");
eprintln!("👉 {:?}", intr_response);
eprintln!("👉 {intr_response:?}");
assert!(intr_response.active);
assert!(intr_response.scope.as_deref() == Some("openid supplement"));
assert!(intr_response.client_id.as_deref() == Some("test_resource_server"));
@ -2419,7 +2419,7 @@ mod tests {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let (secret, uat, ident, _) =
setup_oauth2_resource_server(idms, ct, true, false, false);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));
let client_authz = Some(base64::encode(format!("test_resource_server:{secret}")));
let mut idms_prox_read = task::block_on(idms.proxy_read());
@ -2496,7 +2496,7 @@ mod tests {
ct,
)
.expect("Failed to inspect token");
eprintln!("👉 {:?}", intr_response);
eprintln!("👉 {intr_response:?}");
assert!(intr_response.active);
drop(idms_prox_read);
@ -2600,7 +2600,7 @@ mod tests {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let (secret, uat, ident, _) =
setup_oauth2_resource_server(idms, ct, true, false, false);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));
let client_authz = Some(base64::encode(format!("test_resource_server:{secret}")));
let mut idms_prox_read = task::block_on(idms.proxy_read());
@ -2943,7 +2943,7 @@ mod tests {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let (secret, uat, ident, _) =
setup_oauth2_resource_server(idms, ct, true, false, false);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));
let client_authz = Some(base64::encode(format!("test_resource_server:{secret}")));
let mut idms_prox_read = task::block_on(idms.proxy_read());
@ -3081,7 +3081,7 @@ mod tests {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let (secret, uat, ident, _) =
setup_oauth2_resource_server(idms, ct, true, false, true);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));
let client_authz = Some(base64::encode(format!("test_resource_server:{secret}")));
let mut idms_prox_read = task::block_on(idms.proxy_read());
@ -3178,7 +3178,7 @@ mod tests {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let (secret, uat, ident, _) =
setup_oauth2_resource_server(idms, ct, true, false, true);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));
let client_authz = Some(base64::encode(format!("test_resource_server:{secret}")));
let mut idms_prox_read = task::block_on(idms.proxy_read());

View file

@ -788,8 +788,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
let attr_schema = schema.get_attributes().get(scim_attr_name).ok_or_else(|| {
OperationError::InvalidAttribute(format!(
"No such attribute in schema - {}",
scim_attr_name
"No such attribute in schema - {scim_attr_name}"
))
})?;
@ -820,16 +819,14 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
.ok_or_else(|| {
error!("Invalid value - not a valid unsigned integer");
OperationError::InvalidAttribute(format!(
"Invalid unsigned integer - {}",
scim_attr_name
"Invalid unsigned integer - {scim_attr_name}"
))
})
.and_then(|i| {
u32::try_from(i).map_err(|_| {
error!("Invalid value - not within the bounds of a u32");
OperationError::InvalidAttribute(format!(
"Out of bounds unsigned integer - {}",
scim_attr_name
"Out of bounds unsigned integer - {scim_attr_name}"
))
})
})
@ -849,8 +846,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
let external_id = complex.attrs.get("external_id").ok_or_else(|| {
error!("Invalid scim complex attr - missing required key external_id");
OperationError::InvalidAttribute(format!(
"missing required key external_id - {}",
scim_attr_name
"missing required key external_id - {scim_attr_name}"
))
})?;
@ -859,8 +855,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
_ => {
error!("Invalid external_id attribute - must be scim simple string");
Err(OperationError::InvalidAttribute(format!(
"external_id must be scim simple string - {}",
scim_attr_name
"external_id must be scim simple string - {scim_attr_name}"
)))
}
}?;
@ -889,8 +884,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
.ok_or_else(|| {
error!("Invalid scim complex attr - missing required key external_id");
OperationError::InvalidAttribute(format!(
"missing required key external_id - {}",
scim_attr_name
"missing required key external_id - {scim_attr_name}"
))
})
.and_then(|external_id| match external_id {
@ -900,8 +894,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
"Invalid external_id attribute - must be scim simple string"
);
Err(OperationError::InvalidAttribute(format!(
"external_id must be scim simple string - {}",
scim_attr_name
"external_id must be scim simple string - {scim_attr_name}"
)))
}
})?;
@ -912,8 +905,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
.ok_or_else(|| {
error!("Invalid scim complex attr - missing required key secret");
OperationError::InvalidAttribute(format!(
"missing required key secret - {}",
scim_attr_name
"missing required key secret - {scim_attr_name}"
))
})
.and_then(|secret| match secret {
@ -923,16 +915,14 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
.map_err(|_| {
error!("Invalid secret attribute - must be base64 string");
OperationError::InvalidAttribute(format!(
"secret must be base64 string - {}",
scim_attr_name
"secret must be base64 string - {scim_attr_name}"
))
})
}
_ => {
error!("Invalid secret attribute - must be scim simple string");
Err(OperationError::InvalidAttribute(format!(
"secret must be scim simple string - {}",
scim_attr_name
"secret must be scim simple string - {scim_attr_name}"
)))
}
})?;
@ -941,8 +931,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
.ok_or_else(|| {
error!("Invalid scim complex attr - missing required key algo");
OperationError::InvalidAttribute(format!(
"missing required key algo - {}",
scim_attr_name
"missing required key algo - {scim_attr_name}"
))
})
.and_then(|algo_str| {
@ -955,8 +944,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
_ => {
error!("Invalid algo attribute - must be one of sha1, sha256 or sha512");
Err(OperationError::InvalidAttribute(format!(
"algo must be one of sha1, sha256 or sha512 - {}",
scim_attr_name
"algo must be one of sha1, sha256 or sha512 - {scim_attr_name}"
)))
}
}
@ -964,8 +952,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
_ => {
error!("Invalid algo attribute - must be scim simple string");
Err(OperationError::InvalidAttribute(format!(
"algo must be scim simple string - {}",
scim_attr_name
"algo must be scim simple string - {scim_attr_name}"
)))
}
}
@ -974,8 +961,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
let step = complex.attrs.get("step").ok_or_else(|| {
error!("Invalid scim complex attr - missing required key step");
OperationError::InvalidAttribute(format!(
"missing required key step - {}",
scim_attr_name
"missing required key step - {scim_attr_name}"
))
}).and_then(|step| {
match step {
@ -984,16 +970,14 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
Some(s) if s >= 30 => Ok(s),
_ =>
Err(OperationError::InvalidAttribute(format!(
"step must be a positive integer value equal to or greater than 30 - {}",
scim_attr_name
"step must be a positive integer value equal to or greater than 30 - {scim_attr_name}"
))),
}
}
_ => {
error!("Invalid step attribute - must be scim simple number");
Err(OperationError::InvalidAttribute(format!(
"step must be scim simple number - {}",
scim_attr_name
"step must be scim simple number - {scim_attr_name}"
)))
}
}
@ -1005,8 +989,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
.ok_or_else(|| {
error!("Invalid scim complex attr - missing required key digits");
OperationError::InvalidAttribute(format!(
"missing required key digits - {}",
scim_attr_name
"missing required key digits - {scim_attr_name}"
))
})
.and_then(|digits| match digits {
@ -1014,15 +997,13 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
Some(6) => Ok(TotpDigits::Six),
Some(8) => Ok(TotpDigits::Eight),
_ => Err(OperationError::InvalidAttribute(format!(
"digits must be a positive integer value of 6 OR 8 - {}",
scim_attr_name
"digits must be a positive integer value of 6 OR 8 - {scim_attr_name}"
))),
},
_ => {
error!("Invalid digits attribute - must be scim simple number");
Err(OperationError::InvalidAttribute(format!(
"digits must be scim simple number - {}",
scim_attr_name
"digits must be scim simple number - {scim_attr_name}"
)))
}
})?;
@ -1035,8 +1016,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
(syn, mv, sa) => {
error!(?syn, ?mv, ?sa, "Unsupported scim attribute conversion. This may be a syntax error in your import, or a missing feature in Kanidm.");
Err(OperationError::InvalidAttribute(format!(
"Unsupported attribute conversion - {}",
scim_attr_name
"Unsupported attribute conversion - {scim_attr_name}"
)))
}
}
@ -1951,7 +1931,7 @@ mod tests {
assert!(task::block_on(apply_phase_3_test(
idms,
vec![ScimEntry {
schemas: vec![format!("{}system", SCIM_SCHEMA_SYNC)],
schemas: vec![format!("{SCIM_SCHEMA_SYNC}system")],
id: user_sync_uuid,
external_id: Some("cn=testgroup,ou=people,dc=test".to_string()),
meta: None,

View file

@ -179,7 +179,7 @@ impl IdmServer {
let valid = url.domain().map(|effective_domain| {
// We need to prepend the '.' here to ensure that myexample.com != example.com,
// rather than just ends with.
effective_domain.ends_with(&format!(".{}", rp_id))
effective_domain.ends_with(&format!(".{rp_id}"))
|| effective_domain == rp_id
}).unwrap_or(false);

View file

@ -69,8 +69,8 @@ pub mod prelude {
pub use crate::constants::*;
pub use crate::entry::{
Entry, EntryCommitted, EntryInit, EntryInitNew, EntryInvalid, EntryInvalidCommitted,
EntryInvalidNew, EntryNew, EntryReduced, EntryReducedCommitted, EntrySealed,
EntrySealedCommitted, EntrySealedNew, EntryTuple, EntryValid,
EntryInvalidNew, EntryNew, EntryReduced, EntryReducedCommitted, EntryRefresh,
EntryRefreshNew, EntrySealed, EntrySealedCommitted, EntrySealedNew, EntryTuple, EntryValid,
};
pub use crate::event::{CreateEvent, DeleteEvent, ExistsEvent, ModifyEvent, SearchEvent};
pub use crate::filter::{

View file

@ -288,18 +288,6 @@ macro_rules! run_delete_test {
}};
}
#[cfg(test)]
macro_rules! run_entrychangelog_test {
($test_fn:expr) => {{
let _ = sketching::test_init();
let schema_outer = Schema::new().expect("Failed to init schema");
let schema_txn = schema_outer.read();
$test_fn(&schema_txn)
}};
}
#[allow(unused_macros)]
#[macro_export]
macro_rules! modlist {

View file

@ -21,6 +21,9 @@ fn get_cand_attr_set<VALID, STATE>(
cand: &[Entry<VALID, STATE>],
attr: &str,
) -> Result<BTreeMap<PartialValue, Uuid>, OperationError> {
// This builds the set of values to search for uniqueness, but ALSO
// detects whether any modified or current entries in the cand set duplicate
// each other, due to enforcing that the PartialValue must be unique in the cand_attr set.
let mut cand_attr: BTreeMap<PartialValue, Uuid> = BTreeMap::new();
cand.iter()
@ -53,22 +56,18 @@ fn get_cand_attr_set<VALID, STATE>(
.map(|()| cand_attr)
}
fn enforce_unique<STATE>(
fn enforce_unique<VALID, STATE>(
qs: &mut QueryServerWriteTransaction,
cand: &[Entry<EntryInvalid, STATE>],
cand: &[Entry<VALID, STATE>],
attr: &str,
) -> Result<(), OperationError> {
trace!(?attr);
// Build a set of all the value -> uuid for the cands.
// If already exist, reject due to dup.
let cand_attr = get_cand_attr_set(cand, attr).map_err(|e| {
admin_error!(err = ?e, "failed to get cand attr set");
admin_error!(err = ?e, ?attr, "failed to get cand attr set");
e
})?;
trace!(?cand_attr);
// No candidates to check!
if cand_attr.is_empty() {
return Ok(());
@ -235,6 +234,21 @@ impl Plugin for AttrUnique {
r
}
fn pre_repl_refresh(
qs: &mut QueryServerWriteTransaction,
cand: &[EntryRefreshNew],
) -> Result<(), OperationError> {
let uniqueattrs = {
let schema = qs.get_schema();
schema.get_attributes_unique()
};
let r: Result<(), OperationError> = uniqueattrs
.iter()
.try_for_each(|attr| enforce_unique(qs, cand, attr.as_str()));
r
}
#[instrument(level = "debug", name = "attrunique_verify", skip(qs))]
fn verify(qs: &mut QueryServerReadTransaction) -> Vec<Result<(), ConsistencyError>> {
// Only check live entries, not recycled.

View file

@ -35,8 +35,7 @@ fn apply_gidnumber<T: Clone>(e: &mut Entry<EntryInvalid, T>) -> Result<(), Opera
// assert the value is greater than the system range.
if gid < GID_SYSTEM_NUMBER_MIN {
return Err(OperationError::InvalidAttribute(format!(
"gidnumber {} may overlap with system range {}",
gid, GID_SYSTEM_NUMBER_MIN
"gidnumber {gid} may overlap with system range {GID_SYSTEM_NUMBER_MIN}"
)));
}
@ -48,8 +47,7 @@ fn apply_gidnumber<T: Clone>(e: &mut Entry<EntryInvalid, T>) -> Result<(), Opera
// If they provided us with a gid number, ensure it's in a safe range.
if gid <= GID_SAFETY_NUMBER_MIN {
Err(OperationError::InvalidAttribute(format!(
"gidnumber {} overlaps into system secure range {}",
gid, GID_SAFETY_NUMBER_MIN
"gidnumber {gid} overlaps into system secure range {GID_SAFETY_NUMBER_MIN}"
)))
} else {
Ok(())

View file

@ -216,27 +216,16 @@ impl Plugin for MemberOf {
cand: &[Entry<EntrySealed, EntryCommitted>],
ce: &CreateEvent,
) -> Result<(), OperationError> {
let dyngroup_change = super::dyngroup::DynGroup::post_create(qs, cand, &ce.ident)?;
Self::post_create_inner(qs, cand, &ce.ident)
}
let group_affect = cand
.iter()
.map(|e| e.get_uuid())
.chain(dyngroup_change.into_iter())
.chain(
cand.iter()
.filter_map(|e| {
// Is it a group?
if e.attribute_equality("class", &PVCLASS_GROUP) {
e.get_ava_as_refuuid("member")
} else {
None
}
})
.flatten(),
)
.collect();
apply_memberof(qs, group_affect)
    /// After a replication refresh writes its entries, recompute memberof.
    ///
    /// A refresh behaves like a bulk create of every entry, but there is no
    /// originating user event, so the shared create path runs under the
    /// internal identity.
    #[instrument(level = "debug", name = "memberof_post_repl_refresh", skip_all)]
    fn post_repl_refresh(
        qs: &mut QueryServerWriteTransaction,
        cand: &[Entry<EntrySealed, EntryCommitted>],
    ) -> Result<(), OperationError> {
        let ident = Identity::from_internal();
        Self::post_create_inner(qs, cand, &ident)
    }
#[instrument(level = "debug", name = "memberof_post_modify", skip_all)]
@ -376,6 +365,34 @@ impl Plugin for MemberOf {
}
impl MemberOf {
    /// Shared worker for post_create and post_repl_refresh: recompute the
    /// memberof state of every entry affected by the newly written candidates.
    fn post_create_inner(
        qs: &mut QueryServerWriteTransaction,
        cand: &[Entry<EntrySealed, EntryCommitted>],
        ident: &Identity,
    ) -> Result<(), OperationError> {
        // Let dynamic groups react to the new entries first - they may report
        // additional group uuids that are affected.
        let dyngroup_change = super::dyngroup::DynGroup::post_create(qs, cand, ident)?;

        // The affected set is: the new entries themselves, plus any dyngroup
        // changes, plus the members of any newly created groups.
        let group_affect = cand
            .iter()
            .map(|e| e.get_uuid())
            .chain(dyngroup_change.into_iter())
            .chain(
                cand.iter()
                    .filter_map(|e| {
                        // Is it a group?
                        if e.attribute_equality("class", &PVCLASS_GROUP) {
                            e.get_ava_as_refuuid("member")
                        } else {
                            None
                        }
                    })
                    .flatten(),
            )
            .collect();

        apply_memberof(qs, group_affect)
    }
fn post_modify_inner(
qs: &mut QueryServerWriteTransaction,
pre_cand: &[Arc<EntrySealedCommitted>],

View file

@ -124,6 +124,28 @@ trait Plugin {
Err(OperationError::InvalidState)
}
    /// Hook called with the full candidate set before a replication refresh
    /// writes entries to the backend.
    ///
    /// The default implementation returns an error so that any plugin wired
    /// into the refresh path must opt in with an explicit implementation.
    fn pre_repl_refresh(
        _qs: &mut QueryServerWriteTransaction,
        _cand: &[EntryRefreshNew],
    ) -> Result<(), OperationError> {
        admin_error!(
            "plugin {} has an unimplemented pre_repl_refresh!",
            Self::id()
        );
        Err(OperationError::InvalidState)
    }
    /// Hook called with the committed entries after a replication refresh has
    /// written them to the backend.
    ///
    /// The default implementation returns an error so that any plugin wired
    /// into the refresh path must opt in with an explicit implementation.
    fn post_repl_refresh(
        _qs: &mut QueryServerWriteTransaction,
        _cand: &[EntrySealedCommitted],
    ) -> Result<(), OperationError> {
        admin_error!(
            "plugin {} has an unimplemented post_repl_refresh!",
            Self::id()
        );
        Err(OperationError::InvalidState)
    }
fn verify(_qs: &mut QueryServerReadTransaction) -> Vec<Result<(), ConsistencyError>> {
admin_error!("plugin {} has an unimplemented verify!", Self::id());
vec![Err(ConsistencyError::Unknown)]
@ -258,6 +280,23 @@ impl Plugins {
.and_then(|_| memberof::MemberOf::post_delete(qs, cand, de))
}
    /// Run all plugins that participate before a replication refresh write.
    /// Currently only attrunique needs to validate the incoming candidate set.
    #[instrument(level = "debug", name = "plugins::run_pre_repl_refresh", skip_all)]
    pub fn run_pre_repl_refresh(
        qs: &mut QueryServerWriteTransaction,
        cand: &[EntryRefreshNew],
    ) -> Result<(), OperationError> {
        attrunique::AttrUnique::pre_repl_refresh(qs, cand)
    }
    /// Run all plugins that participate after a replication refresh write:
    /// referential integrity first, then memberof recalculation over the
    /// freshly committed entries.
    #[instrument(level = "debug", name = "plugins::run_post_repl_refresh", skip_all)]
    pub fn run_post_repl_refresh(
        qs: &mut QueryServerWriteTransaction,
        cand: &[EntrySealedCommitted],
    ) -> Result<(), OperationError> {
        refint::ReferentialIntegrity::post_repl_refresh(qs, cand)
            .and_then(|_| memberof::MemberOf::post_repl_refresh(qs, cand))
    }
#[instrument(level = "debug", name = "plugins::run_verify", skip_all)]
pub fn run_verify(
qs: &mut QueryServerReadTransaction,

View file

@ -109,6 +109,14 @@ impl Plugin for ReferentialIntegrity {
Self::post_modify_inner(qs, cand)
}
    /// After a replication refresh, verify references over the written set.
    /// A refresh is equivalent to a bulk modify for reference checking, so the
    /// post_modify worker is reused over the whole candidate set.
    #[instrument(level = "debug", name = "refint_post_repl_refresh", skip_all)]
    fn post_repl_refresh(
        qs: &mut QueryServerWriteTransaction,
        cand: &[EntrySealedCommitted],
    ) -> Result<(), OperationError> {
        Self::post_modify_inner(qs, cand)
    }
#[instrument(level = "debug", name = "refint_post_delete", skip_all)]
fn post_delete(
qs: &mut QueryServerWriteTransaction,

View file

@ -9,29 +9,28 @@ use serde::{Deserialize, Serialize};
pub struct Cid {
// Mental note: Derive ord always checks in order of struct fields.
pub ts: Duration,
pub d_uuid: Uuid,
pub s_uuid: Uuid,
}
impl fmt::Display for Cid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}-{}-{}", self.ts.as_nanos(), self.d_uuid, self.s_uuid)
write!(f, "{:032}-{}", self.ts.as_nanos(), self.s_uuid)
}
}
impl Cid {
#[cfg(test)]
pub(crate) fn new(d_uuid: Uuid, s_uuid: Uuid, ts: Duration) -> Self {
Cid { d_uuid, s_uuid, ts }
pub(crate) fn new(s_uuid: Uuid, ts: Duration) -> Self {
Cid { s_uuid, ts }
}
pub fn new_lamport(d_uuid: Uuid, s_uuid: Uuid, ts: Duration, max_ts: &Duration) -> Self {
pub fn new_lamport(s_uuid: Uuid, ts: Duration, max_ts: &Duration) -> Self {
let ts = if ts > *max_ts {
ts
} else {
*max_ts + Duration::from_nanos(1)
};
Cid { ts, d_uuid, s_uuid }
Cid { ts, s_uuid }
}
#[cfg(test)]
@ -42,7 +41,6 @@ impl Cid {
#[cfg(test)]
pub unsafe fn new_count(c: u64) -> Self {
Cid {
d_uuid: uuid!("00000000-0000-0000-0000-000000000000"),
s_uuid: uuid!("00000000-0000-0000-0000-000000000000"),
ts: Duration::new(c, 0),
}
@ -51,7 +49,6 @@ impl Cid {
#[cfg(test)]
pub fn new_random_s_d(ts: Duration) -> Self {
Cid {
d_uuid: Uuid::new_v4(),
s_uuid: Uuid::new_v4(),
ts,
}
@ -62,7 +59,6 @@ impl Cid {
self.ts
.checked_sub(Duration::from_secs(secs))
.map(|r| Cid {
d_uuid: uuid!("00000000-0000-0000-0000-000000000000"),
s_uuid: uuid!("00000000-0000-0000-0000-000000000000"),
ts: r,
})
@ -82,12 +78,10 @@ mod tests {
fn test_cid_ordering() {
// Check diff ts
let cid_a = Cid::new(
uuid!("00000000-0000-0000-0000-000000000001"),
uuid!("00000000-0000-0000-0000-000000000001"),
Duration::new(5, 0),
);
let cid_b = Cid::new(
uuid!("00000000-0000-0000-0000-000000000001"),
uuid!("00000000-0000-0000-0000-000000000001"),
Duration::new(15, 0),
);
@ -96,30 +90,12 @@ mod tests {
assert!(cid_a.cmp(&cid_b) == Ordering::Less);
assert!(cid_b.cmp(&cid_a) == Ordering::Greater);
// check same ts diff d_uuid
let cid_c = Cid::new(
uuid!("00000000-0000-0000-0000-000000000000"),
uuid!("00000000-0000-0000-0000-000000000001"),
Duration::new(5, 0),
);
let cid_d = Cid::new(
uuid!("00000000-0000-0000-0000-000000000001"),
uuid!("00000000-0000-0000-0000-000000000001"),
Duration::new(5, 0),
);
assert!(cid_c.cmp(&cid_c) == Ordering::Equal);
assert!(cid_c.cmp(&cid_d) == Ordering::Less);
assert!(cid_d.cmp(&cid_c) == Ordering::Greater);
// check same ts, d_uuid, diff s_uuid
let cid_e = Cid::new(
uuid!("00000000-0000-0000-0000-000000000001"),
uuid!("00000000-0000-0000-0000-000000000000"),
Duration::new(5, 0),
);
let cid_f = Cid::new(
uuid!("00000000-0000-0000-0000-000000000001"),
uuid!("00000000-0000-0000-0000-000000000001"),
Duration::new(5, 0),
);
@ -131,8 +107,7 @@ mod tests {
#[test]
fn test_cid_lamport() {
let d_uuid = uuid!("00000000-0000-0000-0000-000000000001");
let s_uuid = d_uuid;
let s_uuid = uuid!("00000000-0000-0000-0000-000000000001");
let ts5 = Duration::new(5, 0);
let ts10 = Duration::new(10, 0);
@ -140,12 +115,12 @@ mod tests {
let cid_z = unsafe { Cid::new_zero() };
let cid_a = Cid::new_lamport(d_uuid, s_uuid, ts5, &ts5);
let cid_a = Cid::new_lamport(s_uuid, ts5, &ts5);
assert!(cid_a.cmp(&cid_z) == Ordering::Greater);
let cid_b = Cid::new_lamport(d_uuid, s_uuid, ts15, &ts10);
let cid_b = Cid::new_lamport(s_uuid, ts15, &ts10);
assert!(cid_b.cmp(&cid_a) == Ordering::Greater);
// Even with an older ts, we should still step forward.
let cid_c = Cid::new_lamport(d_uuid, s_uuid, ts10, &ts15);
let cid_c = Cid::new_lamport(s_uuid, ts10, &ts15);
assert!(cid_c.cmp(&cid_b) == Ordering::Greater);
}
}

View file

@ -0,0 +1,214 @@
use super::proto::*;
use crate::plugins::Plugins;
use crate::prelude::*;
impl<'a> QueryServerReadTransaction<'a> {
    // Get the current state of "where we are up to"
    //
    // There are two approaches we can use here. We can either store a cookie
    // related to the supplier we are fetching from, or we can use our RUV state.
    //
    // Initially I'm using RUV state, because it lets us select exactly what has
    // changed, where the cookie approach is more coarse grained. The cookie also
    // requires some more knowledge about what supplier we are communicating too
    // where the RUV approach doesn't since the supplier calcs the diff.
    #[instrument(level = "debug", skip_all)]
    pub fn consumer_get_state(&mut self) -> Result<(), OperationError> {
        // NOTE(review): stub - always returns Ok(()) and computes nothing yet;
        // the RUV-based consumer state selection described above is unimplemented.
        Ok(())
    }
}
impl<'a> QueryServerWriteTransaction<'a> {
    // Apply the state changes if they are valid.
    #[instrument(level = "debug", skip_all)]
    pub fn consumer_apply_changes(&mut self) -> Result<(), OperationError> {
        // NOTE(review): stub - incremental replication apply is not implemented
        // yet; this always succeeds without touching any state.
        Ok(())
    }
    /// Apply a full refresh payload received from a supplier.
    ///
    /// Dispatches on the wire-format version of the `ReplRefreshContext` to
    /// the matching versioned handler.
    pub fn consumer_apply_refresh(
        &mut self,
        ctx: &ReplRefreshContext,
    ) -> Result<(), OperationError> {
        match ctx {
            ReplRefreshContext::V1 {
                domain_version,
                domain_uuid,
                schema_entries,
                meta_entries,
                entries,
            } => self.consumer_apply_refresh_v1(
                *domain_version,
                *domain_uuid,
                schema_entries,
                meta_entries,
                entries,
            ),
        }
    }
fn consumer_refresh_create_entries(
&mut self,
ctx_entries: &[ReplEntryV1],
) -> Result<(), OperationError> {
let candidates = ctx_entries
.iter()
.map(EntryRefreshNew::from_repl_entry_v1)
.collect::<Result<Vec<EntryRefreshNew>, _>>()
.map_err(|e| {
error!("Failed to convert entries from supplier");
e
})?;
Plugins::run_pre_repl_refresh(self, candidates.as_slice()).map_err(|e| {
admin_error!(
"Refresh operation failed (pre_repl_refresh plugin), {:?}",
e
);
e
})?;
// No need to assign CID's since this is a repl import.
let norm_cand = candidates
.into_iter()
.map(|e| {
e.validate(&self.schema)
.map_err(|e| {
admin_error!("Schema Violation in create validate {:?}", e);
OperationError::SchemaViolation(e)
})
.map(|e| {
// Then seal the changes?
e.seal(&self.schema)
})
})
.collect::<Result<Vec<EntrySealedNew>, _>>()?;
// Do not run plugs!
let commit_cand = self.be_txn.refresh(norm_cand).map_err(|e| {
admin_error!("betxn create failure {:?}", e);
e
})?;
Plugins::run_post_repl_refresh(self, &commit_cand).map_err(|e| {
admin_error!(
"Refresh operation failed (post_repl_refresh plugin), {:?}",
e
);
e
})?;
self.changed_uuid
.extend(commit_cand.iter().map(|e| e.get_uuid()));
Ok(())
}
#[instrument(level = "debug", skip_all)]
fn consumer_apply_refresh_v1(
&mut self,
ctx_domain_version: DomainVersion,
ctx_domain_uuid: Uuid,
ctx_schema_entries: &[ReplEntryV1],
ctx_meta_entries: &[ReplEntryV1],
ctx_entries: &[ReplEntryV1],
) -> Result<(), OperationError> {
// Can we apply the domain version validly?
// if domain_version >= min_support ...
if ctx_domain_version < DOMAIN_MIN_LEVEL {
error!("Unable to proceed with consumer refresh - incoming domain level is lower than our minimum supported level. {} < {}", ctx_domain_version, DOMAIN_MIN_LEVEL);
return Err(OperationError::ReplDomainLevelUnsatisfiable);
} else if ctx_domain_version > DOMAIN_MAX_LEVEL {
error!("Unable to proceed with consumer refresh - incoming domain level is greater than our maximum supported level. {} > {}", ctx_domain_version, DOMAIN_MAX_LEVEL);
return Err(OperationError::ReplDomainLevelUnsatisfiable);
} else {
debug!(
"Proceeding to refresh from domain at level {}",
ctx_domain_version
);
};
// == ⚠️ Below this point we begin to make changes! ==
// Update the d_uuid. This is what defines us as being part of this repl topology!
self.be_txn.set_db_d_uuid(ctx_domain_uuid).map_err(|e| {
error!("Failed to reset domain uuid");
e
})?;
// Do we need to reset our s_uuid to avoid potential RUV conflicts?
// - I don't think so, since the refresh is supplying and rebuilding
// our local state.
// Delete all entries - *proper delete, not just tombstone!*
self.be_txn.danger_delete_all_db_content().map_err(|e| {
error!("Failed to clear existing server database content");
e
})?;
// Reset this transactions schema to a completely clean slate.
self.schema.generate_in_memory().map_err(|e| {
error!("Failed to reset in memory schema to clean state");
e
})?;
// Apply the schema entries first. This is the foundation that everything
// else will build upon!
self.consumer_refresh_create_entries(ctx_schema_entries)
.map_err(|e| {
error!("Failed to refresh schema entries");
e
})?;
// We need to reload schema now!
self.reload_schema().map_err(|e| {
error!("Failed to reload schema");
e
})?;
// We have to reindex to force all the existing indexes to be dumped
// and recreated before we start to import.
self.reindex().map_err(|e| {
error!("Failed to reload schema");
e
})?;
// Apply the domain info entry / system info / system config entry?
self.consumer_refresh_create_entries(ctx_meta_entries)
.map_err(|e| {
error!("Failed to refresh schema entries");
e
})?;
// NOTE: The domain info we receive here will have the domain version populated!
// That's okay though, because all the incoming data is already at the right
// version!
self.reload_domain_info().map_err(|e| {
error!("Failed to reload domain info");
e
})?;
// Mark that everything changed so that post commit hooks function as expected.
self.changed_schema = true;
self.changed_acp = true;
self.changed_oauth2 = true;
self.changed_domain = true;
// That's it! We are GOOD to go!
// Create all the entries. Note we don't hit plugins here beside post repl plugs.
self.consumer_refresh_create_entries(ctx_entries)
.map_err(|e| {
error!("Failed to refresh schema entries");
e
})?;
// Run post repl plugins
Ok(())
}
}

View file

@ -0,0 +1,601 @@
use std::collections::btree_map::Keys;
use std::collections::BTreeMap;
use std::fmt;
use std::ops::Bound;
use std::ops::Bound::*;
use kanidm_proto::v1::ConsistencyError;
use super::cid::Cid;
use crate::entry::{compare_attrs, Eattrs};
use crate::prelude::*;
use crate::schema::SchemaTransaction;
use crate::valueset;
/// Per-entry replication changelog: point-in-time anchors of the entry's state
/// plus the per-Cid changes that transform one anchor into the next.
#[derive(Debug, Clone)]
pub struct EntryChangelog {
    /// The set of "entries as they existed at a point in time". This allows us to rewind
    /// to a point-in-time, and then to start to "replay" applying all changes again.
    ///
    /// A subtle and important piece of information is that an anchor can be considered
    /// as the "state as existing between two Cid's". This means for Cid X, this state is
    /// the "moment before X". This is important, as for a create we define the initial anchor
    /// as "nothing". It's means for the anchor at time X, that changes that occurred at time
    /// X have NOT been replayed and applied!
    anchors: BTreeMap<Cid, State>,
    /// The change (transaction) recorded at each Cid, in Cid (time) order.
    changes: BTreeMap<Cid, Change>,
}
/*
impl fmt::Display for EntryChangelog {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f
}
}
*/
/// A change defines the transitions that occurred within this Cid (transaction). A change is applied
/// as a whole, or rejected during the replay process.
#[derive(Debug, Clone)]
pub struct Change {
    /// The ordered sequence of transitions making up this transaction.
    s: Vec<Transition>,
}
/// The state of an entry between two Cid's, as stored in an anchor.
#[derive(Debug, Clone)]
enum State {
    /// The entry does not exist (the initial state before a create).
    NonExistent,
    /// The entry is live, with the given attribute set.
    Live(Eattrs),
    /// The entry has been recycled (soft-deleted), retaining these attributes.
    Recycled(Eattrs),
    /// The entry has been tombstoned, retaining only tombstone attributes.
    Tombstone(Eattrs),
}
impl fmt::Display for State {
    /// Render only the state name; the attribute contents are omitted.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            State::NonExistent => "NonExistent",
            State::Live(_) => "Live",
            State::Recycled(_) => "Recycled",
            State::Tombstone(_) => "Tombstone",
        };
        f.write_str(label)
    }
}
/// A single step within a Change: how the entry's state or attributes alter.
#[derive(Debug, Clone)]
enum Transition {
    /// Create the entry with these initial attributes.
    Create(Eattrs),
    /// Remove the whole attribute.
    ModifyPurge(AttrString),
    /// Add one value to an attribute.
    ModifyPresent(AttrString, Box<Value>),
    /// Remove one value from an attribute.
    ModifyRemoved(AttrString, Box<PartialValue>),
    /// Assert the attribute contains the value; replay rejects the change if not.
    ModifyAssert(AttrString, Box<PartialValue>),
    /// Move a live entry to the recycled state.
    Recycle,
    /// Restore a recycled entry to live.
    Revive,
    /// Replace the entry with a tombstone holding these attributes.
    Tombstone(Eattrs),
}
impl fmt::Display for Transition {
    /// Render the transition kind; attribute transitions include the attr name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Transition::Create(_) => f.write_str("Create"),
            Transition::ModifyPurge(attr) => write!(f, "ModifyPurge({attr})"),
            Transition::ModifyPresent(attr, _) => write!(f, "ModifyPresent({attr})"),
            Transition::ModifyRemoved(attr, _) => write!(f, "ModifyRemoved({attr})"),
            Transition::ModifyAssert(attr, _) => write!(f, "ModifyAssert({attr})"),
            Transition::Recycle => f.write_str("Recycle"),
            Transition::Revive => f.write_str("Revive"),
            Transition::Tombstone(_) => f.write_str("Tombstone"),
        }
    }
}
impl State {
    /// Replay one `Change` (a whole transaction) against this state.
    ///
    /// Transitions apply in order. The first (state, transition) pairing that
    /// is invalid rejects the change, and the state reached *so far* is
    /// returned as the `Err` value for the caller to inspect.
    fn apply_change(self, change: &Change) -> Result<Self, Self> {
        let mut state = self;
        for transition in change.s.iter() {
            match (&mut state, transition) {
                (State::NonExistent, Transition::Create(attrs)) => {
                    trace!("NonExistent + Create -> Live");
                    state = State::Live(attrs.clone());
                }
                (State::Live(ref mut attrs), Transition::ModifyPurge(attr)) => {
                    trace!("Live + ModifyPurge({}) -> Live", attr);
                    attrs.remove(attr);
                }
                (State::Live(ref mut attrs), Transition::ModifyPresent(attr, value)) => {
                    trace!("Live + ModifyPresent({}) -> Live", attr);
                    if let Some(vs) = attrs.get_mut(attr) {
                        let r = vs.insert_checked(value.as_ref().clone());
                        assert!(r.is_ok());
                        // Reject if it fails?
                    } else {
                        // First value for this attribute - build a fresh valueset.
                        #[allow(clippy::expect_used)]
                        let vs = valueset::from_value_iter(std::iter::once(value.as_ref().clone()))
                            .expect("Unable to fail - always single value, and only one type!");
                        attrs.insert(attr.clone(), vs);
                    }
                }
                (State::Live(ref mut attrs), Transition::ModifyRemoved(attr, value)) => {
                    trace!("Live + ModifyRemoved({}) -> Live", attr);
                    // Drop the attribute entirely when its last value is removed.
                    let rm = if let Some(vs) = attrs.get_mut(attr) {
                        vs.remove(value);
                        vs.is_empty()
                    } else {
                        false
                    };
                    if rm {
                        attrs.remove(attr);
                    };
                }
                (State::Live(ref mut attrs), Transition::ModifyAssert(attr, value)) => {
                    trace!("Live + ModifyAssert({}) -> Live", attr);
                    if attrs
                        .get(attr)
                        .map(|vs| vs.contains(value))
                        .unwrap_or(false)
                    {
                        // Valid
                    } else {
                        warn!("{} + {:?} -> Assertion not met - REJECTING", attr, value);
                        return Err(state);
                    }
                }
                (State::Live(attrs), Transition::Recycle) => {
                    trace!("Live + Recycle -> Recycled");
                    state = State::Recycled(attrs.clone());
                }
                (State::Live(_), Transition::Tombstone(attrs)) => {
                    trace!("Live + Tombstone -> Tombstone");
                    state = State::Tombstone(attrs.clone());
                }
                (State::Recycled(attrs), Transition::Revive) => {
                    trace!("Recycled + Revive -> Live");
                    state = State::Live(attrs.clone());
                }
                (State::Recycled(ref mut attrs), Transition::ModifyPurge(attr)) => {
                    trace!("Recycled + ModifyPurge({}) -> Recycled", attr);
                    attrs.remove(attr);
                }
                (State::Recycled(attrs), Transition::ModifyRemoved(attr, value)) => {
                    trace!("Recycled + ModifyRemoved({}) -> Recycled", attr);
                    let rm = if let Some(vs) = attrs.get_mut(attr) {
                        vs.remove(value);
                        vs.is_empty()
                    } else {
                        false
                    };
                    if rm {
                        attrs.remove(attr);
                    };
                }
                (State::Recycled(_), Transition::Tombstone(attrs)) => {
                    trace!("Recycled + Tombstone -> Tombstone");
                    state = State::Tombstone(attrs.clone());
                }
                // ==============================
                // Invalid States
                /*
                (State::NonExistent, Transition::ModifyPurge(_))
                | (State::NonExistent, Transition::ModifyPresent(_, _))
                | (State::NonExistent, Transition::ModifyRemoved(_, _))
                | (State::NonExistent, Transition::Recycle)
                | (State::NonExistent, Transition::Revive)
                | (State::NonExistent, Transition::Tombstone(_))
                | (State::Live(_), Transition::Create(_))
                | (State::Live(_), Transition::Revive)
                | (State::Recycled(_), Transition::Create(_))
                | (State::Recycled(_), Transition::Recycle)
                | (State::Recycled(_), Transition::ModifyPresent(_, _))
                | (State::Tombstone(_), _)
                */
                (s, t) => {
                    warn!("{} + {} -> REJECTING", s, t);
                    return Err(state);
                }
            };
        }

        // Everything must have applied, all good then.
        trace!(?state, "applied changes");
        Ok(state)
    }
}
impl EntryChangelog {
pub fn new(cid: Cid, attrs: Eattrs, _schema: &dyn SchemaTransaction) -> Self {
// I think we need to reduce the attrs based on what is / is not replicated.?
let anchors = btreemap![(cid.clone(), State::NonExistent)];
let changes = btreemap![(
cid,
Change {
s: vec![Transition::Create(attrs)]
}
)];
EntryChangelog { anchors, changes }
}
// TODO: work out if the below comment about uncommenting is still valid
// Uncomment this once we have a real on-disk storage of the changelog
pub fn new_without_schema(cid: Cid, attrs: Eattrs) -> Self {
// I think we need to reduce the attrs based on what is / is not replicated.?
// We need to pick a state that reflects the current state WRT to tombstone
// or recycled!
let class = attrs.get("class");
let (anchors, changes) =
if class
.as_ref()
.map(|c| c.contains(&PVCLASS_TOMBSTONE as &PartialValue))
.unwrap_or(false)
{
(btreemap![(cid, State::Tombstone(attrs))], BTreeMap::new())
} else if class
.as_ref()
.map(|c| c.contains(&PVCLASS_RECYCLED as &PartialValue))
.unwrap_or(false)
{
(btreemap![(cid, State::Recycled(attrs))], BTreeMap::new())
} else {
(
btreemap![(cid.clone(), State::NonExistent)],
btreemap![(
cid,
Change {
s: vec![Transition::Create(attrs)]
}
)],
)
};
EntryChangelog { anchors, changes }
}
pub fn add_ava_iter<T>(&mut self, cid: &Cid, attr: &str, viter: T)
where
T: IntoIterator<Item = Value>,
{
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
viter
.into_iter()
.map(|v| Transition::ModifyPresent(AttrString::from(attr), Box::new(v)))
.for_each(|t| change.s.push(t));
}
pub fn remove_ava_iter<T>(&mut self, cid: &Cid, attr: &str, viter: T)
where
T: IntoIterator<Item = PartialValue>,
{
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
viter
.into_iter()
.map(|v| Transition::ModifyRemoved(AttrString::from(attr), Box::new(v)))
.for_each(|t| change.s.push(t));
}
pub fn assert_ava(&mut self, cid: &Cid, attr: &str, value: PartialValue) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::ModifyAssert(
AttrString::from(attr),
Box::new(value),
))
}
pub fn purge_ava(&mut self, cid: &Cid, attr: &str) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change
.s
.push(Transition::ModifyPurge(AttrString::from(attr)));
}
pub fn recycled(&mut self, cid: &Cid) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::Recycle);
}
pub fn revive(&mut self, cid: &Cid) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::Revive);
}
pub fn tombstone(&mut self, cid: &Cid, attrs: Eattrs) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::Tombstone(attrs));
}
/// Replay our changes from and including the replay Cid, up to the latest point
/// in time. We also return a vector of *rejected* Cid's showing what is in the
/// change log that is considered invalid.
fn replay(
&self,
from_cid: Bound<&Cid>,
to_cid: Bound<&Cid>,
) -> Result<(State, Vec<Cid>), OperationError> {
// Select the anchor_cid that is *earlier* or *equals* to the replay_cid.
// if not found, we are *unable to* perform this replay which indicates a problem!
let (anchor_cid, anchor) = if matches!(from_cid, Unbounded) {
// If the from is unbounded, and to is unbounded, we want
// the earliest anchor possible.
// If from is unbounded and to is bounded, we want the earliest
// possible.
self.anchors.iter().next()
} else {
// If from has a bound, we want an anchor "earlier than" from, regardless
// of the to bound state.
self.anchors.range((Unbounded, from_cid)).next_back()
}
.ok_or_else(|| {
admin_error!(
?from_cid,
?to_cid,
"Failed to locate anchor in replay range"
);
OperationError::ReplReplayFailure
})?;
trace!(?anchor_cid, ?anchor);
// Load the entry attribute state at that time.
let mut replay_state = anchor.clone();
let mut rejected_cid = Vec::new();
// For each change
for (change_cid, change) in self.changes.range((Included(anchor_cid), to_cid)) {
// Apply the change.
trace!(?change_cid, ?change);
replay_state = match replay_state.apply_change(change) {
Ok(mut new_state) => {
// Indicate that this was the highest CID so far.
match &mut new_state {
State::NonExistent => {
trace!("pass");
}
State::Live(ref mut attrs)
| State::Recycled(ref mut attrs)
| State::Tombstone(ref mut attrs) => {
let cv = vs_cid![change_cid.clone()];
let _ = attrs.insert(AttrString::from("last_modified_cid"), cv);
}
};
new_state
}
Err(previous_state) => {
warn!("rejecting invalid change {:?}", change_cid);
rejected_cid.push(change_cid.clone());
previous_state
}
};
}
// Return the eattrs state.
Ok((replay_state, rejected_cid))
}
#[instrument(
level = "trace",
name = "verify",
skip(self, _schema, expected_attrs, results)
)]
pub fn verify(
&self,
_schema: &dyn SchemaTransaction,
expected_attrs: &Eattrs,
entry_id: u64,
results: &mut Vec<Result<(), ConsistencyError>>,
) {
// We need to be able to take any anchor entry, and replay that when all changes
// are applied we get the *same entry* as the current state.
debug_assert!(results.is_empty());
// For each anchor (we only needs it's change id.)
for cid in self.anchors.keys() {
match self.replay(Included(cid), Unbounded) {
Ok((entry_state, rejected)) => {
trace!(?rejected);
match entry_state {
State::Live(attrs) | State::Recycled(attrs) | State::Tombstone(attrs) => {
if compare_attrs(&attrs, expected_attrs) {
// valid
trace!("changelog is synchronised");
} else {
// ruh-roh.
warn!("changelog has desynchronised!");
debug!(?attrs);
debug!(?expected_attrs);
debug_assert!(false);
results
.push(Err(ConsistencyError::ChangelogDesynchronised(entry_id)));
}
}
State::NonExistent => {
warn!("entry does not exist - changelog is corrupted?!");
results.push(Err(ConsistencyError::ChangelogDesynchronised(entry_id)))
}
}
}
Err(e) => {
error!(?e);
}
}
}
debug_assert!(results.is_empty());
}
pub fn contains_tail_cid(&self, cid: &Cid) -> bool {
if let Some(tail_cid) = self.changes.keys().next_back() {
if tail_cid == cid {
return true;
}
};
false
}
pub fn can_delete(&self) -> bool {
// Changelog should be empty.
// should have a current anchor state of tombstone.
self.changes.is_empty()
&& matches!(self.anchors.values().next_back(), Some(State::Tombstone(_)))
}
pub fn is_live(&self) -> bool {
!matches!(self.anchors.values().next_back(), Some(State::Tombstone(_)))
}
pub fn cid_iter(&self) -> Keys<Cid, Change> {
self.changes.keys()
}
/*
fn insert_anchor(&mut self, cid: Cid, entry_state: State) {
// When we insert an anchor, we have to remove all subsequent anchors (but not
// the preceding ones.)
let _ = self.anchors.split_off(&cid);
self.anchors.insert(cid.clone(), entry_state);
}
*/
pub fn trim_up_to(&mut self, cid: &Cid) -> Result<(), OperationError> {
// Build a new anchor that is equal or less than this cid.
// In other words, the cid we are trimming to, should be remaining
// in the CL, and we should have an anchor that precedes it.
let (entry_state, rejected) = self.replay(Unbounded, Excluded(cid)).map_err(|e| {
error!(?e);
e
})?;
trace!(?rejected);
// Add the entry_state as an anchor. Use the CID we just
// trimmed to.
// insert_anchor will remove anything to the right, we also need to
// remove everything to the left, so just clear.
self.anchors.clear();
self.anchors.insert(cid.clone(), entry_state);
// And now split the CL.
let mut right = self.changes.split_off(cid);
std::mem::swap(&mut right, &mut self.changes);
// We can trace what we drop later?
Ok(())
}
}
#[cfg(test)]
mod tests {
    use std::time::Duration;

    use crate::entry::Eattrs;
    // use crate::prelude::*;
    use crate::repl::cid::Cid;
    use crate::repl::entry::{Change, EntryChangelog, State, Transition};
    use crate::schema::{Schema, SchemaTransaction};

    // Shared harness: initialise test logging, build a fresh schema, and run
    // the supplied closure with a schema read transaction.
    #[cfg(test)]
    macro_rules! run_entrychangelog_test {
        ($test_fn:expr) => {{
            let _ = sketching::test_init();
            let schema_outer = Schema::new().expect("Failed to init schema");
            let schema_txn = schema_outer.read();

            $test_fn(&schema_txn)
        }};
    }

    // Smoke test: constructing a changelog for an empty entry must not panic.
    #[test]
    fn test_entrychangelog_basic() {
        run_entrychangelog_test!(|schema: &dyn SchemaTransaction| {
            let cid = Cid::new_random_s_d(Duration::from_secs(1));
            let eattrs = Eattrs::new();
            let eclog = EntryChangelog::new(cid, eattrs, schema);
            trace!(?eclog);
        })
    }

    // Exercise State::apply_change directly: an empty change and a Create are
    // valid from NonExistent; a Create is rejected on an already-Live state.
    #[test]
    fn test_entrychangelog_state_transitions() {
        // Test that all our transitions are defined and work as
        // expected.
        assert!(State::NonExistent
            .apply_change(&Change { s: vec![] })
            .is_ok());
        assert!(State::NonExistent
            .apply_change(&Change {
                s: vec![Transition::Create(Eattrs::new())]
            })
            .is_ok());

        assert!(State::Live(Eattrs::new())
            .apply_change(&Change { s: vec![] })
            .is_ok());
        assert!(State::Live(Eattrs::new())
            .apply_change(&Change {
                s: vec![Transition::Create(Eattrs::new())]
            })
            .is_err());
    }
}

View file

@ -1,588 +1,210 @@
use std::collections::btree_map::Keys;
use std::collections::BTreeMap;
use std::fmt;
use std::ops::Bound;
use std::ops::Bound::*;
use kanidm_proto::v1::ConsistencyError;
use super::cid::Cid;
use crate::entry::{compare_attrs, Eattrs};
use crate::entry::Eattrs;
use crate::prelude::*;
use crate::schema::SchemaTransaction;
use crate::valueset;
// use crate::valueset;
use std::collections::BTreeMap;
#[derive(Debug, Clone)]
pub struct EntryChangelog {
/// The set of "entries as they existed at a point in time". This allows us to rewind
/// to a point-in-time, and then to start to "replay" applying all changes again.
///
/// A subtle and important piece of information is that an anchor can be considered
/// as the "state as existing between two Cid's". This means for Cid X, this state is
/// the "moment before X". This is important, as for a create we define the initial anchor
/// as "nothing". It's means for the anchor at time X, that changes that occurred at time
/// X have NOT been replayed and applied!
anchors: BTreeMap<Cid, State>,
changes: BTreeMap<Cid, Change>,
}
/*
impl fmt::Display for EntryChangelog {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f
}
}
*/
/// A change defines the transitions that occurred within this Cid (transaction). A change is applied
/// as a whole, or rejected during the replay process.
#[derive(Debug, Clone)]
pub struct Change {
s: Vec<Transition>,
pub enum State {
Live { changes: BTreeMap<AttrString, Cid> },
Tombstone { at: Cid },
}
#[derive(Debug, Clone)]
enum State {
NonExistent,
Live(Eattrs),
Recycled(Eattrs),
Tombstone(Eattrs),
pub struct EntryChangeState {
pub(super) st: State,
}
impl fmt::Display for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self {
State::NonExistent => write!(f, "NonExistent"),
State::Live(_) => write!(f, "Live"),
State::Recycled(_) => write!(f, "Recycled"),
State::Tombstone(_) => write!(f, "Tombstone"),
}
}
}
impl EntryChangeState {
pub fn new(cid: &Cid, attrs: &Eattrs, _schema: &dyn SchemaTransaction) -> Self {
let changes = attrs
.keys()
.cloned()
.map(|attr| (attr, cid.clone()))
.collect();
#[derive(Debug, Clone)]
enum Transition {
Create(Eattrs),
ModifyPurge(AttrString),
ModifyPresent(AttrString, Box<Value>),
ModifyRemoved(AttrString, Box<PartialValue>),
ModifyAssert(AttrString, Box<PartialValue>),
Recycle,
Revive,
Tombstone(Eattrs),
}
let st = State::Live { changes };
impl fmt::Display for Transition {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self {
Transition::Create(_) => write!(f, "Create"),
Transition::ModifyPurge(a) => write!(f, "ModifyPurge({})", a),
Transition::ModifyPresent(a, _) => write!(f, "ModifyPresent({})", a),
Transition::ModifyRemoved(a, _) => write!(f, "ModifyRemoved({})", a),
Transition::ModifyAssert(a, _) => write!(f, "ModifyAssert({})", a),
Transition::Recycle => write!(f, "Recycle"),
Transition::Revive => write!(f, "Revive"),
Transition::Tombstone(_) => write!(f, "Tombstone"),
}
}
}
impl State {
fn apply_change(self, change: &Change) -> Result<Self, Self> {
let mut state = self;
for transition in change.s.iter() {
match (&mut state, transition) {
(State::NonExistent, Transition::Create(attrs)) => {
trace!("NonExistent + Create -> Live");
state = State::Live(attrs.clone());
}
(State::Live(ref mut attrs), Transition::ModifyPurge(attr)) => {
trace!("Live + ModifyPurge({}) -> Live", attr);
attrs.remove(attr);
}
(State::Live(ref mut attrs), Transition::ModifyPresent(attr, value)) => {
trace!("Live + ModifyPresent({}) -> Live", attr);
if let Some(vs) = attrs.get_mut(attr) {
let r = vs.insert_checked(value.as_ref().clone());
assert!(r.is_ok());
// Reject if it fails?
} else {
#[allow(clippy::expect_used)]
let vs = valueset::from_value_iter(std::iter::once(value.as_ref().clone()))
.expect("Unable to fail - always single value, and only one type!");
attrs.insert(attr.clone(), vs);
}
}
(State::Live(ref mut attrs), Transition::ModifyRemoved(attr, value)) => {
trace!("Live + ModifyRemoved({}) -> Live", attr);
let rm = if let Some(vs) = attrs.get_mut(attr) {
vs.remove(value);
vs.is_empty()
} else {
false
};
if rm {
attrs.remove(attr);
};
}
(State::Live(ref mut attrs), Transition::ModifyAssert(attr, value)) => {
trace!("Live + ModifyAssert({}) -> Live", attr);
if attrs
.get(attr)
.map(|vs| vs.contains(value))
.unwrap_or(false)
{
// Valid
} else {
warn!("{} + {:?} -> Assertion not met - REJECTING", attr, value);
return Err(state);
}
}
(State::Live(attrs), Transition::Recycle) => {
trace!("Live + Recycle -> Recycled");
state = State::Recycled(attrs.clone());
}
(State::Live(_), Transition::Tombstone(attrs)) => {
trace!("Live + Tombstone -> Tombstone");
state = State::Tombstone(attrs.clone());
}
(State::Recycled(attrs), Transition::Revive) => {
trace!("Recycled + Revive -> Live");
state = State::Live(attrs.clone());
}
(State::Recycled(ref mut attrs), Transition::ModifyPurge(attr)) => {
trace!("Recycled + ModifyPurge({}) -> Recycled", attr);
attrs.remove(attr);
}
(State::Recycled(attrs), Transition::ModifyRemoved(attr, value)) => {
trace!("Recycled + ModifyRemoved({}) -> Recycled", attr);
let rm = if let Some(vs) = attrs.get_mut(attr) {
vs.remove(value);
vs.is_empty()
} else {
false
};
if rm {
attrs.remove(attr);
};
}
(State::Recycled(_), Transition::Tombstone(attrs)) => {
trace!("Recycled + Tombstone -> Tombstone");
state = State::Tombstone(attrs.clone());
}
// ==============================
// Invalid States
/*
(State::NonExistent, Transition::ModifyPurge(_))
| (State::NonExistent, Transition::ModifyPresent(_, _))
| (State::NonExistent, Transition::ModifyRemoved(_, _))
| (State::NonExistent, Transition::Recycle)
| (State::NonExistent, Transition::Revive)
| (State::NonExistent, Transition::Tombstone(_))
| (State::Live(_), Transition::Create(_))
| (State::Live(_), Transition::Revive)
| (State::Recycled(_), Transition::Create(_))
| (State::Recycled(_), Transition::Recycle)
| (State::Recycled(_), Transition::ModifyPresent(_, _))
| (State::Tombstone(_), _)
*/
(s, t) => {
warn!("{} + {} -> REJECTING", s, t);
return Err(state);
}
};
}
// Everything must have applied, all good then.
trace!(?state, "applied changes");
Ok(state)
}
}
impl EntryChangelog {
pub fn new(cid: Cid, attrs: Eattrs, _schema: &dyn SchemaTransaction) -> Self {
// I think we need to reduce the attrs based on what is / is not replicated.?
let anchors = btreemap![(cid.clone(), State::NonExistent)];
let changes = btreemap![(
cid,
Change {
s: vec![Transition::Create(attrs)]
}
)];
EntryChangelog { anchors, changes }
EntryChangeState { st }
}
// TODO: work out if the below comment about uncommenting is still valid
// Uncomment this once we have a real on-disk storage of the changelog
pub fn new_without_schema(cid: Cid, attrs: Eattrs) -> Self {
// I think we need to reduce the attrs based on what is / is not replicated.?
// We need to pick a state that reflects the current state WRT to tombstone
// or recycled!
pub fn new_without_schema(cid: &Cid, attrs: &Eattrs) -> Self {
let class = attrs.get("class");
let (anchors, changes) = if class
let st = if class
.as_ref()
.map(|c| c.contains(&PVCLASS_TOMBSTONE as &PartialValue))
.unwrap_or(false)
{
(btreemap![(cid, State::Tombstone(attrs))], BTreeMap::new())
} else if class
.as_ref()
.map(|c| c.contains(&PVCLASS_RECYCLED as &PartialValue))
.unwrap_or(false)
{
(btreemap![(cid, State::Recycled(attrs))], BTreeMap::new())
State::Tombstone { at: cid.clone() }
} else {
(
btreemap![(cid.clone(), State::NonExistent)],
btreemap![(
cid,
Change {
s: vec![Transition::Create(attrs)]
}
)],
)
let changes = attrs
.keys()
.cloned()
.map(|attr| (attr, cid.clone()))
.collect();
State::Live { changes }
};
EntryChangelog { anchors, changes }
EntryChangeState { st }
}
pub fn add_ava_iter<T>(&mut self, cid: &Cid, attr: &str, viter: T)
where
T: IntoIterator<Item = Value>,
{
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
viter
.into_iter()
.map(|v| Transition::ModifyPresent(AttrString::from(attr), Box::new(v)))
.for_each(|t| change.s.push(t));
pub fn current(&self) -> &State {
&self.st
}
pub fn remove_ava_iter<T>(&mut self, cid: &Cid, attr: &str, viter: T)
where
T: IntoIterator<Item = PartialValue>,
{
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
viter
.into_iter()
.map(|v| Transition::ModifyRemoved(AttrString::from(attr), Box::new(v)))
.for_each(|t| change.s.push(t));
}
pub fn assert_ava(&mut self, cid: &Cid, attr: &str, value: PartialValue) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::ModifyAssert(
AttrString::from(attr),
Box::new(value),
))
}
pub fn purge_ava(&mut self, cid: &Cid, attr: &str) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change
.s
.push(Transition::ModifyPurge(AttrString::from(attr)));
}
pub fn recycled(&mut self, cid: &Cid) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::Recycle);
}
pub fn revive(&mut self, cid: &Cid) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::Revive);
}
pub fn tombstone(&mut self, cid: &Cid, attrs: Eattrs) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::Tombstone(attrs));
}
/// Replay our changes from and including the replay Cid, up to the latest point
/// in time. We also return a vector of *rejected* Cid's showing what is in the
/// change log that is considered invalid.
fn replay(
&self,
from_cid: Bound<&Cid>,
to_cid: Bound<&Cid>,
) -> Result<(State, Vec<Cid>), OperationError> {
// Select the anchor_cid that is *earlier* or *equals* to the replay_cid.
// if not found, we are *unable to* perform this replay which indicates a problem!
let (anchor_cid, anchor) = if matches!(from_cid, Unbounded) {
// If the from is unbounded, and to is unbounded, we want
// the earliest anchor possible.
// If from is unbounded and to is bounded, we want the earliest
// possible.
self.anchors.iter().next()
} else {
// If from has a bound, we want an anchor "earlier than" from, regardless
// of the to bound state.
self.anchors.range((Unbounded, from_cid)).next_back()
}
.ok_or_else(|| {
admin_error!(
?from_cid,
?to_cid,
"Failed to locate anchor in replay range"
);
OperationError::ReplReplayFailure
})?;
trace!(?anchor_cid, ?anchor);
// Load the entry attribute state at that time.
let mut replay_state = anchor.clone();
let mut rejected_cid = Vec::new();
// For each change
for (change_cid, change) in self.changes.range((Included(anchor_cid), to_cid)) {
// Apply the change.
trace!(?change_cid, ?change);
replay_state = match replay_state.apply_change(change) {
Ok(mut new_state) => {
// Indicate that this was the highest CID so far.
match &mut new_state {
State::NonExistent => {
trace!("pass");
}
State::Live(ref mut attrs)
| State::Recycled(ref mut attrs)
| State::Tombstone(ref mut attrs) => {
let cv = vs_cid![change_cid.clone()];
let _ = attrs.insert(AttrString::from("last_modified_cid"), cv);
}
};
new_state
pub fn change_ava(&mut self, cid: &Cid, attr: &str) {
match &mut self.st {
State::Live { ref mut changes } => {
if let Some(change) = changes.get_mut(attr) {
// Update the cid.
if change != cid {
*change = cid.clone()
}
} else {
changes.insert(attr.into(), cid.clone());
}
Err(previous_state) => {
warn!("rejecting invalid change {:?}", change_cid);
rejected_cid.push(change_cid.clone());
previous_state
}
};
}
State::Tombstone { .. } => {
assert!(false)
}
}
// Return the eattrs state.
Ok((replay_state, rejected_cid))
}
#[instrument(
level = "trace",
name = "verify",
skip(self, _schema, expected_attrs, results)
)]
pub fn tombstone(&mut self, cid: &Cid) {
match &mut self.st {
State::Live { changes: _ } => self.st = State::Tombstone { at: cid.clone() },
State::Tombstone { .. } => {} // no-op
};
}
pub fn can_delete(&self, cid: &Cid) -> bool {
match &self.st {
State::Live { .. } => false,
State::Tombstone { at } => at < cid,
}
}
pub fn is_live(&self) -> bool {
match &self.st {
State::Live { .. } => true,
State::Tombstone { .. } => false,
}
}
pub fn contains_tail_cid(&self, cid: &Cid) -> bool {
// This is slow? Is it needed?
match &self.st {
State::Live { changes } => changes.values().any(|change| change == cid),
State::Tombstone { at } => at == cid,
}
}
pub fn cid_iter(&self) -> Vec<&Cid> {
match &self.st {
State::Live { changes } => {
let mut v: Vec<_> = changes.values().collect();
v.sort_unstable();
v.dedup();
v
}
State::Tombstone { at } => vec![at],
}
}
pub fn retain<F>(&mut self, f: F)
where
F: FnMut(&AttrString, &mut Cid) -> bool,
{
match &mut self.st {
State::Live { changes } => changes.retain(f),
State::Tombstone { .. } => {}
}
}
#[instrument(level = "trace", name = "verify", skip_all)]
pub fn verify(
&self,
_schema: &dyn SchemaTransaction,
schema: &dyn SchemaTransaction,
expected_attrs: &Eattrs,
entry_id: u64,
results: &mut Vec<Result<(), ConsistencyError>>,
) {
// We need to be able to take any anchor entry, and replay that when all changes
// are applied we get the *same entry* as the current state.
debug_assert!(results.is_empty());
let class = expected_attrs.get("class");
let is_ts = class
.as_ref()
.map(|c| c.contains(&PVCLASS_TOMBSTONE as &PartialValue))
.unwrap_or(false);
// For each anchor (we only needs it's change id.)
for cid in self.anchors.keys() {
match self.replay(Included(cid), Unbounded) {
Ok((entry_state, rejected)) => {
trace!(?rejected);
match (&self.st, is_ts) {
(State::Live { changes }, false) => {
// Check that all attrs from expected, have a value in our changes.
let inconsistent: Vec<_> = expected_attrs
.keys()
.filter(|attr| {
/*
* If the attribute is a replicated attribute, and it is NOT present
* in the change state then we are in a desync state.
*
* However, we don't check the inverse - if an entry is in the change state
* but is NOT replicated by schema. This is because there is is a way to
* delete an attribute in schema which will then prevent future replications
* of that value. However the value, while not being updated, will retain
* a state entry in the change state.
*
* For the entry to then be replicated once more, it would require it's schema
* attributes to be re-added and then the replication will resume from whatever
* receives the changes first. Generally there are lots of desync and edge
* cases here, which is why we pretty much don't allow schema to be deleted
* but we have to handle it here due to a test case that simulates this.
*/
let desync = schema.is_replicated(attr) && !changes.contains_key(*attr);
if desync {
debug!(%entry_id, %attr, %desync);
}
desync
})
.collect();
match entry_state {
State::Live(attrs) | State::Recycled(attrs) | State::Tombstone(attrs) => {
if compare_attrs(&attrs, expected_attrs) {
// valid
trace!("changelog is synchronised");
} else {
// ruh-roh.
warn!("changelog has desynchronised!");
debug!(?attrs);
debug!(?expected_attrs);
debug_assert!(false);
results
.push(Err(ConsistencyError::ChangelogDesynchronised(entry_id)));
}
}
State::NonExistent => {
warn!("entry does not exist - changelog is corrupted?!");
results.push(Err(ConsistencyError::ChangelogDesynchronised(entry_id)))
}
}
}
Err(e) => {
error!(?e);
if inconsistent.is_empty() {
trace!("changestate is synchronised");
} else {
warn!("changestate has desynchronised! Missing state attrs {inconsistent:?}");
results.push(Err(ConsistencyError::ChangeStateDesynchronised(entry_id)));
}
}
(State::Tombstone { .. }, true) => {
trace!("changestate is synchronised");
}
(State::Live { .. }, true) => {
warn!("changestate has desynchronised! State Live when tombstone is true");
results.push(Err(ConsistencyError::ChangeStateDesynchronised(entry_id)));
}
(State::Tombstone { .. }, false) => {
warn!("changestate has desynchronised! State Tombstone when tombstone is false");
results.push(Err(ConsistencyError::ChangeStateDesynchronised(entry_id)));
}
}
debug_assert!(results.is_empty());
}
}
pub fn contains_tail_cid(&self, cid: &Cid) -> bool {
if let Some(tail_cid) = self.changes.keys().next_back() {
if tail_cid == cid {
return true;
impl PartialEq for EntryChangeState {
fn eq(&self, rhs: &Self) -> bool {
match (&self.st, &rhs.st) {
(
State::Live {
changes: changes_left,
},
State::Live {
changes: changes_right,
},
) => changes_left.eq(changes_right),
(State::Tombstone { at: at_left }, State::Tombstone { at: at_right }) => {
at_left.eq(at_right)
}
};
false
}
pub fn can_delete(&self) -> bool {
// Changelog should be empty.
// should have a current anchor state of tombstone.
self.changes.is_empty()
&& matches!(self.anchors.values().next_back(), Some(State::Tombstone(_)))
}
pub fn is_live(&self) -> bool {
!matches!(self.anchors.values().next_back(), Some(State::Tombstone(_)))
}
pub fn cid_iter(&self) -> Keys<Cid, Change> {
self.changes.keys()
}
/*
fn insert_anchor(&mut self, cid: Cid, entry_state: State) {
// When we insert an anchor, we have to remove all subsequent anchors (but not
// the preceding ones.)
let _ = self.anchors.split_off(&cid);
self.anchors.insert(cid.clone(), entry_state);
}
*/
pub fn trim_up_to(&mut self, cid: &Cid) -> Result<(), OperationError> {
// Build a new anchor that is equal or less than this cid.
// In other words, the cid we are trimming to, should be remaining
// in the CL, and we should have an anchor that precedes it.
let (entry_state, rejected) = self.replay(Unbounded, Excluded(cid)).map_err(|e| {
error!(?e);
e
})?;
trace!(?rejected);
// Add the entry_state as an anchor. Use the CID we just
// trimmed to.
// insert_anchor will remove anything to the right, we also need to
// remove everything to the left, so just clear.
self.anchors.clear();
self.anchors.insert(cid.clone(), entry_state);
// And now split the CL.
let mut right = self.changes.split_off(cid);
std::mem::swap(&mut right, &mut self.changes);
// We can trace what we drop later?
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::time::Duration;
use crate::entry::Eattrs;
// use crate::prelude::*;
use crate::repl::cid::Cid;
use crate::repl::entry::{Change, EntryChangelog, State, Transition};
use crate::schema::{Schema, SchemaTransaction};
#[test]
fn test_entrychangelog_basic() {
run_entrychangelog_test!(|schema: &dyn SchemaTransaction| {
let cid = Cid::new_random_s_d(Duration::from_secs(1));
let eattrs = Eattrs::new();
let eclog = EntryChangelog::new(cid, eattrs, schema);
trace!(?eclog);
})
}
#[test]
fn test_entrychangelog_state_transitions() {
// Test that all our transitions are defined and work as
// expected.
assert!(State::NonExistent
.apply_change(&Change { s: vec![] })
.is_ok());
assert!(State::NonExistent
.apply_change(&Change {
s: vec![Transition::Create(Eattrs::new())]
})
.is_ok());
assert!(State::Live(Eattrs::new())
.apply_change(&Change { s: vec![] })
.is_ok());
assert!(State::Live(Eattrs::new())
.apply_change(&Change {
s: vec![Transition::Create(Eattrs::new())]
})
.is_err());
(_, _) => false,
}
}
}

View file

@ -2,5 +2,9 @@ pub mod cid;
pub mod entry;
pub mod ruv;
pub mod consumer;
pub mod proto;
pub mod supplier;
#[cfg(test)]
mod tests;

View file

@ -0,0 +1,497 @@
use super::cid::Cid;
use super::entry::EntryChangeState;
use super::entry::State;
use crate::entry::Eattrs;
use crate::prelude::*;
use crate::schema::{SchemaReadTransaction, SchemaTransaction};
use crate::valueset;
use base64urlsafedata::Base64UrlSafeData;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};
use webauthn_rs::prelude::{
DeviceKey as DeviceKeyV4, Passkey as PasskeyV4, SecurityKey as SecurityKeyV4,
};
/// Wire (v1) form of a change identifier. Fields are renamed to single
/// letters to keep the serialized payload compact.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ReplCidV1 {
    /// Timestamp component of the Cid.
    #[serde(rename = "t")]
    pub ts: Duration,
    /// Originating server uuid component of the Cid.
    #[serde(rename = "s")]
    pub s_uuid: Uuid,
}
// From / Into CID
// Lossless conversions between the in-memory Cid and its wire form: both
// directions simply copy the (ts, s_uuid) pair.
impl From<&Cid> for ReplCidV1 {
    fn from(cid: &Cid) -> Self {
        ReplCidV1 {
            ts: cid.ts,
            s_uuid: cid.s_uuid,
        }
    }
}

impl From<ReplCidV1> for Cid {
    fn from(cid: ReplCidV1) -> Self {
        Cid {
            ts: cid.ts,
            s_uuid: cid.s_uuid,
        }
    }
}

// Borrowed variant so callers can convert without consuming the wire value.
impl From<&ReplCidV1> for Cid {
    fn from(cid: &ReplCidV1) -> Self {
        Cid {
            ts: cid.ts,
            s_uuid: cid.s_uuid,
        }
    }
}
/// Wire (v1) form of a postal address value. Field names are shortened to
/// single letters for a compact serialized payload.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
pub struct ReplAddressV1 {
    /// Pre-formatted, display-ready address string.
    #[serde(rename = "f")]
    pub formatted: String,
    #[serde(rename = "s")]
    pub street_address: String,
    #[serde(rename = "l")]
    pub locality: String,
    #[serde(rename = "r")]
    pub region: String,
    #[serde(rename = "p")]
    pub postal_code: String,
    #[serde(rename = "c")]
    pub country: String,
}
/// Wire (v1) form of the TOTP hash algorithm selector (SHA1/SHA256/SHA512).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub enum ReplTotpAlgoV1 {
    S1,
    S256,
    S512,
}

/// Wire (v1) form of a TOTP credential.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ReplTotpV1 {
    /// Shared secret, base64url encoded on the wire.
    pub key: Base64UrlSafeData,
    /// Time step in seconds.
    pub step: u64,
    pub algo: ReplTotpAlgoV1,
    /// Number of output digits per code.
    pub digits: u8,
}
/// Wire (v1) form of a stored password hash. Each variant mirrors one of the
/// supported hashing schemes; binary material is base64url encoded.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub enum ReplPasswordV1 {
    PBKDF2 {
        cost: usize,
        salt: Base64UrlSafeData,
        hash: Base64UrlSafeData,
    },
    PBKDF2_SHA1 {
        cost: usize,
        salt: Base64UrlSafeData,
        hash: Base64UrlSafeData,
    },
    PBKDF2_SHA512 {
        cost: usize,
        salt: Base64UrlSafeData,
        hash: Base64UrlSafeData,
    },
    SSHA512 {
        salt: Base64UrlSafeData,
        hash: Base64UrlSafeData,
    },
    // Legacy NT hash format (no salt by design of the scheme).
    NT_MD4 {
        hash: Base64UrlSafeData,
    },
}
/// Wire (v1) form of a set of MFA backup codes.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ReplBackupCodeV1 {
    pub codes: BTreeSet<String>,
}

/// Wire (v1) form of a credential attached to an account. The `tag`
/// discriminates which credential slot the value belongs to.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub enum ReplCredV1 {
    // NOTE(review): "TmpWn" appears to be a temporary webauthn passkey set —
    // confirm against the credential model before relying on this reading.
    TmpWn {
        tag: String,
        set: Vec<ReplPasskeyV4V1>,
    },
    Password {
        tag: String,
        password: ReplPasswordV1,
        uuid: Uuid,
    },
    /// A generated (rather than user-chosen) password.
    GenPassword {
        tag: String,
        password: ReplPasswordV1,
        uuid: Uuid,
    },
    /// Password combined with one or more second factors.
    PasswordMfa {
        tag: String,
        password: ReplPasswordV1,
        totp: Vec<(String, ReplTotpV1)>,
        backup_code: Option<ReplBackupCodeV1>,
        webauthn: Vec<ReplSecurityKeyV4V1>,
        uuid: Uuid,
    },
}
/// Wire (v1) form of a credential-update intent token, tracking its
/// lifecycle: issued (`Valid`), bound to an active session (`InProgress`),
/// or used up (`Consumed`).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub enum ReplIntentTokenV1 {
    Valid {
        token_id: String,
        max_ttl: Duration,
    },
    InProgress {
        token_id: String,
        max_ttl: Duration,
        session_id: Uuid,
        session_ttl: Duration,
    },
    Consumed {
        token_id: String,
        max_ttl: Duration,
    },
}
/// Wire (v1) form of a tagged webauthn security key (webauthn-rs v4 type).
#[derive(Serialize, Deserialize, Debug)]
pub struct ReplSecurityKeyV4V1 {
    pub tag: String,
    pub key: SecurityKeyV4,
}

impl Eq for ReplSecurityKeyV4V1 {}

// Equality is by credential id only: the tag and other key material do not
// participate, so two entries for the same authenticator compare equal.
impl PartialEq for ReplSecurityKeyV4V1 {
    fn eq(&self, other: &Self) -> bool {
        self.key.cred_id() == other.key.cred_id()
    }
}
/// Wire (v1) form of a tagged passkey (webauthn-rs v4 type), keyed by the
/// owning uuid.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ReplPasskeyV4V1 {
    pub uuid: Uuid,
    pub tag: String,
    pub key: PasskeyV4,
}

impl Eq for ReplPasskeyV4V1 {}

// Equality is by (uuid, credential id); the tag is ignored.
impl PartialEq for ReplPasskeyV4V1 {
    fn eq(&self, other: &Self) -> bool {
        self.uuid == other.uuid && self.key.cred_id() == other.key.cred_id()
    }
}
/// Wire (v1) form of a tagged device key (webauthn-rs v4 type), keyed by the
/// owning uuid.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ReplDeviceKeyV4V1 {
    pub uuid: Uuid,
    pub tag: String,
    pub key: DeviceKeyV4,
}

impl Eq for ReplDeviceKeyV4V1 {}

// Equality is by (uuid, credential id); the tag is ignored.
impl PartialEq for ReplDeviceKeyV4V1 {
    fn eq(&self, other: &Self) -> bool {
        self.uuid == other.uuid && self.key.cred_id() == other.key.cred_id()
    }
}
/// Replication wire representation of an oauth2 scope map: a referenced group
/// (`refer`) mapped to the set of scopes its members receive.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ReplOauthScopeMapV1 {
    pub refer: Uuid,
    pub data: BTreeSet<String>,
}
/// Replication wire representation of an oauth2 session.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ReplOauth2SessionV1 {
    pub refer: Uuid,
    // The parent (auth) session this oauth2 session was derived from.
    pub parent: Uuid,
    // Timestamps are carried as strings on the wire - presumably RFC3339;
    // confirm against the session valueset serialisation.
    pub expiry: Option<String>,
    pub issued_at: String,
    // The oauth2 resource server this session belongs to.
    pub rs_uuid: Uuid,
}
/// Replication wire representation of a session's access scope.
/// Defaults to `ReadOnly` when absent.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Default)]
pub enum ReplAccessScopeV1 {
    IdentityOnly,
    #[default]
    ReadOnly,
    ReadWrite,
    Synchronise,
}
/// Replication wire representation of who issued a session: the server
/// internals, a regular entry (by uuid), or a sync account (by uuid).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub enum ReplIdentityIdV1 {
    Internal,
    Uuid(Uuid),
    Synch(Uuid),
}
/// Replication wire representation of an authentication session.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ReplSessionV1 {
    pub refer: Uuid,
    pub label: String,
    // Timestamps carried as strings on the wire, like ReplOauth2SessionV1.
    pub expiry: Option<String>,
    pub issued_at: String,
    pub issued_by: ReplIdentityIdV1,
    pub scope: ReplAccessScopeV1,
}
/// Replication wire representation of a single attribute's valueset.
/// Each variant corresponds to one server-side valueset syntax; the `set`
/// holds the serialised values. Produced by `ValueSet::to_repl_v1` and
/// consumed by `valueset::from_repl_v1` (see `ReplEntryV1`).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub enum ReplAttrV1 {
    Address {
        set: Vec<ReplAddressV1>,
    },
    EmailAddress {
        // The primary address is carried separately from the full set.
        primary: String,
        set: Vec<String>,
    },
    PublicBinary {
        set: Vec<(String, Base64UrlSafeData)>,
    },
    PrivateBinary {
        set: Vec<Base64UrlSafeData>,
    },
    Bool {
        set: Vec<bool>,
    },
    Cid {
        set: Vec<ReplCidV1>,
    },
    Credential {
        set: Vec<ReplCredV1>,
    },
    IntentToken {
        set: Vec<ReplIntentTokenV1>,
    },
    Passkey {
        set: Vec<ReplPasskeyV4V1>,
    },
    DeviceKey {
        set: Vec<ReplDeviceKeyV4V1>,
    },
    DateTime {
        set: Vec<String>,
    },
    Iname {
        set: Vec<String>,
    },
    // Index types are carried as their numeric discriminants.
    IndexType {
        set: Vec<u16>,
    },
    Iutf8 {
        set: Vec<String>,
    },
    JsonFilter {
        set: Vec<String>,
    },
    JwsKeyEs256 {
        set: Vec<Base64UrlSafeData>,
    },
    JwsKeyRs256 {
        set: Vec<Base64UrlSafeData>,
    },
    NsUniqueId {
        set: Vec<String>,
    },
    SecretValue {
        set: Vec<String>,
    },
    RestrictedString {
        set: Vec<String>,
    },
    Uint32 {
        set: Vec<u32>,
    },
    Url {
        set: Vec<Url>,
    },
    Utf8 {
        set: Vec<String>,
    },
    Uuid {
        set: Vec<Uuid>,
    },
    Reference {
        set: Vec<Uuid>,
    },
    // Syntax types are carried as their numeric discriminants.
    SyntaxType {
        set: Vec<u16>,
    },
    Spn {
        set: Vec<(String, String)>,
    },
    UiHint {
        set: Vec<u16>,
    },
    // (tag, key) pairs.
    SshKey {
        set: Vec<(String, String)>,
    },
    OauthScope {
        set: Vec<String>,
    },
    OauthScopeMap {
        set: Vec<ReplOauthScopeMapV1>,
    },
    Oauth2Session {
        set: Vec<ReplOauth2SessionV1>,
    },
    Session {
        set: Vec<ReplSessionV1>,
    },
    TotpSecret {
        set: Vec<(String, ReplTotpV1)>,
    },
}
/// The replicated state of one attribute: the CID of the change that last
/// affected it, and the valueset. `attr` is `None` when the supplier's
/// valueset was empty - the CID is still sent so both ends agree on the
/// attribute's change history (see `ReplEntryV1::new`).
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct ReplAttrStateV1 {
    cid: ReplCidV1,
    attr: Option<ReplAttrV1>,
}
/// The replicated lifecycle state of an entry: either live with per-attribute
/// change states, or tombstoned at a specific CID.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub enum ReplStateV1 {
    Live {
        attrs: BTreeMap<String, ReplAttrStateV1>,
    },
    Tombstone {
        at: ReplCidV1,
    },
}
/// The wire form of a full entry as sent during a refresh.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
// I think partial entries should be separate? This clearly implies a refresh.
pub struct ReplEntryV1 {
    uuid: Uuid,
    // Change State
    st: ReplStateV1,
}
impl ReplEntryV1 {
pub fn new(entry: &EntrySealedCommitted, schema: &SchemaReadTransaction) -> ReplEntryV1 {
let cs = entry.get_changestate();
let uuid = entry.get_uuid();
let st = match cs.current() {
State::Live { changes } => {
let live_attrs = entry.get_ava();
let attrs = changes
.iter()
.filter_map(|(attr_name, cid)| {
if schema.is_replicated(attr_name) {
let live_attr = live_attrs.get(attr_name.as_str());
let cid = cid.into();
let attr = live_attr.and_then(|maybe|
// There is a quirk in the way we currently handle certain
// types of adds/deletes that it may be possible to have an
// empty value set still in memory on a supplier. In the future
// we may make it so in memory valuesets can be empty and sent
// but for now, if it's an empty set in any capacity, we map
// to None and just send the Cid since they have the same result
// on how the entry/attr state looks at each end.
if maybe.len() > 0 {
Some(maybe.to_repl_v1())
} else {
None
}
);
Some((attr_name.to_string(), ReplAttrStateV1 { cid, attr }))
} else {
None
}
})
.collect();
ReplStateV1::Live { attrs }
}
State::Tombstone { at } => ReplStateV1::Tombstone { at: at.into() },
};
ReplEntryV1 { uuid, st }
}
pub fn rehydrate(&self) -> Result<(EntryChangeState, Eattrs), OperationError> {
match &self.st {
ReplStateV1::Live { attrs } => {
trace!("{:#?}", attrs);
// We need to build two sets, one for the Entry Change States, and one for the
// Eattrs.
let mut changes = BTreeMap::default();
let mut eattrs = Eattrs::default();
for (attr_name, ReplAttrStateV1 { cid, attr }) in attrs.iter() {
let astring: AttrString = attr_name.as_str().into();
let cid: Cid = cid.into();
if let Some(attr_value) = attr {
let v = valueset::from_repl_v1(attr_value).map_err(|e| {
error!("Unable to restore valueset for {}", attr_name);
e
})?;
if eattrs.insert(astring.clone(), v).is_some() {
error!(
"Impossible eattrs state, attribute {} appears to be duplicated!",
attr_name
);
return Err(OperationError::InvalidEntryState);
}
}
if changes.insert(astring, cid).is_some() {
error!(
"Impossible changes state, attribute {} appears to be duplicated!",
attr_name
);
return Err(OperationError::InvalidEntryState);
}
}
let ecstate = EntryChangeState {
st: State::Live { changes },
};
Ok((ecstate, eattrs))
}
ReplStateV1::Tombstone { at } => {
let at: Cid = at.into();
let mut eattrs = Eattrs::default();
let class_ava = vs_iutf8!["object", "tombstone"];
let last_mod_ava = vs_cid![at.clone()];
eattrs.insert(AttrString::from("uuid"), vs_uuid![self.uuid]);
eattrs.insert(AttrString::from("class"), class_ava);
eattrs.insert(AttrString::from("last_modified_cid"), last_mod_ava);
let ecstate = EntryChangeState {
st: State::Tombstone { at },
};
Ok((ecstate, eattrs))
}
}
}
}
// From / Into Entry
// From / Into Entry

/// Versioned payload for a full refresh of a consumer: the supplier's domain
/// identity/version plus every replicated entry, partitioned so schema and
/// system metadata can be handled separately from ordinary entries.
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum ReplRefreshContext {
    V1 {
        domain_version: DomainVersion,
        domain_uuid: Uuid,
        schema_entries: Vec<ReplEntryV1>,
        meta_entries: Vec<ReplEntryV1>,
        entries: Vec<ReplEntryV1>,
    },
}

View file

@ -9,6 +9,7 @@ use kanidm_proto::v1::ConsistencyError;
use crate::prelude::*;
use crate::repl::cid::Cid;
use std::fmt;
pub struct ReplicationUpdateVector {
// This sorts by time. Should we look up by IDL or by UUID?
@ -42,6 +43,15 @@ pub struct ReplicationUpdateVectorWriteTransaction<'a> {
data: BptreeMapWriteTxn<'a, Cid, IDLBitRange>,
}
impl<'a> fmt::Debug for ReplicationUpdateVectorWriteTransaction<'a> {
    // Diagnostic dump of the whole RUV: one line per CID with its entry-id set.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        writeln!(f, "RUV DUMP")?;
        self.data
            .iter()
            .try_for_each(|(cid, idl)| writeln!(f, "* [{cid} {idl:?}]"))
    }
}
pub struct ReplicationUpdateVectorReadTransaction<'a> {
data: BptreeMapReadTxn<'a, Cid, IDLBitRange>,
}
@ -59,10 +69,10 @@ pub trait ReplicationUpdateVectorTransaction {
for entry in entries {
// The DB id we need.
let eid = entry.get_id();
let eclog = entry.get_changelog();
let ecstate = entry.get_changestate();
// We don't need the details of the change - only the cid of the
// change that this entry was involved in.
for cid in eclog.cid_iter() {
for cid in ecstate.cid_iter() {
if let Some(idl) = check_ruv.get_mut(cid) {
// We can't guarantee id order, so we have to do this properly.
idl.insert_id(eid);
@ -91,7 +101,15 @@ pub trait ReplicationUpdateVectorTransaction {
while let (Some((ck, cv)), Some((sk, sv))) = (&check_next, &snap_next) {
match ck.cmp(sk) {
Ordering::Equal => {
if cv == sv {
// Counter intuitive, but here we check that the check set is a *subset*
// of the ruv snapshot. This is because when we have an entry that is
// tombstoned, all it's CID interactions are "lost" and it's cid becomes
// that of when it was tombstoned. So the "rebuilt" ruv will miss that
// entry.
//
// In the future the RUV concept may be ditched entirely anyway, thoughts needed.
let intersect = *cv & *sv;
if *cv == &intersect {
trace!("{:?} is consistent!", ck);
} else {
admin_warn!("{:?} is NOT consistent! IDL's differ", ck);
@ -102,15 +120,17 @@ pub trait ReplicationUpdateVectorTransaction {
snap_next = snap_iter.next();
}
Ordering::Less => {
// Due to deletes, it can be that the check ruv is missing whole entries
// in a rebuild.
admin_warn!("{:?} is NOT consistent! CID missing from RUV", ck);
debug_assert!(false);
results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string())));
// debug_assert!(false);
// results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string())));
check_next = check_iter.next();
}
Ordering::Greater => {
admin_warn!("{:?} is NOT consistent! CID should not exist in RUV", sk);
debug_assert!(false);
results.push(Err(ConsistencyError::RuvInconsistent(sk.to_string())));
// debug_assert!(false);
// results.push(Err(ConsistencyError::RuvInconsistent(sk.to_string())));
snap_next = snap_iter.next();
}
}
@ -118,15 +138,15 @@ pub trait ReplicationUpdateVectorTransaction {
while let Some((ck, _cv)) = &check_next {
admin_warn!("{:?} is NOT consistent! CID missing from RUV", ck);
debug_assert!(false);
results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string())));
// debug_assert!(false);
// results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string())));
check_next = check_iter.next();
}
while let Some((sk, _sv)) = &snap_next {
admin_warn!("{:?} is NOT consistent! CID should not exist in RUV", sk);
debug_assert!(false);
results.push(Err(ConsistencyError::RuvInconsistent(sk.to_string())));
// debug_assert!(false);
// results.push(Err(ConsistencyError::RuvInconsistent(sk.to_string())));
snap_next = snap_iter.next();
}
@ -162,10 +182,10 @@ impl<'a> ReplicationUpdateVectorWriteTransaction<'a> {
for entry in entries {
// The DB id we need.
let eid = entry.get_id();
let eclog = entry.get_changelog();
let ecstate = entry.get_changestate();
// We don't need the details of the change - only the cid of the
// change that this entry was involved in.
for cid in eclog.cid_iter() {
for cid in ecstate.cid_iter() {
if let Some(idl) = rebuild_ruv.get_mut(cid) {
// We can't guarantee id order, so we have to do this properly.
idl.insert_id(eid);

View file

@ -0,0 +1,101 @@
use super::proto::{ReplEntryV1, ReplRefreshContext};
use crate::prelude::*;
impl<'a> QueryServerReadTransaction<'a> {
    // Given a consumers state, calculate the differential of changes they
    // need to be sent to bring them to the equivalent state.

    // We use the RUV or Cookie to determine if:
    // * The consumer requires a full-reinit.
    // * Which entry attr-states need to be sent, if any

    /// Supply incremental changes to a consumer.
    ///
    /// NOTE(review): currently a stub - incremental change supply is not yet
    /// implemented; only full refresh (below) is supported.
    #[instrument(level = "debug", skip_all)]
    pub fn supplier_provide_changes(&mut self) -> Result<(), OperationError> {
        Ok(())
    }

    /// Build the full-refresh payload for a new/reinitialising consumer:
    /// domain version + uuid, the schema entries, the system metadata
    /// entries, and all remaining replicated entries.
    #[instrument(level = "debug", skip_all)]
    pub fn supplier_provide_refresh(&mut self) -> Result<ReplRefreshContext, OperationError> {
        // Get the current schema. We use this for attribute and entry filtering.
        let schema = self.get_schema();

        // A refresh must provide
        //
        // * the current domain version
        let domain_version = self.d_info.d_vers;
        let domain_uuid = self.d_info.d_uuid;

        // * the domain uuid
        // * the set of schema entries
        // * the set of non-schema entries
        //   - We must exclude certain entries and attributes!
        //     * schema defines what we exclude!

        // Schema entries: attribute and class definitions.
        let schema_filter = filter!(f_or!([
            f_eq("class", PVCLASS_ATTRIBUTETYPE.clone()),
            f_eq("class", PVCLASS_CLASSTYPE.clone()),
        ]));

        // System metadata entries, selected by well-known uuid.
        let meta_filter = filter!(f_or!([
            f_eq("uuid", PVUUID_DOMAIN_INFO.clone()),
            f_eq("uuid", PVUUID_SYSTEM_INFO.clone()),
            f_eq("uuid", PVUUID_SYSTEM_CONFIG.clone()),
        ]));

        // Everything else: all entries except the two sets above, so no entry
        // is sent twice.
        let entry_filter = filter!(f_and!([
            f_pres("class"),
            f_andnot(f_or(vec![
                // These are from above!
                f_eq("class", PVCLASS_ATTRIBUTETYPE.clone()),
                f_eq("class", PVCLASS_CLASSTYPE.clone()),
                f_eq("uuid", PVUUID_DOMAIN_INFO.clone()),
                f_eq("uuid", PVUUID_SYSTEM_INFO.clone()),
                f_eq("uuid", PVUUID_SYSTEM_CONFIG.clone()),
            ])),
        ]));

        // Each search result is converted to its replication wire form,
        // filtering non-replicated attributes via the schema.
        let schema_entries = self
            .internal_search(schema_filter)
            .map(|ent| {
                ent.into_iter()
                    .map(|e| ReplEntryV1::new(e.as_ref(), schema))
                    .collect()
            })
            .map_err(|e| {
                error!("Failed to access schema entries");
                e
            })?;

        let meta_entries = self
            .internal_search(meta_filter)
            .map(|ent| {
                ent.into_iter()
                    .map(|e| ReplEntryV1::new(e.as_ref(), schema))
                    .collect()
            })
            .map_err(|e| {
                error!("Failed to access meta entries");
                e
            })?;

        let entries = self
            .internal_search(entry_filter)
            .map(|ent| {
                ent.into_iter()
                    .map(|e| ReplEntryV1::new(e.as_ref(), schema))
                    .collect()
            })
            .map_err(|e| {
                error!("Failed to access entries");
                e
            })?;

        Ok(ReplRefreshContext::V1 {
            domain_version,
            domain_uuid,
            schema_entries,
            meta_entries,
            entries,
        })
    }
}

View file

@ -1,6 +1,99 @@
// use crate::prelude::*;
use crate::prelude::*;
use std::collections::BTreeMap;
#[tokio::test]
async fn multiple_qs_setup() {
assert!(true);
#[qs_pair_test]
async fn test_repl_refresh_basic(server_a: &QueryServer, server_b: &QueryServer) {
    // Rebuild / refresh the content of server a with the content from b.

    // To ensure we have a spectrum of content, we do some setup here such as creating
    // tombstones.

    let mut server_a_txn = server_a.write(duration_from_epoch_now()).await;
    let mut server_b_txn = server_b.read().await;

    // First, build the refresh context.
    let refresh_context = server_b_txn
        .supplier_provide_refresh()
        .expect("Failed to build refresh");

    // Verify content of the refresh
    // eprintln!("{:#?}", refresh_context);

    // Apply it to the server
    assert!(server_a_txn
        .consumer_apply_refresh(&refresh_context)
        .and_then(|_| server_a_txn.commit())
        .is_ok());

    // Verify the content of server_a and server_b are identical.
    let mut server_a_txn = server_a.read().await;

    // Need same d_uuid
    assert_eq!(
        server_a_txn.get_domain_uuid(),
        server_b_txn.get_domain_uuid()
    );

    let domain_entry_a = server_a_txn
        .internal_search_uuid(UUID_DOMAIN_INFO)
        .expect("Failed to access domain info");

    let domain_entry_b = server_b_txn
        .internal_search_uuid(UUID_DOMAIN_INFO)
        .expect("Failed to access domain info");

    // Same d_vers / domain info.
    assert_eq!(domain_entry_a, domain_entry_b);

    trace!("a {:#?}", domain_entry_a.get_changestate());
    trace!("b {:#?}", domain_entry_b.get_changestate());

    // Compare that their change states are identical too.
    assert_eq!(
        domain_entry_a.get_changestate(),
        domain_entry_b.get_changestate()
    );

    // There is some metadata here we should also consider testing such as key
    // reloads? These are done at the IDM level, but this is QS level, so do we need to change
    // these tests? Or should they be separate repl tests later?
    assert_eq!(*server_a_txn.d_info, *server_b_txn.d_info);

    // Now assert everything else in the db matches.

    let entries_a = server_a_txn
        .internal_search(filter_all!(f_pres("class")))
        .map(|ents| {
            ents.into_iter()
                .map(|e| (e.get_uuid(), e))
                .collect::<BTreeMap<_, _>>()
        })
        .expect("Failed to access all entries");

    // BUGFIX: this previously searched server_a_txn again, which compared the
    // refreshed server to itself and made the comparison below a tautology.
    // We must read the supplier (server_b) here.
    let entries_b = server_b_txn
        .internal_search(filter_all!(f_pres("class")))
        .map(|ents| {
            ents.into_iter()
                .map(|e| (e.get_uuid(), e))
                .collect::<BTreeMap<_, _>>()
        })
        .expect("Failed to access all entries");

    // Basically do a select * then put into btreemaps and compare them all.

    // Need to have the same length!
    assert_eq!(entries_a.len(), entries_b.len());

    // We don't use the uuid-keys here since these are compared internally, they are
    // just to sort the two sets.
    std::iter::zip(entries_a.values(), entries_b.values()).for_each(|(ent_a, ent_b)| {
        assert_eq!(ent_a, ent_b);
        assert_eq!(ent_a.get_changestate(), ent_b.get_changestate());
    });

    // Done! The entry content are identical as are their replication metadata. We are good
    // to go!

    // Both servers will be post-test validated.
}

View file

@ -90,6 +90,7 @@ pub struct SchemaAttribute {
pub unique: bool,
pub phantom: bool,
pub sync_allowed: bool,
pub replicated: bool,
pub index: Vec<IndexType>,
pub syntax: SyntaxType,
}
@ -136,10 +137,15 @@ impl SchemaAttribute {
admin_error!("missing unique - {}", name);
OperationError::InvalidSchemaState("missing unique".to_string())
})?;
let phantom = value.get_ava_single_bool("phantom").unwrap_or(false);
let sync_allowed = value.get_ava_single_bool("sync_allowed").unwrap_or(false);
// Default, all attributes are replicated unless you opt in for them to NOT be.
// Generally this is internal to the server only, so we don't advertise it.
let replicated = value.get_ava_single_bool("replicated").unwrap_or(true);
// index vec
// even if empty, it SHOULD be present ... (is that valid to put an empty set?)
// The get_ava_opt_index handles the optional case for us :)
@ -161,6 +167,7 @@ impl SchemaAttribute {
unique,
phantom,
sync_allowed,
replicated,
index,
syntax,
})
@ -486,6 +493,22 @@ pub trait SchemaTransaction {
res
}
fn is_replicated(&self, attr: &str) -> bool {
match self.get_attributes().get(attr) {
Some(a_schema) => {
// We'll likely add more conditions here later.
!(a_schema.phantom || !a_schema.replicated)
}
None => {
warn!(
"Attribute {} was not found in schema during replication request",
attr
);
false
}
}
}
fn is_multivalue(&self, attr: &str) -> Result<bool, SchemaError> {
match self.get_attributes().get(attr) {
Some(a_schema) => Ok(a_schema.multivalue),
@ -672,6 +695,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality, IndexType::Presence],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -688,6 +712,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality, IndexType::Presence],
syntax: SyntaxType::Uuid,
},
@ -704,6 +729,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Cid,
},
@ -718,6 +744,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: true,
phantom: false,
sync_allowed: true,
replicated: true,
index: vec![IndexType::Equality, IndexType::Presence],
syntax: SyntaxType::Utf8StringIname,
},
@ -734,6 +761,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: true,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::SecurityPrincipalName,
},
@ -748,6 +776,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: true,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -762,6 +791,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: true,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -776,6 +806,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: true,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8String,
},
@ -788,6 +819,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Boolean,
});
@ -799,6 +831,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Boolean,
});
@ -810,6 +843,19 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Boolean,
});
self.attributes.insert(AttrString::from("replicated"), SchemaAttribute {
name: AttrString::from("replicated"),
uuid: UUID_SCHEMA_ATTR_REPLICATED,
description: String::from("If true, this attribute or class can by replicated between nodes in the topology"),
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Boolean,
});
@ -825,6 +871,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Boolean,
},
@ -841,6 +888,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::IndexId,
},
@ -857,6 +905,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::SyntaxId,
},
@ -873,6 +922,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -889,6 +939,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -905,6 +956,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -921,6 +973,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -936,7 +989,8 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -952,7 +1006,8 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -969,6 +1024,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -984,7 +1040,8 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1001,7 +1058,8 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Boolean,
},
@ -1019,6 +1077,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality, IndexType::SubString],
syntax: SyntaxType::JsonFilter,
},
@ -1035,6 +1094,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::ReferenceUuid,
},
@ -1052,6 +1112,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality, IndexType::SubString],
syntax: SyntaxType::JsonFilter,
},
@ -1068,6 +1129,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1082,6 +1144,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1098,6 +1161,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1115,6 +1179,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1131,6 +1196,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1144,7 +1210,8 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1160,6 +1227,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::ReferenceUuid,
},
@ -1174,6 +1242,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::ReferenceUuid,
},
@ -1188,6 +1257,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: true,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::ReferenceUuid,
},
@ -1205,6 +1275,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Uint32,
},
@ -1220,6 +1291,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringIname,
},
@ -1236,6 +1308,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1252,6 +1325,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1270,6 +1344,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: true,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1286,6 +1361,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::ReferenceUuid,
},
@ -1300,6 +1376,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: false,
sync_allowed: false,
replicated: true,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1315,6 +1392,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: true,
replicated: false,
index: vec![],
syntax: SyntaxType::Utf8String,
},
@ -1330,6 +1408,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: true,
replicated: false,
index: vec![],
syntax: SyntaxType::TotpSecret,
},
@ -1346,6 +1425,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1360,6 +1440,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1374,6 +1455,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::Uuid,
},
@ -1388,6 +1470,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1402,6 +1485,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::Utf8StringIname,
},
@ -1416,6 +1500,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::SshKey,
},
@ -1430,6 +1515,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::SshKey,
},
@ -1444,6 +1530,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::EmailAddress,
},
@ -1458,6 +1545,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::EmailAddress,
},
@ -1472,6 +1560,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::EmailAddress,
},
@ -1486,6 +1575,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::EmailAddress,
},
@ -1500,6 +1590,7 @@ impl<'a> SchemaWriteTransaction<'a> {
unique: false,
phantom: true,
sync_allowed: false,
replicated: false,
index: vec![],
syntax: SyntaxType::Uint32,
},
@ -1513,6 +1604,7 @@ impl<'a> SchemaWriteTransaction<'a> {
uuid: UUID_SCHEMA_CLASS_ATTRIBUTETYPE,
description: String::from("Definition of a schema attribute"),
systemmay: vec![
AttrString::from("replicated"),
AttrString::from("phantom"),
AttrString::from("sync_allowed"),
AttrString::from("index"),

View file

@ -2429,7 +2429,7 @@ mod tests {
// Test allowed to create
test_acp_create!(&ce_admin, vec![acp.clone()], &r1_set, true);
// Test Fails due to protected from sync object
test_acp_create!(&ce_admin, vec![acp.clone()], &r2_set, false);
test_acp_create!(&ce_admin, vec![acp], &r2_set, false);
}
#[test]
@ -2601,6 +2601,6 @@ mod tests {
// Test reject rem
test_acp_modify!(&me_rem, vec![acp_allow.clone()], &r2_set, false);
// Test reject purge
test_acp_modify!(&me_purge, vec![acp_allow.clone()], &r2_set, false);
test_acp_modify!(&me_purge, vec![acp_allow], &r2_set, false);
}
}

View file

@ -67,7 +67,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
// Now, normalise AND validate!
let res: Result<Vec<Entry<EntrySealed, EntryNew>>, OperationError> = candidates
let norm_cand = candidates
.into_iter()
.map(|e| {
e.validate(&self.schema)
@ -80,9 +80,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
e.seal(&self.schema)
})
})
.collect();
let norm_cand: Vec<Entry<_, _>> = res?;
.collect::<Result<Vec<EntrySealedNew>, _>>()?;
// Run any pre-create plugins now with schema validated entries.
// This is important for normalisation of certain types IE class

View file

@ -79,8 +79,8 @@ impl QueryServer {
admin_debug!(?system_info_version);
if system_info_version > 0 {
if system_info_version <= 6 {
error!("Your instance of Kanidm is version 1.1.0-alpha.9 or lower, and you are trying to perform a skip upgrade. This will not work.");
if system_info_version <= 9 {
error!("Your instance of Kanidm is version 1.1.0-alpha.10 or lower, and you are trying to perform a skip upgrade. This will not work.");
error!("You need to upgrade one version at a time to ensure upgrade migrations are performed in the correct order.");
return Err(OperationError::InvalidState);
}
@ -103,6 +103,10 @@ impl QueryServer {
ts_write_3.commit()
})?;
// Here is where in the future we will need to apply domain version increments.
// The actually migrations are done in a transaction though, this just needs to
// bump the version in it's own transaction.
admin_debug!("Database version check and migrations success! ☀️ ");
Ok(())
}

View file

@ -55,18 +55,19 @@ enum ServerPhase {
Running,
}
#[derive(Debug, Clone)]
struct DomainInfo {
d_uuid: Uuid,
d_name: String,
d_display: String,
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DomainInfo {
pub(crate) d_uuid: Uuid,
pub(crate) d_name: String,
pub(crate) d_display: String,
pub(crate) d_vers: DomainVersion,
}
#[derive(Clone)]
pub struct QueryServer {
phase: Arc<CowCell<ServerPhase>>,
s_uuid: Uuid,
d_info: Arc<CowCell<DomainInfo>>,
pub(crate) d_info: Arc<CowCell<DomainInfo>>,
be: Backend,
schema: Arc<Schema>,
accesscontrols: Arc<AccessControls>,
@ -81,7 +82,7 @@ pub struct QueryServerReadTransaction<'a> {
be_txn: BackendReadTransaction<'a>,
// Anything else? In the future, we'll need to have a schema transaction
// type, maybe others?
d_info: CowCellReadTxn<DomainInfo>,
pub(crate) d_info: CowCellReadTxn<DomainInfo>,
schema: SchemaReadTransaction,
accesscontrols: AccessControlsReadTransaction<'a>,
_db_ticket: SemaphorePermit<'a>,
@ -99,18 +100,18 @@ pub struct QueryServerWriteTransaction<'a> {
d_info: CowCellWriteTxn<'a, DomainInfo>,
curtime: Duration,
cid: Cid,
be_txn: BackendWriteTransaction<'a>,
schema: SchemaWriteTransaction<'a>,
pub(crate) be_txn: BackendWriteTransaction<'a>,
pub(crate) schema: SchemaWriteTransaction<'a>,
accesscontrols: AccessControlsWriteTransaction<'a>,
// We store a set of flags that indicate we need a reload of
// schema or acp, which is tested by checking the classes of the
// changing content.
changed_schema: bool,
changed_acp: bool,
changed_oauth2: bool,
changed_domain: bool,
pub(crate) changed_schema: bool,
pub(crate) changed_acp: bool,
pub(crate) changed_oauth2: bool,
pub(crate) changed_domain: bool,
// Store the list of changed uuids for other invalidation needs?
changed_uuid: HashSet<Uuid>,
pub(crate) changed_uuid: HashSet<Uuid>,
_db_ticket: SemaphorePermit<'a>,
_write_ticket: SemaphorePermit<'a>,
resolve_filter_cache:
@ -642,7 +643,7 @@ pub trait QueryServerTransaction<'a> {
Some(v) => v.to_proto_string_clone(),
None => uuid_to_proto_string(*u),
};
Ok(format!("{}: {:?}", u, m))
Ok(format!("{u}: {m:?}"))
})
.collect();
v
@ -663,7 +664,7 @@ pub trait QueryServerTransaction<'a> {
.copied()
.map(|ur| {
let rdn = self.uuid_to_rdn(ur)?;
Ok(format!("{},{}", rdn, basedn).into_bytes())
Ok(format!("{rdn},{basedn}").into_bytes())
})
.collect();
v
@ -926,6 +927,9 @@ impl QueryServer {
let d_info = Arc::new(CowCell::new(DomainInfo {
d_uuid,
// Start with our minimum supported level.
// This will be reloaded from the DB shortly :)
d_vers: DOMAIN_MIN_LEVEL,
d_name: domain_name.clone(),
// we set the domain_display_name to the configuration file's domain_name
// here because the database is not started, so we cannot pull it from there.
@ -1009,7 +1013,7 @@ impl QueryServer {
let ts_max = be_txn
.get_db_ts_max(curtime)
.expect("Unable to get db_ts_max");
let cid = Cid::new_lamport(self.s_uuid, d_info.d_uuid, curtime, &ts_max);
let cid = Cid::new_lamport(self.s_uuid, curtime, &ts_max);
QueryServerWriteTransaction {
// I think this is *not* needed, because commit is mut self which should
@ -1054,7 +1058,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
}
#[instrument(level = "debug", name = "reload_schema", skip(self))]
fn reload_schema(&mut self) -> Result<(), OperationError> {
pub(crate) fn reload_schema(&mut self) -> Result<(), OperationError> {
// supply entries to the writable schema to reload from.
// find all attributes.
let filt = filter!(f_eq("class", PVCLASS_ATTRIBUTETYPE.clone()));
@ -1266,10 +1270,19 @@ impl<'a> QueryServerWriteTransaction<'a> {
/// Pulls the domain name from the database and updates the DomainInfo data in memory
#[instrument(level = "debug", skip_all)]
fn reload_domain_info(&mut self) -> Result<(), OperationError> {
pub(crate) fn reload_domain_info(&mut self) -> Result<(), OperationError> {
let domain_name = self.get_db_domain_name()?;
let display_name = self.get_db_domain_display_name()?;
let domain_uuid = self.be_txn.get_db_d_uuid();
let mut_d_info = self.d_info.get_mut();
if mut_d_info.d_uuid != domain_uuid {
admin_warn!(
"Using domain uuid from the database {} - was {} in memory",
domain_name,
mut_d_info.d_name,
);
mut_d_info.d_uuid = domain_uuid;
}
if mut_d_info.d_name != domain_name {
admin_warn!(
"Using domain name from the database {} - was {} in memory",
@ -1401,23 +1414,18 @@ impl<'a> QueryServerWriteTransaction<'a> {
// Write the cid to the db. If this fails, we can't assume replication
// will be stable, so return if it fails.
be_txn.set_db_ts_max(cid.ts)?;
// Validate the schema as we just loaded it.
let r = schema.validate();
if r.is_empty() {
// Schema has been validated, so we can go ahead and commit it with the be
// because both are consistent.
schema
.commit()
.map(|_| d_info.commit())
.map(|_| phase.commit())
.map(|_| dyngroup_cache.commit())
.and_then(|_| accesscontrols.commit())
.and_then(|_| be_txn.commit())
} else {
Err(OperationError::ConsistencyError(r))
}
// Audit done
// Point of no return - everything has been validated and reloaded.
//
// = Lets commit =
schema
.commit()
.map(|_| d_info.commit())
.map(|_| phase.commit())
.map(|_| dyngroup_cache.commit())
.and_then(|_| accesscontrols.commit())
.and_then(|_| be_txn.commit())
}
}
@ -1510,7 +1518,7 @@ mod tests {
assert!(r1 == Ok(None));
// Name does exist
let r3 = server_txn.uuid_to_spn(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"));
println!("{:?}", r3);
println!("{r3:?}");
assert!(r3.unwrap().unwrap() == Value::new_spn_str("testperson1", "example.com"));
// Name is not syntax normalised (but exists)
let r4 = server_txn.uuid_to_spn(uuid!("CC8E95B4-C24F-4D68-BA54-8BED76F63930"));
@ -1543,7 +1551,7 @@ mod tests {
assert!(r1.unwrap() == "uuid=bae3f507-e6c3-44ba-ad01-f8ff1083534a");
// Name does exist
let r3 = server_txn.uuid_to_rdn(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"));
println!("{:?}", r3);
println!("{r3:?}");
assert!(r3.unwrap() == "spn=testperson1@example.com");
// Uuid is not syntax normalised (but exists)
let r4 = server_txn.uuid_to_rdn(uuid!("CC8E95B4-C24F-4D68-BA54-8BED76F63930"));

View file

@ -684,7 +684,7 @@ impl PartialValue {
| PartialValue::SshKey(tag) => tag.to_string(),
// This will never match as we never index radius creds! See generate_idx_eq_keys
PartialValue::SecretValue | PartialValue::PrivateBinary => "_".to_string(),
PartialValue::Spn(name, realm) => format!("{}@{}", name, realm),
PartialValue::Spn(name, realm) => format!("{name}@{realm}"),
PartialValue::Uint32(u) => u.to_string(),
// This will never work, we don't allow equality searching on Cid's
PartialValue::Cid(_) => "_".to_string(),
@ -1503,10 +1503,10 @@ impl Value {
let fp = spk.fingerprint();
format!("{}: {}", tag, fp.hash)
}
Err(_) => format!("{}: corrupted ssh public key", tag),
Err(_) => format!("{tag}: corrupted ssh public key"),
}
}
Value::Spn(n, r) => format!("{}@{}", n, r),
Value::Spn(n, r) => format!("{n}@{r}"),
_ => unreachable!(),
}
}

View file

@ -4,6 +4,7 @@ use smolset::SmolSet;
use crate::be::dbvalue::DbValueAddressV1;
use crate::prelude::*;
use crate::repl::proto::{ReplAddressV1, ReplAttrV1};
use crate::schema::SchemaAttribute;
use crate::value::Address;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -49,6 +50,33 @@ impl ValueSetAddress {
.collect();
Ok(Box::new(ValueSetAddress { set }))
}
/// Rebuild an address value set from its replication (`ReplAddressV1`) wire form.
///
/// Construction is infallible; the `Result` is kept for symmetry with the
/// other `from_repl_v1` constructors.
pub fn from_repl_v1(data: &[ReplAddressV1]) -> Result<ValueSet, OperationError> {
    // Each replicated address maps field-for-field onto the in-memory Address.
    let set = data
        .iter()
        .map(|addr| Address {
            formatted: addr.formatted.clone(),
            street_address: addr.street_address.clone(),
            locality: addr.locality.clone(),
            region: addr.region.clone(),
            postal_code: addr.postal_code.clone(),
            country: addr.country.clone(),
        })
        .collect();
    Ok(Box::new(ValueSetAddress { set }))
}
}
impl FromIterator<Address> for Option<Box<ValueSetAddress>> {
@ -142,6 +170,23 @@ impl ValueSetT for ValueSetAddress {
)
}
/// Serialise this address set into its replication (`ReplAttrV1`) wire form.
/// Inverse of `from_repl_v1`; every field is deep-cloned into the wire struct.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::Address {
        set: self
            .set
            .iter()
            .map(|a| ReplAddressV1 {
                formatted: a.formatted.clone(),
                street_address: a.street_address.clone(),
                locality: a.locality.clone(),
                region: a.region.clone(),
                postal_code: a.postal_code.clone(),
                country: a.country.clone(),
            })
            .collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(
self.set
@ -217,6 +262,19 @@ impl ValueSetEmailAddress {
}
}
/// Rebuild an email-address value set from its replication wire form.
///
/// `primary` is borrowed as `&str` (rather than `&String`) — existing call
/// sites passing `&String` still compile via deref coercion.
///
/// # Errors
/// Returns `OperationError::InvalidValueState` if `primary` is not contained
/// in `data`: the primary address must always be a member of the set.
pub fn from_repl_v1(primary: &str, data: &[String]) -> Result<ValueSet, OperationError> {
    let set: BTreeSet<_> = data.iter().cloned().collect();
    // Invariant check: a replicated set whose primary is missing is corrupt.
    if set.contains(primary) {
        Ok(Box::new(ValueSetEmailAddress {
            primary: primary.to_string(),
            set,
        }))
    } else {
        Err(OperationError::InvalidValueState)
    }
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -337,6 +395,13 @@ impl ValueSetT for ValueSetEmailAddress {
DbValueSetV2::EmailAddress(self.primary.clone(), self.set.iter().cloned().collect())
}
/// Serialise the primary address and the full address set for replication.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::EmailAddress {
        primary: self.primary.clone(),
        set: self.set.iter().cloned().collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.set.iter().cloned().map(PartialValue::EmailAddress))
}

View file

@ -1,9 +1,11 @@
use base64urlsafedata::Base64UrlSafeData;
use std::collections::btree_map::Entry as BTreeEntry;
use std::collections::BTreeMap;
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -28,6 +30,11 @@ impl ValueSetPrivateBinary {
Ok(Box::new(ValueSetPrivateBinary { set }))
}
/// Rebuild a private-binary value set from its replication wire form.
pub fn from_repl_v1(data: &[Base64UrlSafeData]) -> Result<ValueSet, OperationError> {
    // Base64UrlSafeData is a newtype over Vec<u8>; take the raw bytes.
    let set = data.iter().map(|b| b.0.clone()).collect();
    Ok(Box::new(ValueSetPrivateBinary { set }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and vec is foreign
#[allow(clippy::should_implement_trait)]
@ -95,6 +102,12 @@ impl ValueSetT for ValueSetPrivateBinary {
DbValueSetV2::PrivateBinary(self.set.iter().cloned().collect())
}
/// Serialise this private-binary set for replication.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::PrivateBinary {
        // Vec<u8> -> Base64UrlSafeData via Into.
        set: self.set.iter().cloned().map(|b| b.into()).collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(
self.set
@ -160,6 +173,11 @@ impl ValueSetPublicBinary {
Ok(Box::new(ValueSetPublicBinary { map }))
}
/// Rebuild a tag -> bytes public-binary map from its replication wire form.
pub fn from_repl_v1(data: &[(String, Base64UrlSafeData)]) -> Result<ValueSet, OperationError> {
    // Unwrap each Base64UrlSafeData newtype into its raw Vec<u8>.
    let map = data.iter().map(|(k, v)| (k.clone(), v.0.clone())).collect();
    Ok(Box::new(ValueSetPublicBinary { map }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -243,6 +261,16 @@ impl ValueSetT for ValueSetPublicBinary {
)
}
/// Serialise this tagged public-binary map for replication.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::PublicBinary {
        set: self
            .map
            .iter()
            // (tag, bytes) pairs; bytes wrapped as Base64UrlSafeData via Into.
            .map(|(tag, bin)| (tag.clone(), bin.clone().into()))
            .collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.map.keys().cloned().map(PartialValue::PublicBinary))
}

View file

@ -1,6 +1,7 @@
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -25,6 +26,11 @@ impl ValueSetBool {
Ok(Box::new(ValueSetBool { set }))
}
/// Rebuild a boolean value set from its replication wire form.
pub fn from_repl_v1(data: &[bool]) -> Result<ValueSet, OperationError> {
    let set = data.iter().copied().collect();
    Ok(Box::new(ValueSetBool { set }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and bool is foreign.
#[allow(clippy::should_implement_trait)]
@ -101,6 +107,12 @@ impl ValueSetT for ValueSetBool {
DbValueSetV2::Bool(self.set.iter().cloned().collect())
}
/// Serialise this boolean set for replication.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::Bool {
        set: self.set.iter().cloned().collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.set.iter().copied().map(PartialValue::new_bool))
}

View file

@ -3,6 +3,7 @@ use smolset::SmolSet;
use crate::be::dbvalue::DbCidV1;
use crate::prelude::*;
use crate::repl::cid::Cid;
use crate::repl::proto::{ReplAttrV1, ReplCidV1};
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -26,13 +27,17 @@ impl ValueSetCid {
let set = data
.into_iter()
.map(|dc| Cid {
d_uuid: dc.domain_id,
s_uuid: dc.server_id,
ts: dc.timestamp,
})
.collect();
Ok(Box::new(ValueSetCid { set }))
}
/// Rebuild a CID value set from its replication wire form.
pub fn from_repl_v1(data: &[ReplCidV1]) -> Result<ValueSet, OperationError> {
    // &ReplCidV1 -> Cid via Into.
    let set = data.iter().map(|dc| dc.into()).collect();
    Ok(Box::new(ValueSetCid { set }))
}
}
impl FromIterator<Cid> for Option<Box<ValueSetCid>> {
@ -105,11 +110,7 @@ impl ValueSetT for ValueSetCid {
}
fn to_proto_string_clone_iter(&self) -> Box<dyn Iterator<Item = String> + '_> {
Box::new(
self.set
.iter()
.map(|c| format!("{:?}_{}_{}", c.ts, c.d_uuid, c.s_uuid)),
)
Box::new(self.set.iter().map(|c| format!("{:?}_{}", c.ts, c.s_uuid)))
}
fn to_db_valueset_v2(&self) -> DbValueSetV2 {
@ -117,7 +118,6 @@ impl ValueSetT for ValueSetCid {
self.set
.iter()
.map(|c| DbCidV1 {
domain_id: c.d_uuid,
server_id: c.s_uuid,
timestamp: c.ts,
})
@ -125,6 +125,12 @@ impl ValueSetT for ValueSetCid {
)
}
/// Serialise this CID set for replication (Cid -> ReplCidV1 via Into).
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::Cid {
        set: self.set.iter().map(|c| c.into()).collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.set.iter().cloned().map(PartialValue::new_cid))
}

View file

@ -8,6 +8,9 @@ use crate::be::dbvalue::{
};
use crate::credential::Credential;
use crate::prelude::*;
use crate::repl::proto::{
ReplAttrV1, ReplCredV1, ReplDeviceKeyV4V1, ReplIntentTokenV1, ReplPasskeyV4V1,
};
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, IntentTokenState, ValueSet};
@ -40,6 +43,16 @@ impl ValueSetCredential {
Ok(Box::new(ValueSetCredential { map }))
}
/// Rebuild a credential value set from its replication wire form.
///
/// # Errors
/// Any single credential that fails to deserialise poisons the whole set
/// with `OperationError::InvalidValueState`.
pub fn from_repl_v1(data: &[ReplCredV1]) -> Result<ValueSet, OperationError> {
    let map = data
        .iter()
        .map(|dc| {
            Credential::try_from_repl_v1(dc).map_err(|()| OperationError::InvalidValueState)
        })
        .collect::<Result<_, _>>()?;
    Ok(Box::new(ValueSetCredential { map }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -125,6 +138,16 @@ impl ValueSetT for ValueSetCredential {
)
}
/// Serialise this credential map for replication; each credential serialises
/// itself together with its tag.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::Credential {
        set: self
            .map
            .iter()
            .map(|(tag, cred)| cred.to_repl_v1(tag.clone()))
            .collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.map.keys().cloned().map(PartialValue::Cred))
}
@ -214,6 +237,36 @@ impl ValueSetIntentToken {
Ok(Box::new(ValueSetIntentToken { map }))
}
/// Rebuild an intent-token map from its replication wire form.
///
/// Wire variants map 1:1 onto `IntentTokenState`, keyed by token id.
pub fn from_repl_v1(data: &[ReplIntentTokenV1]) -> Result<ValueSet, OperationError> {
    let map = data
        .iter()
        .map(|dits| match dits {
            ReplIntentTokenV1::Valid { token_id, max_ttl } => (
                token_id.clone(),
                IntentTokenState::Valid { max_ttl: *max_ttl },
            ),
            ReplIntentTokenV1::InProgress {
                token_id,
                max_ttl,
                session_id,
                session_ttl,
            } => (
                token_id.clone(),
                IntentTokenState::InProgress {
                    max_ttl: *max_ttl,
                    session_id: *session_id,
                    session_ttl: *session_ttl,
                },
            ),
            ReplIntentTokenV1::Consumed { token_id, max_ttl } => (
                token_id.clone(),
                IntentTokenState::Consumed { max_ttl: *max_ttl },
            ),
        })
        .collect();
    Ok(Box::new(ValueSetIntentToken { map }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -317,6 +370,35 @@ impl ValueSetT for ValueSetIntentToken {
)
}
/// Serialise this intent-token map for replication.
/// Exact inverse of `from_repl_v1`: each state variant becomes the matching
/// wire variant carrying its token id.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::IntentToken {
        set: self
            .map
            .iter()
            .map(|(u, s)| match s {
                IntentTokenState::Valid { max_ttl } => ReplIntentTokenV1::Valid {
                    token_id: u.clone(),
                    max_ttl: *max_ttl,
                },
                IntentTokenState::InProgress {
                    max_ttl,
                    session_id,
                    session_ttl,
                } => ReplIntentTokenV1::InProgress {
                    token_id: u.clone(),
                    max_ttl: *max_ttl,
                    session_id: *session_id,
                    session_ttl: *session_ttl,
                },
                IntentTokenState::Consumed { max_ttl } => ReplIntentTokenV1::Consumed {
                    token_id: u.clone(),
                    max_ttl: *max_ttl,
                },
            })
            .collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.map.keys().cloned().map(PartialValue::IntentToken))
}
@ -378,6 +460,17 @@ impl ValueSetPasskey {
Ok(Box::new(ValueSetPasskey { map }))
}
/// Rebuild a passkey value set from its replication wire form, keyed by
/// credential uuid with `(tag, key)` as the value.
///
/// Construction is infallible — the original routed each element through an
/// irrefutable single-arm `match` and a needless `Result` collect; the
/// `Result` return is kept only for symmetry with the other constructors.
pub fn from_repl_v1(data: &[ReplPasskeyV4V1]) -> Result<ValueSet, OperationError> {
    let map = data
        .iter()
        .cloned()
        .map(|ReplPasskeyV4V1 { uuid, tag, key }| (uuid, (tag, key)))
        .collect();
    Ok(Box::new(ValueSetPasskey { map }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -467,6 +560,20 @@ impl ValueSetT for ValueSetPasskey {
)
}
/// Serialise this passkey map for replication (uuid, tag, key per entry).
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::Passkey {
        set: self
            .map
            .iter()
            .map(|(u, (t, k))| ReplPasskeyV4V1 {
                uuid: *u,
                tag: t.clone(),
                key: k.clone(),
            })
            .collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.map.keys().cloned().map(PartialValue::Passkey))
}
@ -537,6 +644,17 @@ impl ValueSetDeviceKey {
Ok(Box::new(ValueSetDeviceKey { map }))
}
/// Rebuild a device-key value set from its replication wire form, keyed by
/// credential uuid with `(tag, key)` as the value.
///
/// Construction is infallible — the original routed each element through an
/// irrefutable single-arm `match` and a needless `Result` collect; the
/// `Result` return is kept only for symmetry with the other constructors.
pub fn from_repl_v1(data: &[ReplDeviceKeyV4V1]) -> Result<ValueSet, OperationError> {
    let map = data
        .iter()
        .cloned()
        .map(|ReplDeviceKeyV4V1 { uuid, tag, key }| (uuid, (tag, key)))
        .collect();
    Ok(Box::new(ValueSetDeviceKey { map }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -626,6 +744,20 @@ impl ValueSetT for ValueSetDeviceKey {
)
}
/// Serialise this device-key map for replication (uuid, tag, key per entry).
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::DeviceKey {
        set: self
            .map
            .iter()
            .map(|(u, (t, k))| ReplDeviceKeyV4V1 {
                uuid: *u,
                tag: t.clone(),
                key: k.clone(),
            })
            .collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.map.keys().copied().map(PartialValue::DeviceKey))
}

View file

@ -2,6 +2,7 @@ use smolset::SmolSet;
use time::OffsetDateTime;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -33,6 +34,18 @@ impl ValueSetDateTime {
Ok(Box::new(ValueSetDateTime { set }))
}
/// Rebuild a datetime value set from replicated RFC 3339 strings.
///
/// # Errors
/// Any string that fails RFC 3339 parsing yields `InvalidValueState`.
pub fn from_repl_v1(data: &[String]) -> Result<ValueSet, OperationError> {
    let set = data
        .iter()
        .map(|s| {
            // Normalise every parsed time to UTC on ingest.
            OffsetDateTime::parse(s, time::Format::Rfc3339)
                .map(|odt| odt.to_offset(time::UtcOffset::UTC))
                .map_err(|_| OperationError::InvalidValueState)
        })
        .collect::<Result<_, _>>()?;
    Ok(Box::new(ValueSetDateTime { set }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and offset date time is foreign
#[allow(clippy::should_implement_trait)]
@ -123,6 +136,19 @@ impl ValueSetT for ValueSetDateTime {
)
}
/// Serialise this datetime set for replication as RFC 3339 strings.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::DateTime {
        set: self
            .set
            .iter()
            .map(|odt| {
                // Values are normalised to UTC on ingest (see from_repl_v1 /
                // from_dbvs2), so a non-UTC offset here is a bug.
                debug_assert!(odt.offset() == time::UtcOffset::UTC);
                odt.format(time::Format::Rfc3339)
            })
            .collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.set.iter().cloned().map(PartialValue::DateTime))
}

View file

@ -1,6 +1,7 @@
use std::collections::BTreeSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -25,6 +26,11 @@ impl ValueSetIname {
Ok(Box::new(ValueSetIname { set }))
}
/// Rebuild an iname value set from its replication wire form.
pub fn from_repl_v1(data: &[String]) -> Result<ValueSet, OperationError> {
    let set = data.iter().cloned().collect();
    Ok(Box::new(ValueSetIname { set }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and str is foreign
#[allow(clippy::should_implement_trait)]
@ -107,6 +113,12 @@ impl ValueSetT for ValueSetIname {
DbValueSetV2::Iname(self.set.iter().cloned().collect())
}
/// Serialise this iname set for replication.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::Iname {
        set: self.set.iter().cloned().collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.set.iter().map(|i| PartialValue::new_iname(i.as_str())))
}

View file

@ -1,6 +1,7 @@
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -26,6 +27,12 @@ impl ValueSetIndex {
Ok(Box::new(ValueSetIndex { set }))
}
/// Rebuild an index-type value set from its replicated `u16` discriminants.
///
/// # Errors
/// Any value that does not map to a known `IndexType` fails the whole set
/// with `InvalidValueState`.
pub fn from_repl_v1(data: &[u16]) -> Result<ValueSet, OperationError> {
    let set = data
        .iter()
        .copied()
        .map(IndexType::try_from)
        .collect::<Result<_, _>>()
        .map_err(|_| OperationError::InvalidValueState)?;
    Ok(Box::new(ValueSetIndex { set }))
}
// We need to allow this, because there seems to be a bug using it fromiterator in entry.rs
#[allow(clippy::should_implement_trait)]
pub fn from_iter<T>(iter: T) -> Option<Box<ValueSetIndex>>
@ -101,6 +108,12 @@ impl ValueSetT for ValueSetIndex {
DbValueSetV2::IndexType(self.set.iter().map(|s| *s as u16).collect())
}
/// Serialise this index-type set for replication as u16 discriminants.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::IndexType {
        set: self.set.iter().map(|s| *s as u16).collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.set.iter().copied().map(PartialValue::Index))
}

View file

@ -2,6 +2,7 @@ use std::collections::BTreeSet;
use super::iname::ValueSetIname;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -26,6 +27,11 @@ impl ValueSetIutf8 {
Ok(Box::new(ValueSetIutf8 { set }))
}
/// Rebuild an iutf8 value set from its replication wire form.
pub fn from_repl_v1(data: &[String]) -> Result<ValueSet, OperationError> {
    let set = data.iter().cloned().collect();
    Ok(Box::new(ValueSetIutf8 { set }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and str is foreign.
#[allow(clippy::should_implement_trait)]
@ -108,6 +114,12 @@ impl ValueSetT for ValueSetIutf8 {
DbValueSetV2::Iutf8(self.set.iter().cloned().collect())
}
/// Serialise this iutf8 set for replication.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::Iutf8 {
        set: self.set.iter().cloned().collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.set.iter().map(|i| PartialValue::new_iutf8(i.as_str())))
}

View file

@ -2,6 +2,7 @@ use kanidm_proto::v1::Filter as ProtoFilter;
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -21,10 +22,18 @@ impl ValueSetJsonFilter {
self.set.insert(b)
}
pub fn from_dbvs2(data: Vec<String>) -> Result<ValueSet, OperationError> {
pub fn from_dbvs2(data: &[String]) -> Result<ValueSet, OperationError> {
let set = data
.into_iter()
.map(|s| serde_json::from_str(&s).map_err(|_| OperationError::SerdeJsonError))
.iter()
.map(|s| serde_json::from_str(s).map_err(|_| OperationError::SerdeJsonError))
.collect::<Result<_, _>>()?;
Ok(Box::new(ValueSetJsonFilter { set }))
}
/// Rebuild a JSON-filter value set from replicated JSON strings.
///
/// # Errors
/// Any string that is not valid JSON for a filter yields `SerdeJsonError`.
pub fn from_repl_v1(data: &[String]) -> Result<ValueSet, OperationError> {
    let set = data
        .iter()
        .map(|s| serde_json::from_str(s).map_err(|_| OperationError::SerdeJsonError))
        .collect::<Result<_, _>>()?;
    Ok(Box::new(ValueSetJsonFilter { set }))
}
@ -123,6 +132,20 @@ impl ValueSetT for ValueSetJsonFilter {
)
}
/// Serialise this JSON-filter set for replication as JSON strings.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::JsonFilter {
        set: self
            .set
            .iter()
            .map(|s| {
                // Values were parsed from JSON on the way in, so
                // re-serialisation can only fail on memory corruption.
                #[allow(clippy::expect_used)]
                serde_json::to_string(s)
                    .expect("A json filter value was corrupted during run-time")
            })
            .collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.set.iter().cloned().map(PartialValue::JsonFilt))
}

View file

@ -1,7 +1,9 @@
use base64urlsafedata::Base64UrlSafeData;
use compact_jwt::{JwaAlg, JwsSigner};
use hashbrown::HashSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -36,6 +38,19 @@ impl ValueSetJwsKeyEs256 {
Ok(Box::new(ValueSetJwsKeyEs256 { set }))
}
/// Rebuild an ES256 JWS signing-key set from replicated DER blobs.
///
/// # Errors
/// Any blob that fails ES256 DER parsing yields `InvalidValueState`.
pub fn from_repl_v1(data: &[Base64UrlSafeData]) -> Result<ValueSet, OperationError> {
    let set = data
        .iter()
        .map(|b| {
            JwsSigner::from_es256_der(b.0.as_slice()).map_err(|e| {
                debug!(?e, "Error occurred parsing ES256 DER");
                OperationError::InvalidValueState
            })
        })
        .collect::<Result<HashSet<_>, _>>()?;
    Ok(Box::new(ValueSetJwsKeyEs256 { set }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and jwssigner is foreign
#[allow(clippy::should_implement_trait)]
@ -109,12 +124,24 @@ impl ValueSetT for ValueSetJwsKeyEs256 {
fn to_db_valueset_v2(&self) -> DbValueSetV2 {
DbValueSetV2::JwsKeyEs256(self.set.iter()
.filter_map(|k| k.private_key_to_der()
.map_err(|e| {
error!(?e, "Unable to process private key to der, likely corrupted - this key will be LOST");
})
.ok())
.collect())
.map(|k| {
#[allow(clippy::expect_used)]
k.private_key_to_der()
.expect("Unable to process private key to der, likely corrupted. You must restore from backup.")
})
.collect())
}
/// Serialise this ES256 key set for replication as DER blobs.
/// Panics (expect) rather than drop a key: losing key material silently
/// would be worse than failing loudly.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::JwsKeyEs256 { set: self.set.iter()
        .map(|k| {
            #[allow(clippy::expect_used)]
            k.private_key_to_der()
                .expect("Unable to process private key to der, likely corrupted. You must restore from backup.")
        })
        .map(|b| b.into())
        .collect()
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
@ -192,6 +219,19 @@ impl ValueSetJwsKeyRs256 {
Ok(Box::new(ValueSetJwsKeyRs256 { set }))
}
/// Rebuild an RS256 JWS signing-key set from replicated DER blobs.
///
/// # Errors
/// Any blob that fails RS256 DER parsing yields `InvalidValueState`.
pub fn from_repl_v1(data: &[Base64UrlSafeData]) -> Result<ValueSet, OperationError> {
    let set = data
        .iter()
        .map(|b| {
            JwsSigner::from_rs256_der(b.0.as_slice()).map_err(|e| {
                debug!(?e, "Error occurred parsing RS256 DER");
                OperationError::InvalidValueState
            })
        })
        .collect::<Result<HashSet<_>, _>>()?;
    Ok(Box::new(ValueSetJwsKeyRs256 { set }))
}
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and jwssigner is foreign
#[allow(clippy::should_implement_trait)]
@ -265,12 +305,24 @@ impl ValueSetT for ValueSetJwsKeyRs256 {
fn to_db_valueset_v2(&self) -> DbValueSetV2 {
DbValueSetV2::JwsKeyRs256(self.set.iter()
.filter_map(|k| k.private_key_to_der()
.map_err(|e| {
error!(?e, "Unable to process private key to der, likely corrupted - this key will be LOST");
})
.ok())
.collect())
.map(|k| {
#[allow(clippy::expect_used)]
k.private_key_to_der()
.expect("Unable to process private key to der, likely corrupted. You must restore from backup.")
})
.collect())
}
/// Serialise this RS256 key set for replication as DER blobs.
/// Panics (expect) rather than drop a key: losing key material silently
/// would be worse than failing loudly.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::JwsKeyRs256 { set: self.set.iter()
        .map(|k| {
            #[allow(clippy::expect_used)]
            k.private_key_to_der()
                .expect("Unable to process private key to der, likely corrupted. You must restore from backup.")
        })
        .map(|b| b.into())
        .collect()
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {

View file

@ -14,7 +14,7 @@ use webauthn_rs::prelude::Passkey as PasskeyV4;
use crate::be::dbvalue::DbValueSetV2;
use crate::credential::{totp::Totp, Credential};
use crate::prelude::*;
use crate::repl::cid::Cid;
use crate::repl::{cid::Cid, proto::ReplAttrV1};
use crate::schema::SchemaAttribute;
use crate::value::{Address, IntentTokenState, Oauth2Session, Session};
@ -107,6 +107,8 @@ pub trait ValueSetT: std::fmt::Debug + DynClone {
fn to_db_valueset_v2(&self) -> DbValueSetV2;
fn to_repl_v1(&self) -> ReplAttrV1;
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_>;
fn to_value_iter(&self) -> Box<dyn Iterator<Item = Value> + '_>;
@ -659,7 +661,7 @@ pub fn from_db_valueset_v2(dbvs: DbValueSetV2) -> Result<ValueSet, OperationErro
DbValueSetV2::RestrictedString(set) => ValueSetRestricted::from_dbvs2(set),
DbValueSetV2::Spn(set) => ValueSetSpn::from_dbvs2(set),
DbValueSetV2::Cid(set) => ValueSetCid::from_dbvs2(set),
DbValueSetV2::JsonFilter(set) => ValueSetJsonFilter::from_dbvs2(set),
DbValueSetV2::JsonFilter(set) => ValueSetJsonFilter::from_dbvs2(&set),
DbValueSetV2::NsUniqueId(set) => ValueSetNsUniqueId::from_dbvs2(set),
DbValueSetV2::Url(set) => ValueSetUrl::from_dbvs2(set),
DbValueSetV2::DateTime(set) => ValueSetDateTime::from_dbvs2(set),
@ -686,3 +688,44 @@ pub fn from_db_valueset_v2(dbvs: DbValueSetV2) -> Result<ValueSet, OperationErro
}
}
}
/// Deserialise a replicated attribute (`ReplAttrV1`) into the matching
/// in-memory `ValueSet` implementation.
///
/// This is the replication analogue of `from_db_valueset_v2`: one arm per
/// wire variant, dispatching to that value type's `from_repl_v1`
/// constructor. The match is exhaustive by design so that adding a new
/// `ReplAttrV1` variant forces a compile error here.
pub fn from_repl_v1(rv1: &ReplAttrV1) -> Result<ValueSet, OperationError> {
    match rv1 {
        ReplAttrV1::Iutf8 { set } => ValueSetIutf8::from_repl_v1(set),
        ReplAttrV1::Utf8 { set } => ValueSetUtf8::from_repl_v1(set),
        ReplAttrV1::IndexType { set } => ValueSetIndex::from_repl_v1(set),
        ReplAttrV1::SyntaxType { set } => ValueSetSyntax::from_repl_v1(set),
        ReplAttrV1::Cid { set } => ValueSetCid::from_repl_v1(set),
        ReplAttrV1::Bool { set } => ValueSetBool::from_repl_v1(set),
        ReplAttrV1::Uuid { set } => ValueSetUuid::from_repl_v1(set),
        ReplAttrV1::Uint32 { set } => ValueSetUint32::from_repl_v1(set),
        ReplAttrV1::Iname { set } => ValueSetIname::from_repl_v1(set),
        ReplAttrV1::PrivateBinary { set } => ValueSetPrivateBinary::from_repl_v1(set),
        ReplAttrV1::SecretValue { set } => ValueSetSecret::from_repl_v1(set),
        ReplAttrV1::Reference { set } => ValueSetRefer::from_repl_v1(set),
        ReplAttrV1::JwsKeyEs256 { set } => ValueSetJwsKeyEs256::from_repl_v1(set),
        ReplAttrV1::JwsKeyRs256 { set } => ValueSetJwsKeyRs256::from_repl_v1(set),
        ReplAttrV1::Spn { set } => ValueSetSpn::from_repl_v1(set),
        ReplAttrV1::JsonFilter { set } => ValueSetJsonFilter::from_repl_v1(set),
        ReplAttrV1::UiHint { set } => ValueSetUiHint::from_repl_v1(set),
        ReplAttrV1::Address { set } => ValueSetAddress::from_repl_v1(set),
        ReplAttrV1::EmailAddress { primary, set } => {
            ValueSetEmailAddress::from_repl_v1(primary, set)
        }
        ReplAttrV1::PublicBinary { set } => ValueSetPublicBinary::from_repl_v1(set),
        ReplAttrV1::Credential { set } => ValueSetCredential::from_repl_v1(set),
        ReplAttrV1::IntentToken { set } => ValueSetIntentToken::from_repl_v1(set),
        ReplAttrV1::Passkey { set } => ValueSetPasskey::from_repl_v1(set),
        ReplAttrV1::DeviceKey { set } => ValueSetDeviceKey::from_repl_v1(set),
        ReplAttrV1::DateTime { set } => ValueSetDateTime::from_repl_v1(set),
        ReplAttrV1::Url { set } => ValueSetUrl::from_repl_v1(set),
        ReplAttrV1::NsUniqueId { set } => ValueSetNsUniqueId::from_repl_v1(set),
        ReplAttrV1::RestrictedString { set } => ValueSetRestricted::from_repl_v1(set),
        ReplAttrV1::SshKey { set } => ValueSetSshKey::from_repl_v1(set),
        ReplAttrV1::OauthScope { set } => ValueSetOauthScope::from_repl_v1(set),
        ReplAttrV1::OauthScopeMap { set } => ValueSetOauthScopeMap::from_repl_v1(set),
        ReplAttrV1::Oauth2Session { set } => ValueSetOauth2Session::from_repl_v1(set),
        ReplAttrV1::Session { set } => ValueSetSession::from_repl_v1(set),
        ReplAttrV1::TotpSecret { set } => ValueSetTotpSecret::from_repl_v1(set),
    }
}

View file

@ -1,6 +1,7 @@
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::value::NSUNIQUEID_RE;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -26,6 +27,11 @@ impl ValueSetNsUniqueId {
Ok(Box::new(ValueSetNsUniqueId { set }))
}
/// Rebuild an nsUniqueId value set from its replication wire form.
pub fn from_repl_v1(data: &[String]) -> Result<ValueSet, OperationError> {
    let set = data.iter().cloned().collect();
    Ok(Box::new(ValueSetNsUniqueId { set }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and String is foreign.
#[allow(clippy::should_implement_trait)]
@ -102,6 +108,12 @@ impl ValueSetT for ValueSetNsUniqueId {
DbValueSetV2::NsUniqueId(self.set.iter().cloned().collect())
}
/// Serialise this nsUniqueId set for replication.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::NsUniqueId {
        set: self.set.iter().cloned().collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.set.iter().cloned().map(PartialValue::Nsuniqueid))
}

View file

@ -3,6 +3,7 @@ use std::collections::{BTreeMap, BTreeSet};
use crate::be::dbvalue::DbValueOauthScopeMapV1;
use crate::prelude::*;
use crate::repl::proto::{ReplAttrV1, ReplOauthScopeMapV1};
use crate::schema::SchemaAttribute;
use crate::value::OAUTHSCOPE_RE;
use crate::valueset::{uuid_to_proto_string, DbValueSetV2, ValueSet};
@ -28,6 +29,11 @@ impl ValueSetOauthScope {
Ok(Box::new(ValueSetOauthScope { set }))
}
/// Rebuild an OAuth scope value set from its replication wire form.
pub fn from_repl_v1(data: &[String]) -> Result<ValueSet, OperationError> {
    let set = data.iter().cloned().collect();
    Ok(Box::new(ValueSetOauthScope { set }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and String is foreign.
#[allow(clippy::should_implement_trait)]
@ -104,6 +110,12 @@ impl ValueSetT for ValueSetOauthScope {
DbValueSetV2::OauthScope(self.set.iter().cloned().collect())
}
/// Serialise this OAuth scope set for replication.
fn to_repl_v1(&self) -> ReplAttrV1 {
    ReplAttrV1::OauthScope {
        set: self.set.iter().cloned().collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.set.iter().cloned().map(PartialValue::OauthScope))
}
@ -168,11 +180,15 @@ impl ValueSetOauthScopeMap {
pub fn from_dbvs2(data: Vec<DbValueOauthScopeMapV1>) -> Result<ValueSet, OperationError> {
let map = data
.into_iter()
.map(|dbv| {
let u = dbv.refer;
let m = dbv.data.into_iter().collect();
(u, m)
})
.map(|DbValueOauthScopeMapV1 { refer, data }| (refer, data.into_iter().collect()))
.collect();
Ok(Box::new(ValueSetOauthScopeMap { map }))
}
/// Rebuild an OAuth scope map from its replication (v1) wire form.
/// Each wire entry pairs a referenced uuid with its scope set.
pub fn from_repl_v1(data: &[ReplOauthScopeMapV1]) -> Result<ValueSet, OperationError> {
    let map = data
        .iter()
        .map(|entry| (entry.refer, entry.data.clone()))
        .collect();
    Ok(Box::new(ValueSetOauthScopeMap { map }))
}
@ -281,6 +297,19 @@ impl ValueSetT for ValueSetOauthScopeMap {
)
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Serialise each (uuid -> scope set) entry into its wire struct.
    let set = self
        .map
        .iter()
        .map(|(refer, scopes)| ReplOauthScopeMapV1 {
            refer: *refer,
            data: scopes.iter().cloned().collect(),
        })
        .collect();
    ReplAttrV1::OauthScopeMap { set }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.map.keys().cloned().map(PartialValue::Refer))
}

View file

@ -1,6 +1,7 @@
use std::collections::BTreeSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -25,6 +26,11 @@ impl ValueSetRestricted {
Ok(Box::new(ValueSetRestricted { set }))
}
/// Rebuild a restricted-string value set from its replication (v1) wire form.
pub fn from_repl_v1(data: &[String]) -> Result<ValueSet, OperationError> {
    Ok(Box::new(ValueSetRestricted {
        set: data.iter().cloned().collect(),
    }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and String is foreign.
#[allow(clippy::should_implement_trait)]
@ -111,6 +117,12 @@ impl ValueSetT for ValueSetRestricted {
DbValueSetV2::RestrictedString(self.set.iter().cloned().collect())
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Emit the restricted strings in the replication wire format.
    let set = self.set.iter().cloned().collect();
    ReplAttrV1::RestrictedString { set }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // One partial value per stored restricted string.
    Box::new(
        self.set
            .iter()
            .map(|s| PartialValue::RestrictedString(s.clone())),
    )
}

View file

@ -1,6 +1,7 @@
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -25,6 +26,11 @@ impl ValueSetSecret {
Ok(Box::new(ValueSetSecret { set }))
}
/// Rebuild a secret value set from its replication (v1) wire form.
pub fn from_repl_v1(data: &[String]) -> Result<ValueSet, OperationError> {
    Ok(Box::new(ValueSetSecret {
        set: data.iter().cloned().collect(),
    }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and String is foreign.
#[allow(clippy::should_implement_trait)]
@ -92,6 +98,12 @@ impl ValueSetT for ValueSetSecret {
DbValueSetV2::SecretValue(self.set.iter().cloned().collect())
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Secrets replicate as their raw stored strings.
    let set = self.set.iter().cloned().collect();
    ReplAttrV1::SecretValue { set }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // One opaque marker per stored secret — the secret content itself is
    // deliberately never exposed through the partial-value interface.
    Box::new(self.set.iter().map(|_| PartialValue::SecretValue))
}

View file

@ -7,6 +7,9 @@ use crate::be::dbvalue::{
DbValueAccessScopeV1, DbValueIdentityId, DbValueOauth2Session, DbValueSession,
};
use crate::prelude::*;
use crate::repl::proto::{
ReplAccessScopeV1, ReplAttrV1, ReplIdentityIdV1, ReplOauth2SessionV1, ReplSessionV1,
};
use crate::schema::SchemaAttribute;
use crate::value::{Oauth2Session, Session};
use crate::valueset::{uuid_to_proto_string, DbValueSetV2, ValueSet};
@ -105,6 +108,83 @@ impl ValueSetSession {
Ok(Box::new(ValueSetSession { map }))
}
/// Rebuild a session value set from its replication (v1) wire form.
///
/// Any session whose `issued_at` or `expiry` timestamp fails RFC 3339
/// parsing is silently dropped (via `filter_map`), which invalidates it on
/// this replica rather than erroring the whole set. Timestamps are
/// normalised to UTC on ingest.
pub fn from_repl_v1(data: &[ReplSessionV1]) -> Result<ValueSet, OperationError> {
    let map = data
        .iter()
        .filter_map(
            |ReplSessionV1 {
                 refer,
                 label,
                 expiry,
                 issued_at,
                 issued_by,
                 scope,
             }| {
                // Convert things.
                let issued_at = OffsetDateTime::parse(issued_at, time::Format::Rfc3339)
                    .map(|odt| odt.to_offset(time::UtcOffset::UTC))
                    .map_err(|e| {
                        admin_error!(
                            ?e,
                            "Invalidating session {} due to invalid issued_at timestamp",
                            refer
                        )
                    })
                    .ok()?;
                // This is a bit annoying. In the case we can't parse the optional
                // expiry, we need to NOT return the session so that it's immediately
                // invalidated. To do this we have to invert some of the options involved
                // here.
                let expiry = expiry
                    .as_ref()
                    .map(|e_inner| {
                        OffsetDateTime::parse(e_inner, time::Format::Rfc3339)
                            .map(|odt| odt.to_offset(time::UtcOffset::UTC))
                        // We now have an
                        // Option<Result<ODT, _>>
                    })
                    .transpose()
                    // Result<Option<ODT>, _>
                    .map_err(|e| {
                        admin_error!(
                            ?e,
                            "Invalidating session {} due to invalid expiry timestamp",
                            refer
                        )
                    })
                    // Option<Option<ODT>>
                    .ok()?;
                // Map the wire identity/scope enums onto their in-memory forms.
                let issued_by = match issued_by {
                    ReplIdentityIdV1::Internal => IdentityId::Internal,
                    ReplIdentityIdV1::Uuid(u) => IdentityId::User(*u),
                    ReplIdentityIdV1::Synch(u) => IdentityId::Synch(*u),
                };
                let scope = match scope {
                    ReplAccessScopeV1::IdentityOnly => AccessScope::IdentityOnly,
                    ReplAccessScopeV1::ReadOnly => AccessScope::ReadOnly,
                    ReplAccessScopeV1::ReadWrite => AccessScope::ReadWrite,
                    ReplAccessScopeV1::Synchronise => AccessScope::Synchronise,
                };
                Some((
                    *refer,
                    Session {
                        label: label.to_string(),
                        expiry,
                        issued_at,
                        issued_by,
                        scope,
                    },
                ))
            },
        )
        .collect();
    Ok(Box::new(ValueSetSession { map }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -216,6 +296,38 @@ impl ValueSetT for ValueSetSession {
)
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Serialise every session for replication. Timestamps are emitted as
    // RFC 3339 strings; the debug_asserts document the invariant that all
    // stored OffsetDateTimes were normalised to UTC on ingest.
    ReplAttrV1::Session {
        set: self
            .map
            .iter()
            .map(|(u, m)| ReplSessionV1 {
                refer: *u,
                label: m.label.clone(),
                expiry: m.expiry.map(|odt| {
                    debug_assert!(odt.offset() == time::UtcOffset::UTC);
                    odt.format(time::Format::Rfc3339)
                }),
                issued_at: {
                    debug_assert!(m.issued_at.offset() == time::UtcOffset::UTC);
                    m.issued_at.format(time::Format::Rfc3339)
                },
                // Mirror of the wire->memory mapping in from_repl_v1.
                issued_by: match m.issued_by {
                    IdentityId::Internal => ReplIdentityIdV1::Internal,
                    IdentityId::User(u) => ReplIdentityIdV1::Uuid(u),
                    IdentityId::Synch(u) => ReplIdentityIdV1::Synch(u),
                },
                scope: match m.scope {
                    AccessScope::IdentityOnly => ReplAccessScopeV1::IdentityOnly,
                    AccessScope::ReadOnly => ReplAccessScopeV1::ReadOnly,
                    AccessScope::ReadWrite => ReplAccessScopeV1::ReadWrite,
                    AccessScope::Synchronise => ReplAccessScopeV1::Synchronise,
                },
            })
            .collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // Sessions are addressed by their uuid reference.
    Box::new(self.map.keys().map(|u| PartialValue::Refer(*u)))
}
@ -345,6 +457,71 @@ impl ValueSetOauth2Session {
Ok(Box::new(ValueSetOauth2Session { map, rs_filter }))
}
/// Rebuild an OAuth2 session value set from its replication (v1) wire form.
///
/// As with user sessions, any entry whose `issued_at` or `expiry` fails
/// RFC 3339 parsing is dropped (invalidated) rather than failing the set.
/// `rs_filter` collects the resource-server uuids of all surviving sessions
/// as a fast membership filter; note it is populated as a side effect
/// inside the filter_map closure, so only valid sessions contribute.
pub fn from_repl_v1(data: &[ReplOauth2SessionV1]) -> Result<ValueSet, OperationError> {
    let mut rs_filter = BTreeSet::new();
    let map = data
        .iter()
        .filter_map(
            |ReplOauth2SessionV1 {
                 refer,
                 parent,
                 expiry,
                 issued_at,
                 rs_uuid,
             }| {
                // Convert things.
                let issued_at = OffsetDateTime::parse(issued_at, time::Format::Rfc3339)
                    .map(|odt| odt.to_offset(time::UtcOffset::UTC))
                    .map_err(|e| {
                        admin_error!(
                            ?e,
                            "Invalidating session {} due to invalid issued_at timestamp",
                            refer
                        )
                    })
                    .ok()?;
                // This is a bit annoying. In the case we can't parse the optional
                // expiry, we need to NOT return the session so that it's immediately
                // invalidated. To do this we have to invert some of the options involved
                // here.
                let expiry = expiry
                    .as_ref()
                    .map(|e_inner| {
                        OffsetDateTime::parse(e_inner, time::Format::Rfc3339)
                            .map(|odt| odt.to_offset(time::UtcOffset::UTC))
                        // We now have an
                        // Option<Result<ODT, _>>
                    })
                    .transpose()
                    // Result<Option<ODT>, _>
                    .map_err(|e| {
                        admin_error!(
                            ?e,
                            "Invalidating session {} due to invalid expiry timestamp",
                            refer
                        )
                    })
                    // Option<Option<ODT>>
                    .ok()?;
                // Insert to the rs_filter.
                rs_filter.insert(*rs_uuid);
                Some((
                    *refer,
                    Oauth2Session {
                        parent: *parent,
                        expiry,
                        issued_at,
                        rs_uuid: *rs_uuid,
                    },
                ))
            },
        )
        .collect();
    Ok(Box::new(ValueSetOauth2Session { rs_filter, map }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -475,6 +652,28 @@ impl ValueSetT for ValueSetOauth2Session {
)
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Serialise every OAuth2 session for replication. Timestamps go out as
    // RFC 3339; the debug_asserts document the UTC-normalised invariant
    // established at ingest. rs_filter is derived state and is not
    // replicated — it is rebuilt in from_repl_v1.
    ReplAttrV1::Oauth2Session {
        set: self
            .map
            .iter()
            .map(|(u, m)| ReplOauth2SessionV1 {
                refer: *u,
                parent: m.parent,
                expiry: m.expiry.map(|odt| {
                    debug_assert!(odt.offset() == time::UtcOffset::UTC);
                    odt.format(time::Format::Rfc3339)
                }),
                issued_at: {
                    debug_assert!(m.issued_at.offset() == time::UtcOffset::UTC);
                    m.issued_at.format(time::Format::Rfc3339)
                },
                rs_uuid: m.rs_uuid,
            })
            .collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // OAuth2 sessions are addressed by their uuid reference.
    Box::new(self.map.keys().map(|u| PartialValue::Refer(*u)))
}

View file

@ -1,6 +1,7 @@
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -25,6 +26,11 @@ impl ValueSetSpn {
Ok(Box::new(ValueSetSpn { set }))
}
/// Rebuild an SPN value set from its replication (v1) wire form.
pub fn from_repl_v1(data: &[(String, String)]) -> Result<ValueSet, OperationError> {
    // (String, String) is Clone, so cloned() replaces the manual
    // per-field clone closure.
    let set = data.iter().cloned().collect();
    Ok(Box::new(ValueSetSpn { set }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -82,10 +88,7 @@ impl ValueSetT for ValueSetSpn {
}
fn generate_idx_eq_keys(&self) -> Vec<String> {
self.set
.iter()
.map(|(n, d)| format!("{}@{}", n, d))
.collect()
self.set.iter().map(|(n, d)| format!("{n}@{d}")).collect()
}
fn syntax(&self) -> SyntaxType {
@ -97,13 +100,19 @@ impl ValueSetT for ValueSetSpn {
}
fn to_proto_string_clone_iter(&self) -> Box<dyn Iterator<Item = String> + '_> {
Box::new(self.set.iter().map(|(n, d)| format!("{}@{}", n, d)))
Box::new(self.set.iter().map(|(n, d)| format!("{n}@{d}")))
}
fn to_db_valueset_v2(&self) -> DbValueSetV2 {
    // Persist the (name, domain) pairs as-is.
    let set = self.set.iter().cloned().collect();
    DbValueSetV2::Spn(set)
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Replicate the (name, domain) pairs as-is.
    let set = self.set.iter().cloned().collect();
    ReplAttrV1::Spn { set }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(
self.set

View file

@ -3,6 +3,7 @@ use std::collections::BTreeMap;
use crate::be::dbvalue::DbValueTaggedStringV1;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -27,6 +28,14 @@ impl ValueSetSshKey {
Ok(Box::new(ValueSetSshKey { map }))
}
/// Rebuild an SSH key value set (tag -> key material) from its
/// replication (v1) wire form.
pub fn from_repl_v1(data: &[(String, String)]) -> Result<ValueSet, OperationError> {
    // (String, String) is Clone, so cloned() replaces the manual
    // per-field clone closure.
    let map = data.iter().cloned().collect();
    Ok(Box::new(ValueSetSshKey { map }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -112,6 +121,16 @@ impl ValueSetT for ValueSetSshKey {
)
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Flatten the tag -> key map into (tag, key) wire pairs.
    let set = self
        .map
        .iter()
        .map(|(t, k)| (t.clone(), k.clone()))
        .collect();
    ReplAttrV1::SshKey { set }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
Box::new(self.map.keys().cloned().map(PartialValue::SshKey))
}

View file

@ -1,6 +1,7 @@
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -25,6 +26,12 @@ impl ValueSetSyntax {
let set = set.map_err(|_| OperationError::InvalidValueState)?;
Ok(Box::new(ValueSetSyntax { set }))
}
/// Rebuild a syntax-type value set from its replication (v1) wire form.
/// Any unknown discriminant fails the whole set.
pub fn from_repl_v1(data: &[u16]) -> Result<ValueSet, OperationError> {
    let set = data
        .iter()
        .copied()
        .map(SyntaxType::try_from)
        .collect::<Result<_, _>>()
        .map_err(|_| OperationError::InvalidValueState)?;
    Ok(Box::new(ValueSetSyntax { set }))
}
}
impl FromIterator<SyntaxType> for Option<Box<ValueSetSyntax>> {
@ -101,6 +108,12 @@ impl ValueSetT for ValueSetSyntax {
DbValueSetV2::SyntaxType(self.set.iter().map(|s| *s as u16).collect())
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Syntax types replicate as their u16 discriminants.
    let set = self.set.iter().map(|s| *s as u16).collect();
    ReplAttrV1::SyntaxType { set }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // One partial value per stored syntax type.
    Box::new(self.set.iter().map(|s| PartialValue::Syntax(*s)))
}

View file

@ -5,6 +5,7 @@ use crate::credential::totp::Totp;
use crate::prelude::*;
use crate::be::dbvalue::DbTotpV1;
use crate::repl::proto::{ReplAttrV1, ReplTotpV1};
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -36,6 +37,18 @@ impl ValueSetTotpSecret {
Ok(Box::new(ValueSetTotpSecret { map }))
}
/// Rebuild a TOTP-secret value set (label -> totp) from its replication
/// (v1) wire form. The whole set fails if any stored TOTP is invalid.
pub fn from_repl_v1(data: &[(String, ReplTotpV1)]) -> Result<ValueSet, OperationError> {
    let map: Result<_, _> = data
        .iter()
        .map(|(label, t)| {
            Totp::try_from(t)
                .map(|totp| (label.clone(), totp))
                .map_err(|()| OperationError::InvalidValueState)
        })
        .collect();
    Ok(Box::new(ValueSetTotpSecret { map: map? }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and tuples are always foreign.
#[allow(clippy::should_implement_trait)]
@ -118,6 +131,16 @@ impl ValueSetT for ValueSetTotpSecret {
)
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Serialise each labelled TOTP via its own wire conversion.
    let set = self
        .map
        .iter()
        .map(|(label, totp)| (label.clone(), totp.to_repl_v1()))
        .collect();
    ReplAttrV1::TotpSecret { set }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // TOTP entries are addressed by their label only.
    Box::new(self.map.keys().map(|k| PartialValue::Utf8(k.clone())))
}

View file

@ -1,6 +1,7 @@
use std::collections::BTreeSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -27,6 +28,12 @@ impl ValueSetUiHint {
let set = set.map_err(|_| OperationError::InvalidValueState)?;
Ok(Box::new(ValueSetUiHint { set }))
}
/// Rebuild a UI-hint value set from its replication (v1) wire form.
/// Any unknown discriminant fails the whole set.
pub fn from_repl_v1(data: &[u16]) -> Result<ValueSet, OperationError> {
    let set = data
        .iter()
        .copied()
        .map(UiHint::try_from)
        .collect::<Result<_, _>>()
        .map_err(|_| OperationError::InvalidValueState)?;
    Ok(Box::new(ValueSetUiHint { set }))
}
}
impl ValueSetT for ValueSetUiHint {
@ -90,6 +97,12 @@ impl ValueSetT for ValueSetUiHint {
DbValueSetV2::UiHint(self.set.iter().map(|u| *u as u16).collect())
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // UI hints replicate as their u16 discriminants.
    let set = self.set.iter().map(|u| *u as u16).collect();
    ReplAttrV1::UiHint { set }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // One partial value per stored hint.
    Box::new(self.set.iter().map(|u| PartialValue::UiHint(*u)))
}

View file

@ -1,6 +1,7 @@
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -25,6 +26,11 @@ impl ValueSetUint32 {
Ok(Box::new(ValueSetUint32 { set }))
}
/// Rebuild a u32 value set from its replication (v1) wire form.
pub fn from_repl_v1(data: &[u32]) -> Result<ValueSet, OperationError> {
    Ok(Box::new(ValueSetUint32 {
        set: data.iter().copied().collect(),
    }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and u32 is foreign.
#[allow(clippy::should_implement_trait)]
@ -104,6 +110,12 @@ impl ValueSetT for ValueSetUint32 {
DbValueSetV2::Uint32(self.set.iter().cloned().collect())
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // u32 is Copy; copied() states the intent more precisely than cloned()
    // and matches the from_repl_v1 counterpart.
    ReplAttrV1::Uint32 {
        set: self.set.iter().copied().collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // One partial value per stored integer.
    Box::new(self.set.iter().map(|v| PartialValue::new_uint32(*v)))
}

View file

@ -1,6 +1,7 @@
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -25,6 +26,11 @@ impl ValueSetUrl {
Ok(Box::new(ValueSetUrl { set }))
}
/// Rebuild a URL value set from its replication (v1) wire form.
pub fn from_repl_v1(data: &[Url]) -> Result<ValueSet, OperationError> {
    Ok(Box::new(ValueSetUrl {
        set: data.iter().cloned().collect(),
    }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and Url is foreign.
#[allow(clippy::should_implement_trait)]
@ -98,6 +104,12 @@ impl ValueSetT for ValueSetUrl {
DbValueSetV2::Url(self.set.iter().cloned().collect())
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Emit the owned URLs in the replication wire format.
    let set = self.set.iter().cloned().collect();
    ReplAttrV1::Url { set }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // One partial value per stored URL.
    Box::new(self.set.iter().map(|u| PartialValue::Url(u.clone())))
}

View file

@ -1,6 +1,7 @@
use std::collections::BTreeSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{DbValueSetV2, ValueSet};
@ -24,6 +25,11 @@ impl ValueSetUtf8 {
let set = data.into_iter().collect();
Ok(Box::new(ValueSetUtf8 { set }))
}
/// Rebuild a UTF-8 string value set from its replication (v1) wire form.
pub fn from_repl_v1(data: &[String]) -> Result<ValueSet, OperationError> {
    Ok(Box::new(ValueSetUtf8 {
        set: data.iter().cloned().collect(),
    }))
}
}
impl ValueSetT for ValueSetUtf8 {
@ -93,6 +99,12 @@ impl ValueSetT for ValueSetUtf8 {
DbValueSetV2::Utf8(self.set.iter().cloned().collect())
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Emit the owned strings in the replication wire format.
    let set = self.set.iter().cloned().collect();
    ReplAttrV1::Utf8 { set }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // Borrow each string into its partial-value form.
    Box::new(
        self.set
            .iter()
            .map(String::as_str)
            .map(PartialValue::new_utf8s),
    )
}

View file

@ -3,6 +3,7 @@ use std::collections::BTreeSet;
use smolset::SmolSet;
use crate::prelude::*;
use crate::repl::proto::ReplAttrV1;
use crate::schema::SchemaAttribute;
use crate::valueset::{uuid_to_proto_string, DbValueSetV2, ValueSet};
@ -27,6 +28,11 @@ impl ValueSetUuid {
Ok(Box::new(ValueSetUuid { set }))
}
/// Rebuild a uuid value set from its replication (v1) wire form.
pub fn from_repl_v1(data: &[Uuid]) -> Result<ValueSet, OperationError> {
    Ok(Box::new(ValueSetUuid {
        set: data.iter().copied().collect(),
    }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and uuid is foreign.
#[allow(clippy::should_implement_trait)]
@ -106,6 +112,12 @@ impl ValueSetT for ValueSetUuid {
DbValueSetV2::Uuid(self.set.iter().cloned().collect())
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Uuid is Copy; copied() states the intent more precisely than cloned()
    // and matches the from_repl_v1 counterpart.
    ReplAttrV1::Uuid {
        set: self.set.iter().copied().collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // One partial value per stored uuid.
    Box::new(self.set.iter().map(|u| PartialValue::Uuid(*u)))
}
@ -172,6 +184,11 @@ impl ValueSetRefer {
Ok(Box::new(ValueSetRefer { set }))
}
/// Rebuild a reference value set from its replication (v1) wire form.
pub fn from_repl_v1(data: &[Uuid]) -> Result<ValueSet, OperationError> {
    Ok(Box::new(ValueSetRefer {
        set: data.iter().copied().collect(),
    }))
}
// We need to allow this, because rust doesn't allow us to impl FromIterator on foreign
// types, and uuid is foreign.
#[allow(clippy::should_implement_trait)]
@ -255,6 +272,12 @@ impl ValueSetT for ValueSetRefer {
DbValueSetV2::Reference(self.set.iter().cloned().collect())
}
fn to_repl_v1(&self) -> ReplAttrV1 {
    // Uuid is Copy; copied() states the intent more precisely than cloned()
    // and matches the from_repl_v1 counterpart.
    ReplAttrV1::Reference {
        set: self.set.iter().copied().collect(),
    }
}
fn to_partialvalue_iter(&self) -> Box<dyn Iterator<Item = PartialValue> + '_> {
    // One partial value per stored reference uuid.
    Box::new(self.set.iter().map(|u| PartialValue::Refer(*u)))
}