20221123 iam migration work ()

Add the bulk of the needed parts for IAM migrations.
This commit is contained in:
Firstyear 2022-12-15 16:09:09 +10:00 committed by GitHub
parent c036f81d4e
commit e0e611f9df
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
49 changed files with 3716 additions and 718 deletions

4
Cargo.lock generated
View file

@ -2261,6 +2261,7 @@ dependencies = [
"tracing-subscriber",
"url",
"users",
"uuid",
]
[[package]]
@ -2292,6 +2293,7 @@ dependencies = [
"serde",
"serde_json",
"time 0.2.27",
"tracing",
"url",
"urlencoding",
"uuid",
@ -3761,7 +3763,7 @@ dependencies = [
[[package]]
name = "scim_proto"
version = "0.1.0"
source = "git+https://github.com/kanidm/scim.git#f7a9241bf413ac2e40cb974876d2b0433a866c74"
source = "git+https://github.com/kanidm/scim.git#cb147c80fb14dd87218698d3d5608f2abd8617d5"
dependencies = [
"base64urlsafedata",
"serde",

View file

@ -27,6 +27,7 @@ serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
toml.workspace = true
url = { workspace = true, features = ["serde"] }
uuid = { workspace = true, features = ["serde"] }
# For file metadata, should this be moved out?
kanidmd_lib.workspace = true

View file

@ -1,5 +1,7 @@
use serde::Deserialize;
use std::collections::HashMap;
use url::Url;
use uuid::Uuid;
#[derive(Debug, Deserialize)]
pub struct Config {
@ -9,4 +11,20 @@ pub struct Config {
pub ipa_sync_dn: String,
pub ipa_sync_pw: String,
pub ipa_sync_base_dn: String,
// pub entry: Option<Vec<EntryConfig>>,
#[serde(flatten)]
pub entry_map: HashMap<Uuid, EntryConfig>,
}
/// Per-entry synchronisation overrides, keyed by the entry's UUID via the
/// `#[serde(flatten)]` map in `Config`. An entry with no override behaves
/// the same as `EntryConfig::default()` (i.e. not excluded).
#[derive(Debug, Deserialize, Default, Clone)]
pub struct EntryConfig {
// uuid: Uuid,
// Default false
// When true, this entry is skipped during sync and never sent to kanidm.
#[serde(default)]
pub exclude: bool,
// Future re-mapping hooks (not yet implemented):
// map_uuid: Option<Uuid>,
// map_external_id: Option<String>,
// map_name: Option<String>,
}

View file

@ -16,8 +16,9 @@ mod config;
#[cfg(test)]
mod tests;
use crate::config::Config;
use crate::config::{Config, EntryConfig};
use clap::Parser;
use std::collections::HashMap;
use std::fs::metadata;
use std::fs::File;
use std::io::Read;
@ -29,6 +30,7 @@ use tokio::runtime;
use tracing::{debug, error, info, warn};
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};
use uuid::Uuid;
use kanidm_client::KanidmClientBuilder;
use kanidm_proto::scim_v1::{
@ -201,7 +203,13 @@ async fn driver_main(opt: Opt) {
// pre-process the entries.
// - > fn so we can test.
let scim_sync_request = match process_ipa_sync_result(scim_sync_status, sync_result).await {
let scim_sync_request = match process_ipa_sync_result(
scim_sync_status,
sync_result,
&sync_config.entry_map,
)
.await
{
Ok(ssr) => ssr,
Err(()) => return,
};
@ -212,13 +220,18 @@ async fn driver_main(opt: Opt) {
if let Err(e) = serde_json::to_writer_pretty(stdout, &scim_sync_request) {
error!(?e, "Failed to serialise scim sync request");
};
} else if opt.dry_run {
info!("dry-run complete");
info!("Success!");
} else {
if let Err(e) = rsclient.scim_v1_sync_update(&scim_sync_request).await {
error!(
?e,
"Failed to submit scim sync update - see the kanidmd server log for more details."
);
};
} else {
info!("Success!");
}
}
// done!
}
@ -226,6 +239,7 @@ async fn driver_main(opt: Opt) {
async fn process_ipa_sync_result(
from_state: ScimSyncState,
sync_result: LdapSyncRepl,
entry_config_map: &HashMap<Uuid, EntryConfig>,
) -> Result<ScimSyncRequest, ()> {
match sync_result {
LdapSyncRepl::Success {
@ -256,10 +270,16 @@ async fn process_ipa_sync_result(
// Future - make this par-map
let entries = entries
.into_iter()
.filter_map(|e| match ipa_to_scim_entry(e) {
Ok(Some(e)) => Some(Ok(e)),
Ok(None) => None,
Err(()) => Some(Err(())),
.filter_map(|e| {
let e_config = entry_config_map
.get(&e.entry_uuid)
.cloned()
.unwrap_or_default();
match ipa_to_scim_entry(e, &e_config) {
Ok(Some(e)) => Some(Ok(e)),
Ok(None) => None,
Err(()) => Some(Err(())),
}
})
.collect::<Result<Vec<_>, _>>();
@ -291,17 +311,27 @@ async fn process_ipa_sync_result(
}
}
fn ipa_to_scim_entry(sync_entry: LdapSyncReplEntry) -> Result<Option<ScimEntry>, ()> {
// TODO: Allow re-map of uuid -> uuid
fn ipa_to_scim_entry(
sync_entry: LdapSyncReplEntry,
entry_config: &EntryConfig,
) -> Result<Option<ScimEntry>, ()> {
debug!("{:#?}", sync_entry);
// Is this an entry we need to observe/look at?
// check the sync_entry state?
if sync_entry.state != LdapSyncStateValue::Add {
todo!();
unimplemented!();
}
let dn = sync_entry.entry.dn.clone();
// Is this an entry we need to observe/look at?
if entry_config.exclude {
info!("entry_config excludes {}", dn);
return Ok(None);
}
let oc = sync_entry.entry.attrs.get("objectclass").ok_or_else(|| {
error!("Invalid entry - no object class {}", dn);
})?;
@ -319,6 +349,12 @@ fn ipa_to_scim_entry(sync_entry: LdapSyncReplEntry) -> Result<Option<ScimEntry>,
error!("Missing required attribute uid");
})?;
// ⚠️ hardcoded skip on admin here!!!
if user_name == "admin" {
info!("kanidm excludes {}", dn);
return Ok(None);
}
let display_name = entry.remove_ava_single("cn").ok_or_else(|| {
error!("Missing required attribute cn");
})?;
@ -332,8 +368,9 @@ fn ipa_to_scim_entry(sync_entry: LdapSyncReplEntry) -> Result<Option<ScimEntry>,
})
.transpose()?;
let homedirectory = entry.remove_ava_single("homedirectory");
let password_import = entry.remove_ava_single("ipanthash");
let password_import = entry
.remove_ava_single("ipanthash")
.map(|s| format!("ipaNTHash: {}", s));
let login_shell = entry.remove_ava_single("loginshell");
let external_id = Some(entry.dn);
@ -344,7 +381,6 @@ fn ipa_to_scim_entry(sync_entry: LdapSyncReplEntry) -> Result<Option<ScimEntry>,
user_name,
display_name,
gidnumber,
homedirectory,
password_import,
login_shell,
}
@ -363,6 +399,12 @@ fn ipa_to_scim_entry(sync_entry: LdapSyncReplEntry) -> Result<Option<ScimEntry>,
error!("Missing required attribute cn");
})?;
// ⚠️ hardcoded skip on trust admins / editors / ipausers here!!!
if name == "trust admins" || name == "editors" || name == "ipausers" || name == "admins" {
info!("kanidm excludes {}", dn);
return Ok(None);
}
let description = entry.remove_ava_single("description");
let gidnumber = entry
@ -502,6 +544,4 @@ fn main() {
tracing::debug!("Using {} worker threads", par_count);
rt.block_on(async move { driver_main(opt).await });
info!("Success!");
}

View file

@ -18,14 +18,20 @@ pub struct Opt {
#[clap(parse(from_os_str), short, long, default_value_os_t = DEFAULT_IPA_CONFIG_PATH.into())]
pub ipa_sync_config: PathBuf,
#[clap(short, long, hide = true)]
/// Dump the ldap protocol inputs, as well as the scim outputs. This can be used
/// to create test cases for testing the parser.
///
/// No actions are taken on the kanidm instance, this is purely a dump of the
/// state in/out.
#[clap(short, long, hide = true)]
pub proto_dump: bool,
/// Read entries from ipa, and check the connection to kanidm, but take no actions against
/// kanidm that would change state.
#[clap(short = 'n')]
pub dry_run: bool,
/// Skip the root user permission check.
#[clap(short, long, hide = true)]
pub skip_root_check: bool,
}

View file

@ -1,5 +1,6 @@
use crate::process_ipa_sync_result;
use kanidm_proto::scim_v1::ScimSyncState;
use kanidm_proto::scim_v1::{ScimSyncRequest, ScimSyncState};
use std::collections::HashMap;
use ldap3_client::LdapSyncRepl;
@ -10,18 +11,28 @@ async fn test_ldap_to_scim() {
let sync_request: LdapSyncRepl =
serde_json::from_str(TEST_LDAP_SYNC_REPL_1).expect("failed to parse ldap sync");
let scim_sync_request = process_ipa_sync_result(ScimSyncState::Refresh, sync_request)
.await
.expect("failed to process ldap sync repl");
let expect_scim_request: ScimSyncRequest =
serde_json::from_str(TEST_SCIM_SYNC_REPL_1).expect("failed to parse scim sync");
assert!(matches!(
scim_sync_request.from_state,
ScimSyncState::Refresh
));
let entry_config_map = HashMap::default();
// need to setup a fake ldap sync result.
let scim_sync_request =
process_ipa_sync_result(ScimSyncState::Refresh, sync_request, &entry_config_map)
.await
.expect("failed to process ldap sync repl to scim");
// What do we expect?
println!(
"{}",
serde_json::to_string_pretty(&scim_sync_request).unwrap()
);
assert!(scim_sync_request.from_state == expect_scim_request.from_state);
assert!(scim_sync_request.to_state == expect_scim_request.to_state);
assert!(scim_sync_request.entries == expect_scim_request.entries);
assert!(scim_sync_request.delete_uuids == expect_scim_request.delete_uuids);
}
const TEST_LDAP_SYNC_REPL_1: &str = r#"
@ -421,3 +432,58 @@ const TEST_LDAP_SYNC_REPL_1: &str = r#"
}
}
"#;
// Expected SCIM sync request produced by transforming TEST_LDAP_SYNC_REPL_1
// through process_ipa_sync_result - compared field-by-field in test_ldap_to_scim.
const TEST_SCIM_SYNC_REPL_1: &str = r#"
{
"from_state": "Refresh",
"to_state": {
"Active": {
"cookie": "aXBhLXN5bmNyZXBsLWthbmkuZGV2LmJsYWNraGF0cy5uZXQuYXU6Mzg5I2NuPWRpcmVjdG9yeSBtYW5hZ2VyOmRjPWRldixkYz1ibGFja2hhdHMsZGM9bmV0LGRjPWF1Oih8KCYob2JqZWN0Q2xhc3M9cGVyc29uKShvYmplY3RDbGFzcz1pcGFudHVzZXJhdHRycykob2JqZWN0Q2xhc3M9cG9zaXhhY2NvdW50KSkoJihvYmplY3RDbGFzcz1ncm91cG9mbmFtZXMpKG9iamVjdENsYXNzPWlwYXVzZXJncm91cCkoIShvYmplY3RDbGFzcz1tZXBtYW5hZ2VkZW50cnkpKSkoJihvYmplY3RDbGFzcz1pcGF0b2tlbikob2JqZWN0Q2xhc3M9aXBhdG9rZW50b3RwKSkpIzEwOQ"
}
},
"entries": [
{
"schemas": [
"urn:ietf:params:scim:schemas:kanidm:1.0:person",
"urn:ietf:params:scim:schemas:kanidm:1.0:account",
"urn:ietf:params:scim:schemas:kanidm:1.0:posixaccount"
],
"id": "babb8302-43a1-11ed-a50d-919b4b1a5ec0",
"externalId": "uid=testuser,cn=users,cn=accounts,dc=dev,dc=blackhats,dc=net,dc=au",
"displayname": "Test User",
"gidnumber": 12345,
"loginshell": "/bin/sh",
"name": "testuser",
"password_import": "ipaNTHash: iEb36u6PsRetBr3YMLdYbA"
},
{
"schemas": [
"urn:ietf:params:scim:schemas:kanidm:1.0:group"
],
"id": "d547c581-5f26-11ed-a50d-919b4b1a5ec0",
"externalId": "cn=testgroup,cn=groups,cn=accounts,dc=dev,dc=blackhats,dc=net,dc=au",
"description": "Test group",
"name": "testgroup"
},
{
"schemas": [
"urn:ietf:params:scim:schemas:kanidm:1.0:group"
],
"id": "d547c583-5f26-11ed-a50d-919b4b1a5ec0",
"externalId": "cn=testexternal,cn=groups,cn=accounts,dc=dev,dc=blackhats,dc=net,dc=au",
"name": "testexternal"
},
{
"schemas": [
"urn:ietf:params:scim:schemas:kanidm:1.0:group",
"urn:ietf:params:scim:schemas:kanidm:1.0:posixgroup"
],
"id": "f90b0b81-5f26-11ed-a50d-919b4b1a5ec0",
"externalId": "cn=testposix,cn=groups,cn=accounts,dc=dev,dc=blackhats,dc=net,dc=au",
"gidnumber": 1234567,
"name": "testposix"
}
],
"delete_uuids": []
}
"#;

View file

@ -151,3 +151,95 @@ entries that Kanidm now has authority over will NOT be synced and will be highli
The administrator then needs to decide how to proceed with these conflicts determining which data
source is the authority on the information.
## Internal Batch Update Operation Phases
We have to consider in our batch updates that there are multiple stages of the update. This is because
we need to consider that at any point the lifecycle of a presented entry may change within a single
batch. Because of this, we have to treat the operation differently within kanidm to ensure a consistent outcome.
Additionally we have to "fail fast". This means that on any conflict the sync will abort and the administrator
must intervene.
To understand why we chose this, we have to look at what happens in a "soft fail" condition.
In this example we have an account named X and a group named Y. The group contains X as a member.
When we submit this for an initial sync, or after the account X is created, if we had a "soft" fail
during the import of the account, we would reject it from being added to Kanidm but would then continue
with the synchronisation. Then the group Y would be imported. Since the member pointing to X would
not be valid, it would be silently removed.
At this point we would have group Y imported, but it has no members and the account X would not
have been imported. The administrator may intervene and fix the account X to allow sync to proceed. However
this would not repair the missing group membership. To repair the group membership a change to group Y
would need to be triggered to also sync the group status.
Since the admin may not be aware of this, it would silently mean the membership is missing.
To avoid this, by "failing fast" if account X couldn't be imported for any reason, then we would
stop the whole sync process until it could be repaired. Then when repaired both the account X and
group Y would sync and the membership would be intact.
### Phase 1 - Validation of Update State
In this phase we need to assert that the batch operation can proceed and is consistent with the expectations
we have of the server's state.
Assert the token provided is valid, and contains the correct access requirements.
From this token, retrieve the related synchronisation entry.
Assert that the batch updates from and to state identifiers are consistent with the synchronisation
entry.
Retrieve the sync\_parent\_uuid from the sync entry.
Retrieve the sync\_authority value from the sync entry.
### Phase 2 - Entry Location, Creation and Authority
In this phase we are ensuring that all the entries within the operation are within the control of
this sync domain. We also ensure that entries we intend to act upon exist with our authority
markers such that the subsequent operations are all "modifications" rather than mixed create/modify.
For each entry in the sync request, if an entry with that uuid exists retrieve it.
* If an entry exists in the database, assert that its sync\_parent\_uuid is the same as our agreements.
* If there is no sync\_parent\_uuid or the sync\_parent\_uuid does not match, reject the operation.
* If no entry exists in the database, create a "stub" entry with our sync\_parent\_uuid
* Create the entry immediately, and then retrieve it.
### Phase 3 - Entry Assertion
Remove all attributes in the sync that are overlapped with our sync\_authority value.
For all uuids in the entry present set
Assert their attributes match what was synced in.
Resolve types that need resolving (name2uuid, externalid2uuid)
Write all
### Phase 4 - Entry Removal
For all uuids in the delete\_uuids set:
if their sync\_parent\_uuid matches ours, assert they are deleted (recycled).
### Phase 5 - Commit
Write the updated "state" from the request to\_state to our current state of the sync
Write an updated "authority" value to the agreement of what attributes we can change.
Commit the txn.

View file

@ -20,6 +20,7 @@ impl KanidmClient {
let mut new_acct = Entry {
attrs: BTreeMap::new(),
};
new_acct
.attrs
.insert("name".to_string(), vec![name.to_string()]);
@ -28,6 +29,7 @@ impl KanidmClient {
.attrs
.insert("description".to_string(), vec![description.to_string()]);
}
self.perform_post_request("/v1/sync_account", new_acct)
.await
}
@ -48,4 +50,17 @@ impl KanidmClient {
self.perform_delete_request(format!("/v1/sync_account/{}/_sync_token", id,).as_str())
.await
}
/// Reset the stored sync cookie of the sync account `id`, so the next sync
/// run performs a full refresh from the provider.
pub async fn idm_sync_account_force_refresh(&self, id: &str) -> Result<(), ClientError> {
    // Patching sync_cookie with an empty value list clears the attribute
    // server-side (see the force-refresh CLI documentation).
    let mut patch = Entry {
        attrs: BTreeMap::new(),
    };
    patch.attrs.insert("sync_cookie".to_string(), Vec::new());
    let uri = format!("/v1/sync_account/{}", id);
    self.perform_patch_request(uri.as_str(), patch).await
}
}

View file

@ -23,6 +23,7 @@ scim_proto.workspace = true
serde = { workspace = true, features = ["derive"] }
serde_json.workspace = true
time = { workspace = true, features = ["serde", "std"] }
tracing.workspace = true
url = { workspace = true, features = ["serde"] }
urlencoding.workspace = true
uuid = { workspace = true, features = ["serde"] }

View file

@ -3,16 +3,16 @@ use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use uuid::Uuid;
pub use scim_proto::prelude::{ScimEntry, ScimError};
pub use scim_proto::prelude::{ScimAttr, ScimComplexAttr, ScimEntry, ScimError, ScimSimpleAttr};
use scim_proto::*;
#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub enum ScimSyncState {
Refresh,
Active { cookie: Base64UrlSafeData },
}
#[derive(Serialize, Deserialize, Debug)]
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
pub struct ScimSyncRequest {
pub from_state: ScimSyncState,
pub to_state: ScimSyncState,
@ -23,7 +23,22 @@ pub struct ScimSyncRequest {
pub delete_uuids: Vec<Uuid>,
}
pub const SCIM_SCHEMA_SYNC_PERSON: &str = "urn:ietf:params:scim:schemas:kanidm:1.0:sync:person";
impl ScimSyncRequest {
    /// Build a request that asks the server for a full refresh: the target
    /// state is `Refresh` and no entries or deletions are carried.
    pub fn need_refresh(from_state: ScimSyncState) -> Self {
        Self {
            from_state,
            to_state: ScimSyncState::Refresh,
            entries: Vec::new(),
            delete_uuids: Vec::new(),
        }
    }
}
pub const SCIM_SCHEMA_SYNC: &str = "urn:ietf:params:scim:schemas:kanidm:1.0:";
pub const SCIM_SCHEMA_SYNC_PERSON: &str = "urn:ietf:params:scim:schemas:kanidm:1.0:person";
pub const SCIM_SCHEMA_SYNC_ACCOUNT: &str = "urn:ietf:params:scim:schemas:kanidm:1.0:account";
pub const SCIM_SCHEMA_SYNC_POSIXACCOUNT: &str =
"urn:ietf:params:scim:schemas:kanidm:1.0:posixaccount";
#[derive(Serialize, Debug, Clone)]
#[serde(into = "ScimEntry")]
@ -33,17 +48,43 @@ pub struct ScimSyncPerson {
pub user_name: String,
pub display_name: String,
pub gidnumber: Option<u32>,
pub homedirectory: Option<String>,
pub password_import: Option<String>,
pub login_shell: Option<String>,
}
/*
impl TryFrom<ScimEntry> for ScimSyncPerson {
impl TryFrom<&ScimEntry> for ScimSyncPerson {
type Error = ScimError;
fn try_from(_value: ScimEntry) -> Result<Self, Self::Error> {
todo!();
fn try_from(value: &ScimEntry) -> Result<Self, Self::Error> {
if !(value.schemas.iter().any(|i| i == SCIM_SCHEMA_SYNC_PERSON)
&& value.schemas.iter().any(|i| i == SCIM_SCHEMA_SYNC_ACCOUNT)) {
return Err(ScimError::EntryMissingSchema);
}
let is_posix = value.schemas.iter().any(|i| i == SCIM_SCHEMA_SYNC_POSIXACCOUNT);
// we clone the inner attrs, because these macros will pop things from them.
let attrs = value.attrs.clone();
// Pop stuff
if !attrs.is_empty() {
debug!(?attrs, "Excess attrs detected");
return Err(ScimError::InvalidAttribute);
}
Ok(ScimSyncPerson {
id,
external_id,
user_name,
display_name,
gidnumber,
password_import,
login_shell,
})
}
}
*/
@ -56,21 +97,30 @@ impl Into<ScimEntry> for ScimSyncPerson {
user_name,
display_name,
gidnumber,
homedirectory,
password_import,
login_shell,
} = self;
let schemas = vec![SCIM_SCHEMA_SYNC_PERSON.to_string()];
let schemas = if gidnumber.is_some() {
vec![
SCIM_SCHEMA_SYNC_PERSON.to_string(),
SCIM_SCHEMA_SYNC_ACCOUNT.to_string(),
SCIM_SCHEMA_SYNC_POSIXACCOUNT.to_string(),
]
} else {
vec![
SCIM_SCHEMA_SYNC_PERSON.to_string(),
SCIM_SCHEMA_SYNC_ACCOUNT.to_string(),
]
};
let mut attrs = BTreeMap::default();
set_string!(attrs, "userName", user_name);
set_string!(attrs, "displayName", display_name);
set_option_u32!(attrs, "gidNumber", gidnumber);
set_option_string!(attrs, "homeDirectory", homedirectory);
set_option_string!(attrs, "passwordImport", password_import);
set_option_string!(attrs, "loginShell", login_shell);
set_string!(attrs, "name", user_name);
set_string!(attrs, "displayname", display_name);
set_option_u32!(attrs, "gidnumber", gidnumber);
set_option_string!(attrs, "password_import", password_import);
set_option_string!(attrs, "loginshell", login_shell);
ScimEntry {
schemas,
@ -82,7 +132,8 @@ impl Into<ScimEntry> for ScimSyncPerson {
}
}
pub const SCIM_SCHEMA_SYNC_GROUP: &str = "urn:ietf:params:scim:schemas:kanidm:1.0:sync:group";
pub const SCIM_SCHEMA_SYNC_GROUP: &str = "urn:ietf:params:scim:schemas:kanidm:1.0:group";
pub const SCIM_SCHEMA_SYNC_POSIXGROUP: &str = "urn:ietf:params:scim:schemas:kanidm:1.0:posixgroup";
#[derive(Serialize, Debug, Clone)]
pub struct ScimExternalMember {
@ -115,11 +166,35 @@ pub struct ScimSyncGroup {
}
/*
impl TryFrom<ScimEntry> for ScimSyncPerson {
impl TryFrom<&ScimEntry> for ScimSyncGroup {
type Error = ScimError;
fn try_from(_value: ScimEntry) -> Result<Self, Self::Error> {
todo!();
fn try_from(value: &ScimEntry) -> Result<Self, Self::Error> {
if !value.schemas.iter().any(|i| i == SCIM_SCHEMA_SYNC_GROUP) {
return Err(ScimError::EntryMissingSchema);
}
let is_posix = value.schemas.iter().any(|i| i == SCIM_SCHEMA_SYNC_POSIXGROUP);
// we clone the inner attrs, because these macros will pop things from them.
let attrs = value.attrs.clone();
// Pop stuff
if !attrs.is_empty() {
debug!(?attrs, "Excess attrs detected");
return Err(ScimError::InvalidAttribute);
}
Ok(ScimSyncGroup {
id,
external_id,
name,
description,
gidnumber,
members,
})
}
}
*/
@ -135,14 +210,21 @@ impl Into<ScimEntry> for ScimSyncGroup {
members,
} = self;
let schemas = vec![SCIM_SCHEMA_SYNC_GROUP.to_string()];
let schemas = if gidnumber.is_some() {
vec![
SCIM_SCHEMA_SYNC_GROUP.to_string(),
SCIM_SCHEMA_SYNC_POSIXGROUP.to_string(),
]
} else {
vec![SCIM_SCHEMA_SYNC_GROUP.to_string()]
};
let mut attrs = BTreeMap::default();
set_string!(attrs, "name", name);
set_option_u32!(attrs, "gidNumber", gidnumber);
set_option_u32!(attrs, "gidnumber", gidnumber);
set_option_string!(attrs, "description", description);
set_multi_complex!(attrs, "members", members);
set_multi_complex!(attrs, "member", members);
ScimEntry {
schemas,

View file

@ -217,6 +217,7 @@ pub enum OperationError {
InvalidValueState,
InvalidEntryId,
InvalidRequestState,
InvalidSyncState,
InvalidState,
InvalidEntryState,
InvalidUuid,
@ -224,6 +225,8 @@ pub enum OperationError {
InvalidAcpState(String),
InvalidSchemaState(String),
InvalidAccountState(String),
MissingEntries,
ModifyAssertionFailed,
BackendEngine,
SqliteError, //(RusqliteError)
FsError,

View file

@ -7,7 +7,8 @@ impl SynchOpt {
SynchOpt::Get(nopt) => nopt.copt.debug,
SynchOpt::Create { copt, .. }
| SynchOpt::GenerateToken { copt, .. }
| SynchOpt::DestroyToken { copt, .. } => copt.debug,
| SynchOpt::DestroyToken { copt, .. }
| SynchOpt::ForceRefresh { copt, .. } => copt.debug,
}
}
@ -63,6 +64,13 @@ impl SynchOpt {
Err(e) => error!("Error -> {:?}", e),
}
}
SynchOpt::ForceRefresh { account_id, copt } => {
let client = copt.to_client().await;
match client.idm_sync_account_force_refresh(&account_id).await {
Ok(()) => println!("Success"),
Err(e) => error!("Error -> {:?}", e),
}
}
}
}
}

View file

@ -775,6 +775,16 @@ pub enum SynchOpt {
#[clap(flatten)]
copt: CommonOpt,
},
/// Reset the sync cookie of this connector, so that on the next operation of the sync tool
/// a full refresh of the provider is requested. Kanidm attributes that have been granted
/// authority will *not* be lost or deleted.
#[clap(name = "force-refresh")]
ForceRefresh {
#[clap()]
account_id: String,
#[clap(flatten)]
copt: CommonOpt,
}
}
#[derive(Debug, Subcommand)]

View file

@ -320,6 +320,7 @@ pub fn generate_integrity_hash(filename: String) -> Result<String, String> {
// TODO: Add request limits.
pub fn create_https_server(
address: String,
domain: String,
// opt_tls_params: Option<SslAcceptorBuilder>,
opt_tls_params: Option<&TlsConfiguration>,
role: ServerRole,
@ -390,6 +391,8 @@ pub fn create_https_server(
tide::sessions::SessionMiddleware::new(tide::sessions::CookieStore::new(), cookie_key)
.with_session_ttl(None)
.with_cookie_name("kanidm-session")
// Without this, the cookies won't be used on subdomains of origin.
.with_cookie_domain(&domain)
// Im not sure if we need Lax here, I don't think we do because on the first get
// we don't need the cookie since wasm drives the fetches.
.with_same_site_policy(tide::http::cookies::SameSite::Strict),

View file

@ -791,7 +791,7 @@ pub async fn create_server_core(
// ⚠️ only start the sockets and listeners in non-config-test modes.
let h = self::https::create_https_server(
config.address,
// opt_tls_params,
config.domain,
config.tls_config.as_ref(),
config.role,
config.trust_x_forward_for,

View file

@ -924,6 +924,47 @@ pub trait AccessControlsTransaction<'a> {
Ok(r)
}
/// Access-control gate for batch modify events.
///
/// Internal identities bypass the check entirely; sync identities are hard
/// blocked; user identities must hold the ReadWrite access scope. Because
/// batch modify has no public ACP path yet, a user identity reaching the
/// final step is treated as a server-side logic error and rejected.
#[allow(clippy::cognitive_complexity)]
#[instrument(
    level = "debug",
    name = "access::batch_modify_allow_operation",
    skip_all
)]
fn batch_modify_allow_operation(
    &self,
    me: &BatchModifyEvent,
    _entries: &[Arc<EntrySealedCommitted>],
) -> Result<bool, OperationError> {
    // First gate: *who* is performing the operation.
    match &me.ident.origin {
        IdentType::Internal => {
            trace!("Internal operation, bypassing access check");
            // No need to check ACS
            return Ok(true);
        }
        IdentType::Synch(_) => {
            // Sync identities must never drive batch modifies directly.
            security_critical!("Blocking sync check");
            return Err(OperationError::InvalidState);
        }
        IdentType::User(_) => {}
    };
    info!(event = %me.ident, "Access check for batch modify event");
    // Second gate: *what* this session is scoped to do.
    match me.ident.access_scope() {
        AccessScope::IdentityOnly | AccessScope::ReadOnly | AccessScope::Synchronise => {
            security_access!("denied ❌ - identity access scope is not permitted to modify");
            return Ok(false);
        }
        AccessScope::ReadWrite => {
            // As you were
        }
    };
    // No public path to batch modify exists yet, so this branch should be
    // unreachable for user identities - fail closed and flag it in debug.
    error!("How did you get here?! Batch modify isn't public yet!");
    debug_assert!(false);
    Err(OperationError::InvalidState)
}
#[allow(clippy::cognitive_complexity)]
#[instrument(level = "debug", name = "access::create_allow_operation", skip_all)]
fn create_allow_operation(

View file

@ -36,6 +36,7 @@ const DEFAULT_CACHE_WMISS: usize = 4;
#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
enum NameCacheKey {
Name2Uuid(String),
ExternalId2Uuid(String),
Uuid2Rdn(Uuid),
Uuid2Spn(Uuid),
}
@ -228,6 +229,30 @@ macro_rules! name2uuid {
}};
}
// Cache-through lookup of external id -> uuid, mirroring the name2uuid!
// macro above. NOTE: on a cache hit this `return`s directly from the
// *function that invoked the macro*; on a miss it consults the db layer
// and, when found, warms the shared name cache before yielding the result.
macro_rules! externalid2uuid {
    (
        $self:expr,
        $name:expr
    ) => {{
        let cache_key = NameCacheKey::ExternalId2Uuid($name.to_string());
        let cache_r = $self.name_cache.get(&cache_key);
        if let Some(NameCacheValue::U(uuid)) = cache_r {
            trace!(?uuid, "Got cached externalid2uuid");
            // Early-returns from the calling function, not just this block.
            return Ok(Some(uuid.clone()));
        } else {
            trace!("Cache miss uuid for externalid2uuid");
        }
        let db_r = $self.db.externalid2uuid($name)?;
        if let Some(uuid) = db_r {
            // Populate the cache for subsequent lookups.
            $self
                .name_cache
                .insert(cache_key, NameCacheValue::U(uuid.clone()))
        }
        Ok(db_r)
    }};
}
macro_rules! uuid2spn {
(
$self:expr,
@ -334,6 +359,8 @@ pub trait IdlArcSqliteTransaction {
fn name2uuid(&mut self, name: &str) -> Result<Option<Uuid>, OperationError>;
fn externalid2uuid(&mut self, name: &str) -> Result<Option<Uuid>, OperationError>;
fn uuid2spn(&mut self, uuid: Uuid) -> Result<Option<Value>, OperationError>;
fn uuid2rdn(&mut self, uuid: Uuid) -> Result<Option<String>, OperationError>;
@ -400,6 +427,10 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> {
name2uuid!(self, name)
}
fn externalid2uuid(&mut self, name: &str) -> Result<Option<Uuid>, OperationError> {
externalid2uuid!(self, name)
}
fn uuid2spn(&mut self, uuid: Uuid) -> Result<Option<Value>, OperationError> {
uuid2spn!(self, uuid)
}
@ -485,6 +516,10 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> {
name2uuid!(self, name)
}
fn externalid2uuid(&mut self, name: &str) -> Result<Option<Uuid>, OperationError> {
externalid2uuid!(self, name)
}
fn uuid2spn(&mut self, uuid: Uuid) -> Result<Option<Value>, OperationError> {
uuid2spn!(self, uuid)
}
@ -571,6 +606,10 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
db.write_name2uuid_add(k, *v)
}
(NameCacheKey::Name2Uuid(k), None) => db.write_name2uuid_rem(k),
(NameCacheKey::ExternalId2Uuid(k), Some(NameCacheValue::U(v))) => {
db.write_externalid2uuid_add(k, *v)
}
(NameCacheKey::ExternalId2Uuid(k), None) => db.write_externalid2uuid_rem(k),
(NameCacheKey::Uuid2Spn(uuid), Some(NameCacheValue::S(v))) => {
db.write_uuid2spn(*uuid, Some(v))
}
@ -967,6 +1006,27 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
Ok(())
}
/// Ensure the externalid2uuid index table exists in the underlying db layer.
pub fn create_externalid2uuid(&self) -> Result<(), OperationError> {
    self.db.create_externalid2uuid()
}
/// Record a new external id -> uuid mapping in the write cache. The entry
/// is marked dirty so it is written through to the db layer on commit.
pub fn write_externalid2uuid_add(
    &mut self,
    uuid: Uuid,
    add: String,
) -> Result<(), OperationError> {
    self.name_cache
        .insert_dirty(NameCacheKey::ExternalId2Uuid(add), NameCacheValue::U(uuid));
    Ok(())
}
/// Queue removal of an external id mapping; applied to the db layer on commit.
pub fn write_externalid2uuid_rem(&mut self, rem: String) -> Result<(), OperationError> {
    self.name_cache
        .remove_dirty(NameCacheKey::ExternalId2Uuid(rem));
    Ok(())
}
pub fn create_uuid2spn(&self) -> Result<(), OperationError> {
self.db.create_uuid2spn()
}

View file

@ -288,6 +288,26 @@ pub trait IdlSqliteTransaction {
Ok(uuid)
}
/// Resolve an external id to its entry uuid via the idx_externalid2uuid
/// table. Returns `Ok(None)` when no row matches.
fn externalid2uuid(&mut self, name: &str) -> Result<Option<Uuid>, OperationError> {
    // The table exists - lets now get the actual index itself.
    let mut stmt = self
        .get_conn()
        .prepare(&format!(
            "SELECT uuid FROM {}.idx_externalid2uuid WHERE eid = :eid",
            self.get_db_name()
        ))
        .map_err(sqlite_error)?;
    let uuid_raw: Option<String> = stmt
        .query_row(&[(":eid", &name)], |row| row.get(0))
        // We don't mind if it doesn't exist
        .optional()
        .map_err(sqlite_error)?;
    // NOTE(review): a stored value that fails Uuid::parse_str is silently
    // treated as a miss (None) rather than surfaced as index corruption.
    let uuid = uuid_raw.as_ref().and_then(|u| Uuid::parse_str(u).ok());
    Ok(uuid)
}
fn uuid2spn(&mut self, uuid: Uuid) -> Result<Option<Value>, OperationError> {
let uuids = uuid.as_hyphenated().to_string();
// The table exists - lets now get the actual index itself.
@ -866,6 +886,45 @@ impl IdlSqliteWriteTransaction {
.map_err(sqlite_error)
}
/// Create the externalid2uuid index table if it does not already exist.
pub fn create_externalid2uuid(&self) -> Result<(), OperationError> {
    // This index always lives in the "main" database.
    let stmt = "CREATE TABLE IF NOT EXISTS main.idx_externalid2uuid (eid TEXT PRIMARY KEY, uuid TEXT)";
    self.conn
        .execute(stmt, [])
        .map(|_| ())
        .map_err(sqlite_error)
}
/// Upsert an external id -> uuid row into main.idx_externalid2uuid.
pub fn write_externalid2uuid_add(&self, name: &str, uuid: Uuid) -> Result<(), OperationError> {
    // Uuids are persisted in their hyphenated textual form.
    let uuids = uuid.as_hyphenated().to_string();
    self.conn
        .prepare(&format!(
            "INSERT OR REPLACE INTO {}.idx_externalid2uuid (eid, uuid) VALUES(:eid, :uuid)",
            "main"
        ))
        .and_then(|mut stmt| {
            stmt.execute(named_params! {
                ":eid": &name,
                ":uuid": uuids.as_str()
            })
        })
        .map(|_| ())
        .map_err(sqlite_error)
}
/// Delete the row for an external id from main.idx_externalid2uuid.
/// A non-matching eid simply deletes zero rows and is not an error.
pub fn write_externalid2uuid_rem(&self, name: &str) -> Result<(), OperationError> {
    self.conn
        .prepare(&format!(
            "DELETE FROM {}.idx_externalid2uuid WHERE eid = :eid",
            "main"
        ))
        .and_then(|mut stmt| stmt.execute(&[(":eid", &name)]))
        .map(|_| ())
        .map_err(sqlite_error)
}
pub fn create_uuid2spn(&self) -> Result<(), OperationError> {
self.conn
.execute(
@ -1321,8 +1380,13 @@ impl IdlSqliteWriteTransaction {
dbv_id2entry = 5;
admin_info!(entry = %dbv_id2entry, "dbv_id2entry migrated (dbentryv1 -> dbentryv2)");
}
// * if v5 -> complete.
// * if v5 -> create externalid2uuid
if dbv_id2entry == 5 {
self.create_externalid2uuid()?;
dbv_id2entry = 6;
admin_info!(entry = %dbv_id2entry, "dbv_id2entry migrated (externalid2uuid)");
}
// * if v6 -> complete.
self.set_db_version_key(DBV_ID2ENTRY, dbv_id2entry)
.map_err(sqlite_error)?;

View file

@ -851,6 +851,10 @@ pub trait BackendTransaction {
self.get_idlayer().name2uuid(name)
}
/// Resolve an entry's external id to its uuid via the id-layer index.
fn externalid2uuid(&self, name: &str) -> Result<Option<Uuid>, OperationError> {
    self.get_idlayer().externalid2uuid(name)
}
fn uuid2spn(&self, uuid: Uuid) -> Result<Option<Value>, OperationError> {
self.get_idlayer().uuid2spn(uuid)
}
@ -1189,16 +1193,24 @@ impl<'a> BackendWriteTransaction<'a> {
// There will never be content to add.
assert!(n2u_add.is_none());
let (eid2u_add, eid2u_rem) = Entry::idx_externalid2uuid_diff(mask_pre, None);
// There will never be content to add.
assert!(eid2u_add.is_none());
let u2s_act = Entry::idx_uuid2spn_diff(mask_pre, None);
let u2r_act = Entry::idx_uuid2rdn_diff(mask_pre, None);
trace!(?n2u_rem, ?u2s_act, ?u2r_act,);
trace!(?n2u_rem, ?eid2u_rem, ?u2s_act, ?u2r_act,);
// Write the changes out to the backend
if let Some(rem) = n2u_rem {
idlayer.write_name2uuid_rem(rem)?
}
if let Some(rem) = eid2u_rem {
idlayer.write_externalid2uuid_rem(rem)?
}
match u2s_act {
None => {}
Some(Ok(k)) => idlayer.write_uuid2spn(uuid, Some(k))?,
@ -1219,11 +1231,19 @@ impl<'a> BackendWriteTransaction<'a> {
let mask_post = post.and_then(|e| e.mask_recycled_ts());
let (n2u_add, n2u_rem) = Entry::idx_name2uuid_diff(mask_pre, mask_post);
let (eid2u_add, eid2u_rem) = Entry::idx_externalid2uuid_diff(mask_pre, mask_post);
let u2s_act = Entry::idx_uuid2spn_diff(mask_pre, mask_post);
let u2r_act = Entry::idx_uuid2rdn_diff(mask_pre, mask_post);
trace!(?n2u_add, ?n2u_rem, ?u2s_act, ?u2r_act);
trace!(
?n2u_add,
?n2u_rem,
?eid2u_add,
?eid2u_rem,
?u2s_act,
?u2r_act
);
// Write the changes out to the backend
if let Some(add) = n2u_add {
@ -1233,6 +1253,13 @@ impl<'a> BackendWriteTransaction<'a> {
idlayer.write_name2uuid_rem(rem)?
}
if let Some(add) = eid2u_add {
idlayer.write_externalid2uuid_add(e_uuid, add)?
}
if let Some(rem) = eid2u_rem {
idlayer.write_externalid2uuid_rem(rem)?
}
match u2s_act {
None => {}
Some(Ok(k)) => idlayer.write_uuid2spn(e_uuid, Some(k))?,
@ -1327,6 +1354,9 @@ impl<'a> BackendWriteTransaction<'a> {
trace!("Creating index -> name2uuid");
idlayer.create_name2uuid()?;
trace!("Creating index -> externalid2uuid");
idlayer.create_externalid2uuid()?;
trace!("Creating index -> uuid2spn");
idlayer.create_uuid2spn()?;

View file

@ -21,6 +21,9 @@ pub const JSON_SCHEMA_ATTR_DISPLAYNAME: &str = r#"{
"multivalue": [
"false"
],
"sync_allowed": [
"true"
],
"attributename": [
"displayname"
],
@ -52,6 +55,9 @@ pub const JSON_SCHEMA_ATTR_MAIL: &str = r#"
"multivalue": [
"true"
],
"sync_allowed": [
"true"
],
"attributename": [
"mail"
],
@ -82,6 +88,9 @@ pub const JSON_SCHEMA_ATTR_SSH_PUBLICKEY: &str = r#"
"multivalue": [
"true"
],
"sync_allowed": [
"true"
],
"attributename": [
"ssh_publickey"
],
@ -143,6 +152,9 @@ pub const JSON_SCHEMA_ATTR_LEGALNAME: &str = r#"{
"multivalue": [
"false"
],
"sync_allowed": [
"true"
],
"attributename": [
"legalname"
],
@ -171,6 +183,9 @@ pub const JSON_SCHEMA_ATTR_RADIUS_SECRET: &str = r#"{
"multivalue": [
"false"
],
"sync_allowed": [
"true"
],
"attributename": [
"radius_secret"
],
@ -384,6 +399,9 @@ pub const JSON_SCHEMA_ATTR_GIDNUMBER: &str = r#"{
"multivalue": [
"false"
],
"sync_allowed": [
"true"
],
"attributename": [
"gidnumber"
],
@ -442,6 +460,9 @@ pub const JSON_SCHEMA_ATTR_LOGINSHELL: &str = r#"{
"multivalue": [
"false"
],
"sync_allowed": [
"true"
],
"attributename": [
"loginshell"
],
@ -502,6 +523,9 @@ pub const JSON_SCHEMA_ATTR_NSUNIQUEID: &str = r#"{
"multivalue": [
"false"
],
"sync_allowed": [
"true"
],
"attributename": [
"nsuniqueid"
],
@ -531,6 +555,9 @@ pub const JSON_SCHEMA_ATTR_ACCOUNT_EXPIRE: &str = r#"{
"multivalue": [
"false"
],
"sync_allowed": [
"true"
],
"attributename": [
"account_expire"
],
@ -560,6 +587,9 @@ pub const JSON_SCHEMA_ATTR_ACCOUNT_VALID_FROM: &str = r#"{
"multivalue": [
"false"
],
"sync_allowed": [
"true"
],
"attributename": [
"account_valid_from"
],
@ -1038,6 +1068,9 @@ pub const JSON_SCHEMA_ATTR_PASSKEYS: &str = r#"{
"multivalue": [
"true"
],
"sync_allowed": [
"true"
],
"attributename": [
"passkeys"
],
@ -1069,6 +1102,9 @@ pub const JSON_SCHEMA_ATTR_DEVICEKEYS: &str = r#"{
"multivalue": [
"true"
],
"sync_allowed": [
"true"
],
"attributename": [
"devicekeys"
],
@ -1335,6 +1371,9 @@ pub const JSON_SCHEMA_CLASS_PERSON: &str = r#"
"description": [
"Object representation of a person"
],
"sync_allowed": [
"true"
],
"classname": [
"person"
],
@ -1393,12 +1432,16 @@ pub const JSON_SCHEMA_CLASS_GROUP: &str = r#"
"description": [
"Object representation of a group"
],
"sync_allowed": [
"true"
],
"classname": [
"group"
],
"systemmay": [
"member",
"grant_ui_hint"
"grant_ui_hint",
"description"
],
"systemmust": [
"name",
@ -1449,6 +1492,9 @@ pub const JSON_SCHEMA_CLASS_ACCOUNT: &str = r#"
"description": [
"Object representation of a account"
],
"sync_allowed": [
"true"
],
"classname": [
"account"
],
@ -1464,7 +1510,8 @@ pub const JSON_SCHEMA_CLASS_ACCOUNT: &str = r#"
"mail",
"oauth2_consent_scope_map",
"user_auth_token_session",
"oauth2_session"
"oauth2_session",
"description"
],
"systemmust": [
"displayname",
@ -1493,6 +1540,9 @@ pub const JSON_SCHEMA_CLASS_SERVICE_ACCOUNT: &str = r#"
"description": [
"Object representation of service account"
],
"sync_allowed": [
"true"
],
"classname": [
"service_account"
],
@ -1593,6 +1643,9 @@ pub const JSON_SCHEMA_CLASS_POSIXGROUP: &str = r#"
"description": [
"Object representation of a posix group, requires group"
],
"sync_allowed": [
"true"
],
"classname": [
"posixgroup"
],
@ -1620,6 +1673,9 @@ pub const JSON_SCHEMA_CLASS_POSIXACCOUNT: &str = r#"
"description": [
"Object representation of a posix account, requires account"
],
"sync_allowed": [
"true"
],
"classname": [
"posixaccount"
],

View file

@ -214,6 +214,12 @@ pub const _UUID_SCHEMA_ATTR_GRANT_UI_HINT: Uuid = uuid!("00000000-0000-0000-0000
pub const _UUID_SCHEMA_ATTR_OAUTH2_RS_ORIGIN_LANDING: Uuid =
uuid!("00000000-0000-0000-0000-ffff00000120");
pub const UUID_SCHEMA_ATTR_SYNC_EXTERNAL_ID: Uuid = uuid!("00000000-0000-0000-0000-ffff00000121");
pub const UUID_SCHEMA_ATTR_SYNC_PARENT_UUID: Uuid = uuid!("00000000-0000-0000-0000-ffff00000122");
pub const UUID_SCHEMA_CLASS_SYNC_OBJECT: Uuid = uuid!("00000000-0000-0000-0000-ffff00000123");
pub const UUID_SCHEMA_ATTR_SYNC_CLASS: Uuid = uuid!("00000000-0000-0000-0000-ffff00000124");
pub const UUID_SCHEMA_ATTR_SYNC_ALLOWED: Uuid = uuid!("00000000-0000-0000-0000-ffff00000125");
// System and domain infos
// I'd like to strongly criticise william of the past for making poor choices about these allocations.
pub const UUID_SYSTEM_INFO: Uuid = uuid!("00000000-0000-0000-0000-ffffff000001");

View file

@ -31,6 +31,7 @@ lazy_static! {
pub static ref PVCLASS_SERVICE_ACCOUNT: PartialValue =
PartialValue::new_class("service_account");
pub static ref PVCLASS_SYNC_ACCOUNT: PartialValue = PartialValue::new_class("sync_account");
pub static ref PVCLASS_SYNC_OBJECT: PartialValue = PartialValue::new_class("sync_object");
pub static ref PVCLASS_SYSTEM: PartialValue = PartialValue::new_class("system");
pub static ref PVCLASS_SYSTEM_INFO: PartialValue = PartialValue::new_class("system_info");
pub static ref PVCLASS_SYSTEM_CONFIG: PartialValue = PartialValue::new_class("system_config");
@ -46,6 +47,7 @@ lazy_static! {
pub static ref CLASS_OBJECT: Value = Value::new_class("object");
pub static ref CLASS_RECYCLED: Value = Value::new_class("recycled");
pub static ref CLASS_SERVICE_ACCOUNT: Value = Value::new_class("service_account");
pub static ref CLASS_SYNC_OBJECT: Value = Value::new_class("sync_object");
pub static ref CLASS_SYSTEM: Value = Value::new_class("system");
pub static ref CLASS_SYSTEM_CONFIG: Value = Value::new_class("system_config");
pub static ref CLASS_SYSTEM_INFO: Value = Value::new_class("system_info");

View file

@ -89,6 +89,8 @@ use crate::valueset::{self, ValueSet};
//
pub type EntryInitNew = Entry<EntryInit, EntryNew>;
pub type EntryInvalidNew = Entry<EntryInvalid, EntryNew>;
pub type EntrySealedNew = Entry<EntrySealed, EntryNew>;
pub type EntrySealedCommitted = Entry<EntrySealed, EntryCommitted>;
pub type EntryInvalidCommitted = Entry<EntryInvalid, EntryCommitted>;
pub type EntryReducedCommitted = Entry<EntryReduced, EntryCommitted>;
@ -306,8 +308,6 @@ impl Entry<EntryInit, EntryNew> {
let x = map2?;
Ok(Entry {
// For now, we do a straight move, and we sort the incoming data
// sets so that BST works.
state: EntryNew,
valid: EntryInit,
attrs: x,
@ -1126,6 +1126,15 @@ impl Entry<EntrySealed, EntryCommitted> {
.collect()
}
#[inline]
/// Given this entry, extract the set of strings that can externally identify this
/// entry for sync purposes. These strings are then indexed.
fn get_externalid2uuid(&self) -> Option<String> {
self.attrs
.get("sync_external_id")
.and_then(|vs| vs.to_proto_string_single())
}
#[inline]
/// Given this entry, extract it's primary security prinicple name, or if not present
/// extract it's name, and if that's not present, extract it's uuid.
@ -1190,6 +1199,39 @@ impl Entry<EntrySealed, EntryCommitted> {
}
}
/// Generate the required values for externalid2uuid.
pub(crate) fn idx_externalid2uuid_diff(
pre: Option<&Self>,
post: Option<&Self>,
) -> (Option<String>, Option<String>) {
match (pre, post) {
(None, None) => {
// no action
(None, None)
}
(None, Some(b)) => {
// add
(b.get_externalid2uuid(), None)
}
(Some(a), None) => {
// remove
(None, a.get_externalid2uuid())
}
(Some(a), Some(b)) => {
let ia = a.get_externalid2uuid();
let ib = b.get_externalid2uuid();
if ia != ib {
// Note, we swap these since ib is the new post state
// we want to add, and ia is what we remove.
(ib, ia)
} else {
// no action
(None, None)
}
}
}
}
/// Generate a differential between a previous and current entry state, and what changes this
/// means for the current set of spn's for this uuid.
pub(crate) fn idx_uuid2spn_diff(
@ -2302,6 +2344,17 @@ where
self.add_ava_int(attr, value)
}
fn assert_ava(&mut self, attr: &str, value: &PartialValue) -> Result<(), OperationError> {
self.valid
.eclog
.assert_ava(&self.valid.cid, attr, value.clone());
if self.attribute_equality(attr, value) {
Ok(())
} else {
Err(OperationError::ModifyAssertionFailed)
}
}
/// Remove an attribute-value pair from this entry. If the ava doesn't exist, we
/// don't do anything else since we are asserting the abscence of a value.
pub(crate) fn remove_ava(&mut self, attr: &str, value: &PartialValue) {
@ -2373,19 +2426,30 @@ where
}
/// Apply the content of this modlist to this entry, enforcing the expressed state.
pub fn apply_modlist(&mut self, modlist: &ModifyList<ModifyValid>) {
// -> Result<Entry<EntryInvalid, STATE>, OperationError> {
// Apply a modlist, generating a new entry that conforms to the changes.
// This is effectively clone-and-transform
// mutate
pub fn apply_modlist(
&mut self,
modlist: &ModifyList<ModifyValid>,
) -> Result<(), OperationError> {
for modify in modlist {
match modify {
Modify::Present(a, v) => self.add_ava(a.as_str(), v.clone()),
Modify::Removed(a, v) => self.remove_ava(a.as_str(), v),
Modify::Purged(a) => self.purge_ava(a.as_str()),
Modify::Present(a, v) => {
self.add_ava(a.as_str(), v.clone());
}
Modify::Removed(a, v) => {
self.remove_ava(a.as_str(), v);
}
Modify::Purged(a) => {
self.purge_ava(a.as_str());
}
Modify::Assert(a, v) => {
self.assert_ava(a.as_str(), v).map_err(|e| {
error!("Modification assertion was not met. {} {:?}", a, v);
e
})?;
}
}
}
Ok(())
}
}
@ -2413,6 +2477,8 @@ impl From<&SchemaAttribute> for Entry<EntryInit, EntryNew> {
let desc_v = vs_utf8![s.description.clone()];
let multivalue_v = vs_bool![s.multivalue];
let sync_allowed_v = vs_bool![s.sync_allowed];
let phantom_v = vs_bool![s.phantom];
let unique_v = vs_bool![s.unique];
let index_v = ValueSetIndex::from_iter(s.index.iter().copied());
@ -2426,6 +2492,8 @@ impl From<&SchemaAttribute> for Entry<EntryInit, EntryNew> {
attrs.insert(AttrString::from("description"), desc_v);
attrs.insert(AttrString::from("uuid"), uuid_v);
attrs.insert(AttrString::from("multivalue"), multivalue_v);
attrs.insert(AttrString::from("phantom"), phantom_v);
attrs.insert(AttrString::from("sync_allowed"), sync_allowed_v);
attrs.insert(AttrString::from("unique"), unique_v);
if let Some(vs) = index_v {
attrs.insert(AttrString::from("index"), vs);
@ -2451,11 +2519,13 @@ impl From<&SchemaClass> for Entry<EntryInit, EntryNew> {
let uuid_v = vs_uuid![s.uuid];
let name_v = vs_iutf8![s.name.as_str()];
let desc_v = vs_utf8![s.description.clone()];
let sync_allowed_v = vs_bool![s.sync_allowed];
// let mut attrs: Map<AttrString, Set<Value>> = Map::with_capacity(8);
let mut attrs: Map<AttrString, ValueSet> = Map::new();
attrs.insert(AttrString::from("classname"), name_v);
attrs.insert(AttrString::from("description"), desc_v);
attrs.insert(AttrString::from("sync_allowed"), sync_allowed_v);
attrs.insert(AttrString::from("uuid"), uuid_v);
attrs.insert(
AttrString::from("class"),
@ -2592,7 +2662,7 @@ mod tests {
)])
};
e.apply_modlist(&present_single_mods);
assert!(e.apply_modlist(&present_single_mods).is_ok());
// Assert the changes are there
assert!(e.attribute_equality("userid", &PartialValue::new_utf8s("william")));
@ -2606,7 +2676,7 @@ mod tests {
])
};
e.apply_modlist(&present_multivalue_mods);
assert!(e.apply_modlist(&present_multivalue_mods).is_ok());
assert!(e.attribute_equality("class", &PartialValue::new_iutf8("test")));
assert!(e.attribute_equality("class", &PartialValue::new_iutf8("multi_test")));
@ -2615,20 +2685,20 @@ mod tests {
let purge_single_mods =
unsafe { ModifyList::new_valid_list(vec![Modify::Purged(AttrString::from("attr"))]) };
e.apply_modlist(&purge_single_mods);
assert!(e.apply_modlist(&purge_single_mods).is_ok());
assert!(!e.attribute_pres("attr"));
let purge_multi_mods =
unsafe { ModifyList::new_valid_list(vec![Modify::Purged(AttrString::from("class"))]) };
e.apply_modlist(&purge_multi_mods);
assert!(e.apply_modlist(&purge_multi_mods).is_ok());
assert!(!e.attribute_pres("class"));
let purge_empty_mods = purge_single_mods;
e.apply_modlist(&purge_empty_mods);
assert!(e.apply_modlist(&purge_empty_mods).is_ok());
// Assert removed on value that exists and doesn't exist
let remove_mods = unsafe {
@ -2638,14 +2708,14 @@ mod tests {
)])
};
e.apply_modlist(&present_single_mods);
assert!(e.apply_modlist(&present_single_mods).is_ok());
assert!(e.attribute_equality("attr", &PartialValue::new_iutf8("value")));
e.apply_modlist(&remove_mods);
assert!(e.apply_modlist(&remove_mods).is_ok());
assert!(e.attrs.get("attr").is_none());
let remove_empty_mods = remove_mods;
e.apply_modlist(&remove_empty_mods);
assert!(e.apply_modlist(&remove_empty_mods).is_ok());
assert!(e.attrs.get("attr").is_none());
}

View file

@ -107,7 +107,7 @@ pub fn f_spn_name(id: &str) -> FC<'static> {
/// This is the short-form for tests and internal filters that can then
/// be transformed into a filter for the server to use.
#[derive(Debug, Deserialize)]
#[derive(Clone, Debug, Deserialize)]
pub enum FC<'a> {
Eq(&'a str, PartialValue),
Sub(&'a str, PartialValue),

File diff suppressed because it is too large Load diff

View file

@ -70,8 +70,8 @@ pub mod prelude {
pub use crate::constants::*;
pub use crate::entry::{
Entry, EntryCommitted, EntryInit, EntryInitNew, EntryInvalid, EntryInvalidCommitted,
EntryNew, EntryReduced, EntryReducedCommitted, EntrySealed, EntrySealedCommitted,
EntryTuple, EntryValid,
EntryInvalidNew, EntryNew, EntryReduced, EntryReducedCommitted, EntrySealed,
EntrySealedCommitted, EntrySealedNew, EntryTuple, EntryValid,
};
pub use crate::filter::{
f_and, f_andnot, f_eq, f_id, f_inc, f_lt, f_or, f_pres, f_self, f_spn_name, f_sub, Filter,
@ -79,7 +79,10 @@ pub mod prelude {
};
pub use crate::identity::{AccessScope, IdentType, Identity, IdentityId};
pub use crate::idm::server::{IdmServer, IdmServerDelayed};
pub use crate::modify::{m_pres, m_purge, m_remove, Modify, ModifyInvalid, ModifyList};
pub use crate::modify::{
m_assert, m_pres, m_purge, m_remove, Modify, ModifyInvalid, ModifyList, ModifyValid,
};
pub use crate::server::batch_modify::BatchModifyEvent;
pub use crate::server::{
QueryServer, QueryServerReadTransaction, QueryServerTransaction,
QueryServerWriteTransaction,

View file

@ -32,6 +32,8 @@ pub enum Modify {
Removed(AttrString, PartialValue),
// This attr *should not* exist.
Purged(AttrString),
// This attr and value must exist *in this state* for this change to proceed.
Assert(AttrString, PartialValue),
}
#[allow(dead_code)]
@ -49,6 +51,11 @@ pub fn m_purge(a: &str) -> Modify {
Modify::Purged(AttrString::from(a))
}
#[allow(dead_code)]
pub fn m_assert(a: &str, v: &PartialValue) -> Modify {
Modify::Assert(a.into(), v.clone())
}
impl Modify {
pub fn from(m: &ProtoModify, qs: &QueryServerWriteTransaction) -> Result<Self, OperationError> {
Ok(match m {
@ -186,6 +193,15 @@ impl ModifyList<ModifyInvalid> {
None => Err(SchemaError::InvalidAttribute(attr_norm.to_string())),
}
}
Modify::Assert(attr, value) => {
let attr_norm = schema.normalise_attr_name(attr);
match schema_attributes.get(&attr_norm) {
Some(schema_a) => schema_a
.validate_partialvalue(attr_norm.as_str(), value)
.map(|_| Modify::Assert(attr_norm, value.clone())),
None => Err(SchemaError::InvalidAttribute(attr_norm.to_string())),
}
}
Modify::Purged(attr) => {
let attr_norm = schema.normalise_attr_name(attr);
match schema_attributes.get(&attr_norm) {

View file

@ -5,6 +5,7 @@
//
//
use std::collections::BTreeMap;
use std::collections::VecDeque;
use kanidm_proto::v1::{ConsistencyError, PluginError};
use tracing::trace;
@ -79,13 +80,13 @@ fn enforce_unique<STATE>(
let filt_in = filter!(f_or(
// for each cand_attr
cand_attr
.into_iter()
.iter()
.map(|(v, uuid)| {
// and[ attr eq k, andnot [ uuid eq v ]]
// Basically this says where name but also not self.
f_and(vec![
FC::Eq(attr, v),
f_andnot(FC::Eq("uuid", PartialValue::new_uuid(uuid))),
FC::Eq(attr, v.clone()),
f_andnot(FC::Eq("uuid", PartialValue::new_uuid(*uuid))),
])
})
.collect()
@ -99,12 +100,81 @@ fn enforce_unique<STATE>(
e
})?;
// If all okay, okay!
// TODO! Need to make this show what conflicted!
// We can probably bisect over the filter to work this out?
if conflict_cand {
// Some kind of confilct exists. We need to isolate which parts of the filter were suspect.
// To do this, we bisect over the filter and it's suspect elements.
//
// In most cases there is likely only 1 suspect element. But in some there are more. To make
// this process faster we "bisect" over chunks of the filter remaining until we have only single elements left.
//
// We do a bisect rather than a linear one-at-a-time search because we want to try to somewhat minimise calls
// through internal exists since that has a filter resolve and validate step.
// First create the vec of filters.
let mut cand_filters: Vec<_> = cand_attr
.into_iter()
.map(|(v, uuid)| {
// and[ attr eq k, andnot [ uuid eq v ]]
// Basically this says where name but also not self.
f_and(vec![
FC::Eq(attr, v),
f_andnot(FC::Eq("uuid", PartialValue::new_uuid(uuid))),
])
})
.collect();
// Fast-ish path. There is 0 or 1 element, so we just fast return.
if cand_filters.len() < 2 {
error!(
?cand_filters,
"The following filter conditions failed to assert uniqueness"
);
} else {
// First iteration, we already failed and we know that, so we just prime and setup two
// chunks here.
let mid = cand_filters.len() / 2;
let right = cand_filters.split_off(mid);
let mut queue = VecDeque::new();
queue.push_back(cand_filters);
queue.push_back(right);
// Ok! We are setup to go
while let Some(mut cand_query) = queue.pop_front() {
let filt_in = filter!(f_or(cand_query.clone()));
let conflict_cand = qs.internal_exists(filt_in).map_err(|e| {
admin_error!("internal exists error {:?}", e);
e
})?;
// A conflict was found!
if conflict_cand {
if cand_query.len() >= 2 {
// Continue to split to isolate.
let mid = cand_query.len() / 2;
let right = cand_query.split_off(mid);
queue.push_back(cand_query);
queue.push_back(right);
// Continue!
} else {
// Report this as a failing query.
error!(cand_filters = ?cand_query, "The following filter conditions failed to assert uniqueness");
}
}
}
// End logging / warning iterator
}
Err(OperationError::Plugin(PluginError::AttrUnique(
"duplicate value detected".to_string(),
)))
} else {
// If all okay, okay!
Ok(())
}
}
@ -114,17 +184,12 @@ impl Plugin for AttrUnique {
"plugin_attrunique"
}
#[instrument(
level = "debug",
name = "attrunique_pre_create_transform",
skip(qs, cand, _ce)
)]
#[instrument(level = "debug", name = "attrunique_pre_create_transform", skip_all)]
fn pre_create_transform(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
_ce: &CreateEvent,
) -> Result<(), OperationError> {
// Needs to clone to avoid a borrow issue?
let uniqueattrs = {
let schema = qs.get_schema();
schema.get_attributes_unique()
@ -136,13 +201,29 @@ impl Plugin for AttrUnique {
r
}
#[instrument(level = "debug", name = "attrunique_pre_modify", skip(qs, cand, _me))]
#[instrument(level = "debug", name = "attrunique_pre_modify", skip_all)]
fn pre_modify(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &ModifyEvent,
) -> Result<(), OperationError> {
// Needs to clone to avoid a borrow issue?
let uniqueattrs = {
let schema = qs.get_schema();
schema.get_attributes_unique()
};
let r: Result<(), OperationError> = uniqueattrs
.iter()
.try_for_each(|attr| enforce_unique(qs, cand, attr.as_str()));
r
}
#[instrument(level = "debug", name = "attrunique_pre_batch_modify", skip_all)]
fn pre_batch_modify(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &BatchModifyEvent,
) -> Result<(), OperationError> {
let uniqueattrs = {
let schema = qs.get_schema();
schema.get_attributes_unique()

View file

@ -157,18 +157,47 @@ impl Plugin for Base {
_cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
me: &ModifyEvent,
) -> Result<(), OperationError> {
for modify in me.modlist.into_iter() {
me.modlist.iter().try_for_each(|modify| {
let attr = match &modify {
Modify::Present(a, _) => a,
Modify::Removed(a, _) => a,
Modify::Purged(a) => a,
Modify::Present(a, _) => Some(a),
Modify::Removed(a, _) => Some(a),
Modify::Purged(a) => Some(a),
Modify::Assert(_, _) => None,
};
if attr == "uuid" {
if attr.map(|s| s.as_str()) == Some("uuid") {
debug!(?modify, "Modify in violation");
request_error!("Modifications to UUID's are NOT ALLOWED");
return Err(OperationError::SystemProtectedAttribute);
Err(OperationError::SystemProtectedAttribute)
} else {
Ok(())
}
}
Ok(())
})
}
#[instrument(level = "debug", name = "base_pre_modify", skip(_qs, _cand, me))]
fn pre_batch_modify(
_qs: &mut QueryServerWriteTransaction,
_cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
me: &BatchModifyEvent,
) -> Result<(), OperationError> {
me.modset
.values()
.flat_map(|ml| ml.iter())
.try_for_each(|modify| {
let attr = match &modify {
Modify::Present(a, _) => Some(a),
Modify::Removed(a, _) => Some(a),
Modify::Purged(a) => Some(a),
Modify::Assert(_, _) => None,
};
if attr.map(|s| s.as_str()) == Some("uuid") {
debug!(?modify, "Modify in violation");
request_error!("Modifications to UUID's are NOT ALLOWED");
Err(OperationError::SystemProtectedAttribute)
} else {
Ok(())
}
})
}
#[instrument(level = "debug", name = "base_verify", skip(qs))]

View file

@ -21,15 +21,38 @@ impl Plugin for Domain {
"plugin_domain"
}
#[instrument(
level = "debug",
name = "domain_pre_create_transform",
skip(qs, cand, _ce)
)]
#[instrument(level = "debug", name = "domain_pre_create_transform", skip_all)]
fn pre_create_transform(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
_ce: &CreateEvent,
) -> Result<(), OperationError> {
Self::modify_inner(qs, cand)
}
#[instrument(level = "debug", name = "domain_pre_modify", skip_all)]
fn pre_modify(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &ModifyEvent,
) -> Result<(), OperationError> {
Self::modify_inner(qs, cand)
}
#[instrument(level = "debug", name = "domain_pre_batch_modify", skip_all)]
fn pre_batch_modify(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &BatchModifyEvent,
) -> Result<(), OperationError> {
Self::modify_inner(qs, cand)
}
}
impl Domain {
fn modify_inner<T: Clone + std::fmt::Debug>(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, T>>,
) -> Result<(), OperationError> {
cand.iter_mut().try_for_each(|e| {
if e.attribute_equality("class", &PVCLASS_DOMAIN_INFO)
@ -39,54 +62,7 @@ impl Plugin for Domain {
let u = Value::new_uuid(qs.get_domain_uuid());
e.set_ava("domain_uuid", once(u));
trace!("plugin_domain: Applying uuid transform");
// We only apply this if one isn't provided.
if !e.attribute_pres("domain_name") {
let n = Value::new_iname(qs.get_domain_name());
e.set_ava("domain_name", once(n));
trace!("plugin_domain: Applying domain_name transform");
}
// create the domain_display_name if it's missing
if !e.attribute_pres("domain_display_name") {
let domain_display_name = Value::new_utf8(format!("Kanidm {}", qs.get_domain_name()));
security_info!("plugin_domain: setting default domain_display_name to {:?}", domain_display_name);
e.set_ava("domain_display_name", once(domain_display_name));
}
if !e.attribute_pres("fernet_private_key_str") {
security_info!("regenerating domain token encryption key");
let k = fernet::Fernet::generate_key();
let v = Value::new_secret_str(&k);
e.add_ava("fernet_private_key_str", v);
}
if !e.attribute_pres("es256_private_key_der") {
security_info!("regenerating domain es256 private key");
let der = JwsSigner::generate_es256()
.and_then(|jws| jws.private_key_to_der())
.map_err(|e| {
admin_error!(err = ?e, "Unable to generate ES256 JwsSigner private key");
OperationError::CryptographyError
})?;
let v = Value::new_privatebinary(&der);
e.add_ava("es256_private_key_der", v);
}
trace!(?e);
Ok(())
} else {
Ok(())
}
})
}
#[instrument(level = "debug", name = "domain_pre_modify", skip(qs, cand, _me))]
fn pre_modify(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &ModifyEvent,
) -> Result<(), OperationError> {
cand.iter_mut().try_for_each(|e| {
if e.attribute_equality("class", &PVCLASS_DOMAIN_INFO)
&& e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)
{
// We only apply this if one isn't provided.
if !e.attribute_pres("domain_name") {
let n = Value::new_iname(qs.get_domain_name());

View file

@ -3,7 +3,6 @@ use std::sync::Arc;
use kanidm_proto::v1::Filter as ProtoFilter;
use crate::event::{CreateEvent, ModifyEvent};
use crate::filter::FilterInvalid;
use crate::prelude::*;
@ -132,11 +131,11 @@ impl DynGroup {
Ok(())
}
#[instrument(level = "debug", name = "dyngroup_post_create", skip(qs, cand, ce))]
#[instrument(level = "debug", name = "dyngroup_post_create", skip_all)]
pub fn post_create(
qs: &mut QueryServerWriteTransaction,
cand: &[Entry<EntrySealed, EntryCommitted>],
ce: &CreateEvent,
ident: &Identity,
) -> Result<Vec<Uuid>, OperationError> {
let mut affected_uuids = Vec::with_capacity(cand.len());
@ -204,7 +203,7 @@ impl DynGroup {
trace!("considering new dyngroups");
Self::apply_dyngroup_change(
qs,
&ce.ident,
ident,
&mut pre_candidates,
&mut candidates,
&mut affected_uuids,
@ -219,7 +218,7 @@ impl DynGroup {
debug_assert!(pre_candidates.len() == candidates.len());
// Write this stripe if populated.
if !pre_candidates.is_empty() {
qs.internal_batch_modify(pre_candidates, candidates)
qs.internal_apply_writable(pre_candidates, candidates)
.map_err(|e| {
admin_error!("Failed to commit dyngroup set {:?}", e);
e
@ -229,16 +228,12 @@ impl DynGroup {
Ok(affected_uuids)
}
#[instrument(
level = "debug",
name = "memberof_post_modify",
skip(qs, pre_cand, cand, me)
)]
#[instrument(level = "debug", name = "memberof_post_modify", skip_all)]
pub fn post_modify(
qs: &mut QueryServerWriteTransaction,
pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
cand: &[Entry<EntrySealed, EntryCommitted>],
me: &ModifyEvent,
ident: &Identity,
) -> Result<Vec<Uuid>, OperationError> {
let mut affected_uuids = Vec::with_capacity(cand.len());
@ -268,7 +263,7 @@ impl DynGroup {
trace!("considering modified dyngroups");
Self::apply_dyngroup_change(
qs,
&me.ident,
ident,
&mut pre_candidates,
&mut candidates,
&mut affected_uuids,
@ -333,7 +328,7 @@ impl DynGroup {
debug_assert!(pre_candidates.len() == candidates.len());
// Write this stripe if populated.
if !pre_candidates.is_empty() {
qs.internal_batch_modify(pre_candidates, candidates)
qs.internal_apply_writable(pre_candidates, candidates)
.map_err(|e| {
admin_error!("Failed to commit dyngroup set {:?}", e);
e

View file

@ -64,34 +64,31 @@ impl Plugin for GidNumber {
"plugin_gidnumber"
}
#[instrument(
level = "debug",
name = "gidnumber_pre_create_transform",
skip(_qs, cand, _ce)
)]
#[instrument(level = "debug", name = "gidnumber_pre_create_transform", skip_all)]
fn pre_create_transform(
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
_ce: &CreateEvent,
) -> Result<(), OperationError> {
for e in cand.iter_mut() {
apply_gidnumber(e)?;
}
Ok(())
cand.iter_mut().try_for_each(|e| apply_gidnumber(e))
}
#[instrument(level = "debug", name = "gidnumber_pre_modify", skip(_qs, cand, _me))]
#[instrument(level = "debug", name = "gidnumber_pre_modify", skip_all)]
fn pre_modify(
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &ModifyEvent,
) -> Result<(), OperationError> {
for e in cand.iter_mut() {
apply_gidnumber(e)?;
}
cand.iter_mut().try_for_each(|e| apply_gidnumber(e))
}
Ok(())
#[instrument(level = "debug", name = "gidnumber_pre_batch_modify", skip_all)]
fn pre_batch_modify(
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &BatchModifyEvent,
) -> Result<(), OperationError> {
cand.iter_mut().try_for_each(|e| apply_gidnumber(e))
}
}

View file

@ -7,23 +7,57 @@ use crate::utils::password_from_random;
pub struct JwsKeygen {}
macro_rules! keygen_transform {
(
$e:expr
) => {{
if $e.attribute_equality("class", &PVCLASS_OAUTH2_BASIC) {
if !$e.attribute_pres("oauth2_rs_basic_secret") {
impl Plugin for JwsKeygen {
fn id() -> &'static str {
"plugin_jws_keygen"
}
#[instrument(level = "debug", name = "jwskeygen_pre_create_transform", skip_all)]
fn pre_create_transform(
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
_ce: &CreateEvent,
) -> Result<(), OperationError> {
Self::modify_inner(cand)
}
#[instrument(level = "debug", name = "jwskeygen_pre_modify", skip_all)]
fn pre_modify(
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &ModifyEvent,
) -> Result<(), OperationError> {
Self::modify_inner(cand)
}
#[instrument(level = "debug", name = "jwskeygen_pre_batch_modify", skip_all)]
fn pre_batch_modify(
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &BatchModifyEvent,
) -> Result<(), OperationError> {
Self::modify_inner(cand)
}
}
impl JwsKeygen {
fn modify_inner<T: Clone>(
cand: &mut Vec<Entry<EntryInvalid, T>>,
) -> Result<(), OperationError> {
cand.iter_mut().try_for_each(|e| {
if e.attribute_equality("class", &PVCLASS_OAUTH2_BASIC) {
if !e.attribute_pres("oauth2_rs_basic_secret") {
security_info!("regenerating oauth2 basic secret");
let v = Value::SecretValue(password_from_random());
$e.add_ava("oauth2_rs_basic_secret", v);
e.add_ava("oauth2_rs_basic_secret", v);
}
if !$e.attribute_pres("oauth2_rs_token_key") {
if !e.attribute_pres("oauth2_rs_token_key") {
security_info!("regenerating oauth2 token key");
let k = fernet::Fernet::generate_key();
let v = Value::new_secret_str(&k);
$e.add_ava("oauth2_rs_token_key", v);
e.add_ava("oauth2_rs_token_key", v);
}
if !$e.attribute_pres("es256_private_key_der") {
if !e.attribute_pres("es256_private_key_der") {
security_info!("regenerating oauth2 es256 private key");
let der = JwsSigner::generate_es256()
.and_then(|jws| jws.private_key_to_der())
@ -32,10 +66,10 @@ macro_rules! keygen_transform {
OperationError::CryptographyError
})?;
let v = Value::new_privatebinary(&der);
$e.add_ava("es256_private_key_der", v);
e.add_ava("es256_private_key_der", v);
}
if $e.get_ava_single_bool("oauth2_jwt_legacy_crypto_enable").unwrap_or(false) {
if !$e.attribute_pres("rs256_private_key_der") {
if e.get_ava_single_bool("oauth2_jwt_legacy_crypto_enable").unwrap_or(false) {
if !e.attribute_pres("rs256_private_key_der") {
security_info!("regenerating oauth2 legacy rs256 private key");
let der = JwsSigner::generate_legacy_rs256()
.and_then(|jws| jws.private_key_to_der())
@ -44,15 +78,15 @@ macro_rules! keygen_transform {
OperationError::CryptographyError
})?;
let v = Value::new_privatebinary(&der);
$e.add_ava("rs256_private_key_der", v);
e.add_ava("rs256_private_key_der", v);
}
}
}
if $e.attribute_equality("class", &PVCLASS_SERVICE_ACCOUNT) ||
$e.attribute_equality("class", &PVCLASS_SYNC_ACCOUNT)
if e.attribute_equality("class", &PVCLASS_SERVICE_ACCOUNT) ||
e.attribute_equality("class", &PVCLASS_SYNC_ACCOUNT)
{
if !$e.attribute_pres("jws_es256_private_key") {
if !e.attribute_pres("jws_es256_private_key") {
security_info!("regenerating jws es256 private key");
let jwssigner = JwsSigner::generate_es256()
.map_err(|e| {
@ -60,39 +94,12 @@ macro_rules! keygen_transform {
OperationError::CryptographyError
})?;
let v = Value::JwsKeyEs256(jwssigner);
$e.add_ava("jws_es256_private_key", v);
e.add_ava("jws_es256_private_key", v);
}
}
Ok(())
}};
}
impl Plugin for JwsKeygen {
fn id() -> &'static str {
"plugin_jws_keygen"
}
#[instrument(
level = "debug",
name = "jwskeygen_pre_create_transform",
skip(_qs, cand, _ce)
)]
fn pre_create_transform(
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
_ce: &CreateEvent,
) -> Result<(), OperationError> {
cand.iter_mut().try_for_each(|e| keygen_transform!(e))
}
#[instrument(level = "debug", name = "jwskeygen_pre_modify", skip(_qs, cand, _me))]
fn pre_modify(
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &ModifyEvent,
) -> Result<(), OperationError> {
cand.iter_mut().try_for_each(|e| keygen_transform!(e))
})
}
}

View file

@ -170,7 +170,7 @@ fn apply_memberof(
debug_assert!(pre_candidates.len() == candidates.len());
// Write this stripe if populated.
if !pre_candidates.is_empty() {
qs.internal_batch_modify(pre_candidates, candidates)
qs.internal_apply_writable(pre_candidates, candidates)
.map_err(|e| {
admin_error!("Failed to commit memberof group set {:?}", e);
e
@ -201,7 +201,7 @@ fn apply_memberof(
// Turn the other_cache into a write set.
// Write the batch out in a single stripe.
qs.internal_batch_modify(pre_candidates, candidates)
qs.internal_apply_writable(pre_candidates, candidates)
// Done! 🎉
}
@ -216,7 +216,7 @@ impl Plugin for MemberOf {
cand: &[Entry<EntrySealed, EntryCommitted>],
ce: &CreateEvent,
) -> Result<(), OperationError> {
let dyngroup_change = super::dyngroup::DynGroup::post_create(qs, cand, ce)?;
let dyngroup_change = super::dyngroup::DynGroup::post_create(qs, cand, &ce.ident)?;
let group_affect = cand
.iter()
@ -239,74 +239,25 @@ impl Plugin for MemberOf {
apply_memberof(qs, group_affect)
}
#[instrument(
level = "debug",
name = "memberof_post_modify",
skip(qs, pre_cand, cand, me)
)]
#[instrument(level = "debug", name = "memberof_post_modify", skip_all)]
fn post_modify(
qs: &mut QueryServerWriteTransaction,
pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
cand: &[Entry<EntrySealed, EntryCommitted>],
me: &ModifyEvent,
) -> Result<(), OperationError> {
let dyngroup_change = super::dyngroup::DynGroup::post_modify(qs, pre_cand, cand, me)?;
// TODO: Limit this to when it's a class, member, mo, dmo change instead.
let group_affect = cand
.iter()
.map(|post| post.get_uuid())
.chain(dyngroup_change.into_iter())
.chain(
pre_cand
.iter()
.filter_map(|pre| {
if pre.attribute_equality("class", &PVCLASS_GROUP) {
pre.get_ava_as_refuuid("member")
} else {
None
}
})
.flatten(),
)
.chain(
cand.iter()
.filter_map(|post| {
if post.attribute_equality("class", &PVCLASS_GROUP) {
post.get_ava_as_refuuid("member")
} else {
None
}
})
.flatten(),
)
.collect();
apply_memberof(qs, group_affect)
Self::post_modify_inner(qs, pre_cand, cand, &me.ident)
}
/*
fn pre_delete(
_qs: &QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_de: &DeleteEvent,
#[instrument(level = "debug", name = "memberof_post_batch_modify", skip_all)]
fn post_batch_modify(
qs: &mut QueryServerWriteTransaction,
pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
cand: &[Entry<EntrySealed, EntryCommitted>],
me: &BatchModifyEvent,
) -> Result<(), OperationError> {
// It is not valid for a recycled group to be considered
// a member of any other type. We simply purge the ava from
// the entries. This is because it will be removed from all
// locations where it *was* a member.
//
// As a result, on restore, the graph of where it was a member
// would have to be rebuilt.
//
// NOTE: DO NOT purge directmemberof - we use that to restore memberships
// in recycle revive!
let mo_purge = unsafe { ModifyList::new_purge("memberof").into_valid() };
cand.iter_mut().for_each(|e| e.apply_modlist(&mo_purge));
Ok(())
Self::post_modify_inner(qs, pre_cand, cand, &me.ident)
}
*/
#[instrument(level = "debug", name = "memberof_post_delete", skip(qs, cand, _de))]
fn post_delete(
@ -424,6 +375,49 @@ impl Plugin for MemberOf {
}
}
impl MemberOf {
fn post_modify_inner(
qs: &mut QueryServerWriteTransaction,
pre_cand: &[Arc<EntrySealedCommitted>],
cand: &[EntrySealedCommitted],
ident: &Identity,
) -> Result<(), OperationError> {
let dyngroup_change = super::dyngroup::DynGroup::post_modify(qs, pre_cand, cand, ident)?;
// TODO: Limit this to when it's a class, member, mo, dmo change instead.
let group_affect = cand
.iter()
.map(|post| post.get_uuid())
.chain(dyngroup_change.into_iter())
.chain(
pre_cand
.iter()
.filter_map(|pre| {
if pre.attribute_equality("class", &PVCLASS_GROUP) {
pre.get_ava_as_refuuid("member")
} else {
None
}
})
.flatten(),
)
.chain(
cand.iter()
.filter_map(|post| {
if post.attribute_equality("class", &PVCLASS_GROUP) {
post.get_ava_as_refuuid("member")
} else {
None
}
})
.flatten(),
)
.collect();
apply_memberof(qs, group_affect)
}
}
#[cfg(test)]
mod tests {
use crate::prelude::*;

View file

@ -29,7 +29,7 @@ trait Plugin {
fn pre_create_transform(
_qs: &mut QueryServerWriteTransaction,
_cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
_cand: &mut Vec<EntryInvalidNew>,
_ce: &CreateEvent,
) -> Result<(), OperationError> {
admin_error!(
@ -42,7 +42,7 @@ trait Plugin {
fn pre_create(
_qs: &mut QueryServerWriteTransaction,
// List of what we will commit that is valid?
_cand: &[Entry<EntrySealed, EntryNew>],
_cand: &[EntrySealedNew],
_ce: &CreateEvent,
) -> Result<(), OperationError> {
admin_error!("plugin {} has an unimplemented pre_create!", Self::id());
@ -52,7 +52,7 @@ trait Plugin {
fn post_create(
_qs: &mut QueryServerWriteTransaction,
// List of what we commited that was valid?
_cand: &[Entry<EntrySealed, EntryCommitted>],
_cand: &[EntrySealedCommitted],
_ce: &CreateEvent,
) -> Result<(), OperationError> {
admin_error!("plugin {} has an unimplemented post_create!", Self::id());
@ -61,7 +61,7 @@ trait Plugin {
fn pre_modify(
_qs: &mut QueryServerWriteTransaction,
_cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_cand: &mut Vec<EntryInvalidCommitted>,
_me: &ModifyEvent,
) -> Result<(), OperationError> {
admin_error!("plugin {} has an unimplemented pre_modify!", Self::id());
@ -71,17 +71,43 @@ trait Plugin {
fn post_modify(
_qs: &mut QueryServerWriteTransaction,
// List of what we modified that was valid?
_pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
_cand: &[Entry<EntrySealed, EntryCommitted>],
_pre_cand: &[Arc<EntrySealedCommitted>],
_cand: &[EntrySealedCommitted],
_ce: &ModifyEvent,
) -> Result<(), OperationError> {
admin_error!("plugin {} has an unimplemented post_modify!", Self::id());
Err(OperationError::InvalidState)
}
fn pre_batch_modify(
_qs: &mut QueryServerWriteTransaction,
_cand: &mut Vec<EntryInvalidCommitted>,
_me: &BatchModifyEvent,
) -> Result<(), OperationError> {
admin_error!(
"plugin {} has an unimplemented pre_batch_modify!",
Self::id()
);
Err(OperationError::InvalidState)
}
fn post_batch_modify(
_qs: &mut QueryServerWriteTransaction,
// List of what we modified that was valid?
_pre_cand: &[Arc<EntrySealedCommitted>],
_cand: &[EntrySealedCommitted],
_me: &BatchModifyEvent,
) -> Result<(), OperationError> {
admin_error!(
"plugin {} has an unimplemented post_batch_modify!",
Self::id()
);
Err(OperationError::InvalidState)
}
fn pre_delete(
_qs: &mut QueryServerWriteTransaction,
_cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_cand: &mut Vec<EntryInvalidCommitted>,
_de: &DeleteEvent,
) -> Result<(), OperationError> {
admin_error!("plugin {} has an unimplemented pre_delete!", Self::id());
@ -91,7 +117,7 @@ trait Plugin {
fn post_delete(
_qs: &mut QueryServerWriteTransaction,
// List of what we delete that was valid?
_cand: &[Entry<EntrySealed, EntryCommitted>],
_cand: &[EntrySealedCommitted],
_ce: &DeleteEvent,
) -> Result<(), OperationError> {
admin_error!("plugin {} has an unimplemented post_delete!", Self::id());
@ -183,6 +209,36 @@ impl Plugins {
.and_then(|_| memberof::MemberOf::post_modify(qs, pre_cand, cand, me))
}
#[instrument(level = "debug", name = "plugins::run_pre_batch_modify", skip_all)]
pub fn run_pre_batch_modify(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
me: &BatchModifyEvent,
) -> Result<(), OperationError> {
protected::Protected::pre_batch_modify(qs, cand, me)
.and_then(|_| base::Base::pre_batch_modify(qs, cand, me))
.and_then(|_| password_import::PasswordImport::pre_batch_modify(qs, cand, me))
.and_then(|_| jwskeygen::JwsKeygen::pre_batch_modify(qs, cand, me))
.and_then(|_| gidnumber::GidNumber::pre_batch_modify(qs, cand, me))
.and_then(|_| domain::Domain::pre_batch_modify(qs, cand, me))
.and_then(|_| spn::Spn::pre_batch_modify(qs, cand, me))
.and_then(|_| session::SessionConsistency::pre_batch_modify(qs, cand, me))
// attr unique should always be last
.and_then(|_| attrunique::AttrUnique::pre_batch_modify(qs, cand, me))
}
#[instrument(level = "debug", name = "plugins::run_post_batch_modify", skip_all)]
pub fn run_post_batch_modify(
qs: &mut QueryServerWriteTransaction,
pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
cand: &[Entry<EntrySealed, EntryCommitted>],
me: &BatchModifyEvent,
) -> Result<(), OperationError> {
refint::ReferentialIntegrity::post_batch_modify(qs, pre_cand, cand, me)
.and_then(|_| spn::Spn::post_batch_modify(qs, pre_cand, cand, me))
.and_then(|_| memberof::MemberOf::post_batch_modify(qs, pre_cand, cand, me))
}
#[instrument(level = "debug", name = "plugins::run_pre_delete", skip_all)]
pub fn run_pre_delete(
qs: &mut QueryServerWriteTransaction,

View file

@ -26,6 +26,7 @@ impl Plugin for PasswordImport {
cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
_ce: &CreateEvent,
) -> Result<(), OperationError> {
/*
cand.iter_mut()
.try_for_each(|e| {
// is there a password we are trying to import?
@ -62,6 +63,8 @@ impl Plugin for PasswordImport {
}
}
})
*/
Self::modify_inner(cand)
}
#[instrument(
@ -73,6 +76,23 @@ impl Plugin for PasswordImport {
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &ModifyEvent,
) -> Result<(), OperationError> {
Self::modify_inner(cand)
}
#[instrument(level = "debug", name = "password_import_pre_batch_modify", skip_all)]
fn pre_batch_modify(
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &BatchModifyEvent,
) -> Result<(), OperationError> {
Self::modify_inner(cand)
}
}
impl PasswordImport {
fn modify_inner<T: Clone>(
cand: &mut Vec<Entry<EntryInvalid, T>>,
) -> Result<(), OperationError> {
cand.iter_mut().try_for_each(|e| {
// is there a password we are trying to import?

View file

@ -57,6 +57,7 @@ impl Plugin for Protected {
|| cand.attribute_equality("class", &PVCLASS_TOMBSTONE)
|| cand.attribute_equality("class", &PVCLASS_RECYCLED)
|| cand.attribute_equality("class", &PVCLASS_DYNGROUP)
|| cand.attribute_equality("class", &PVCLASS_SYNC_OBJECT)
{
Err(OperationError::SystemProtectedObject)
} else {
@ -68,8 +69,7 @@ impl Plugin for Protected {
#[instrument(level = "debug", name = "protected_pre_modify", skip(_qs, cand, me))]
fn pre_modify(
_qs: &mut QueryServerWriteTransaction,
// Should these be EntrySealed?
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
cand: &mut Vec<EntryInvalidCommitted>,
me: &ModifyEvent,
) -> Result<(), OperationError> {
if me.ident.is_internal() {
@ -79,13 +79,13 @@ impl Plugin for Protected {
// Prevent adding class: system, domain_info, tombstone, or recycled.
me.modlist.iter().try_fold((), |(), m| match m {
Modify::Present(a, v) => {
// TODO: Can we avoid this clone?
if a == "class"
&& (v == &(*CLASS_SYSTEM)
|| v == &(*CLASS_DOMAIN_INFO)
|| v == &(*CLASS_SYSTEM_INFO)
|| v == &(*CLASS_SYSTEM_CONFIG)
|| v == &(*CLASS_DYNGROUP)
|| v == &(*CLASS_SYNC_OBJECT)
|| v == &(*CLASS_TOMBSTONE)
|| v == &(*CLASS_RECYCLED))
{
@ -103,6 +103,8 @@ impl Plugin for Protected {
if cand.attribute_equality("class", &PVCLASS_TOMBSTONE)
|| cand.attribute_equality("class", &PVCLASS_RECYCLED)
|| cand.attribute_equality("class", &PVCLASS_DYNGROUP)
// Temporary until I move this into access.rs
|| cand.attribute_equality("class", &PVCLASS_SYNC_OBJECT)
{
Err(OperationError::SystemProtectedObject)
} else {
@ -127,15 +129,104 @@ impl Plugin for Protected {
me.modlist.iter().try_fold((), |(), m| {
// Already hit an error, move on.
let a = match m {
Modify::Present(a, _) | Modify::Removed(a, _) | Modify::Purged(a) => a,
Modify::Present(a, _) | Modify::Removed(a, _) | Modify::Purged(a) => Some(a),
Modify::Assert(_, _) => None,
};
match ALLOWED_ATTRS.get(a.as_str()) {
Some(_) => Ok(()),
None => Err(OperationError::SystemProtectedObject),
if let Some(a) = a {
match ALLOWED_ATTRS.get(a.as_str()) {
Some(_) => Ok(()),
None => Err(OperationError::SystemProtectedObject),
}
} else {
// Was not a mod needing checking
Ok(())
}
})
}
fn pre_batch_modify(
_qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<EntryInvalidCommitted>,
me: &BatchModifyEvent,
) -> Result<(), OperationError> {
if me.ident.is_internal() {
trace!("Internal operation, not enforcing system object protection");
return Ok(());
}
me.modset
.values()
.flat_map(|ml| ml.iter())
.try_fold((), |(), m| match m {
Modify::Present(a, v) => {
if a == "class"
&& (v == &(*CLASS_SYSTEM)
|| v == &(*CLASS_DOMAIN_INFO)
|| v == &(*CLASS_SYSTEM_INFO)
|| v == &(*CLASS_SYSTEM_CONFIG)
|| v == &(*CLASS_DYNGROUP)
|| v == &(*CLASS_SYNC_OBJECT)
|| v == &(*CLASS_TOMBSTONE)
|| v == &(*CLASS_RECYCLED))
{
Err(OperationError::SystemProtectedObject)
} else {
Ok(())
}
}
_ => Ok(()),
})?;
// HARD block mods on tombstone or recycle. We soft block on the rest as they may
// have some allowed attrs.
cand.iter().try_fold((), |(), cand| {
if cand.attribute_equality("class", &PVCLASS_TOMBSTONE)
|| cand.attribute_equality("class", &PVCLASS_RECYCLED)
|| cand.attribute_equality("class", &PVCLASS_DYNGROUP)
// Temporary until I move this into access.rs
|| cand.attribute_equality("class", &PVCLASS_SYNC_OBJECT)
{
Err(OperationError::SystemProtectedObject)
} else {
Ok(())
}
})?;
// if class: system, check the mods are "allowed"
let system_pres = cand.iter().any(|c| {
// We don't need to check for domain info here because domain_info has a class
// system also. We just need to block it from being created.
c.attribute_equality("class", &PVCLASS_SYSTEM)
});
trace!("class: system -> {}", system_pres);
// No system types being altered, return.
if !system_pres {
return Ok(());
}
// Something altered is system, check if it's allowed.
me.modset
.values()
.flat_map(|ml| ml.iter())
.try_fold((), |(), m| {
// Already hit an error, move on.
let a = match m {
Modify::Present(a, _) | Modify::Removed(a, _) | Modify::Purged(a) => Some(a),
Modify::Assert(_, _) => None,
};
if let Some(a) = a {
match ALLOWED_ATTRS.get(a.as_str()) {
Some(_) => Ok(()),
None => Err(OperationError::SystemProtectedObject),
}
} else {
// Was not a mod needing checking
Ok(())
}
})
}
#[instrument(level = "debug", name = "protected_pre_delete", skip(_qs, cand, de))]
fn pre_delete(
_qs: &mut QueryServerWriteTransaction,
@ -156,6 +247,7 @@ impl Plugin for Protected {
|| cand.attribute_equality("class", &PVCLASS_TOMBSTONE)
|| cand.attribute_equality("class", &PVCLASS_RECYCLED)
|| cand.attribute_equality("class", &PVCLASS_DYNGROUP)
|| cand.attribute_equality("class", &PVCLASS_SYNC_OBJECT)
{
Err(OperationError::SystemProtectedObject)
} else {

View file

@ -14,11 +14,9 @@ use std::sync::Arc;
use hashbrown::HashSet as Set;
use kanidm_proto::v1::{ConsistencyError, PluginError};
use tracing::trace;
use crate::event::{CreateEvent, DeleteEvent, ModifyEvent};
use crate::filter::f_eq;
use crate::modify::Modify;
use crate::plugins::Plugin;
use crate::prelude::*;
use crate::schema::SchemaTransaction;
@ -88,82 +86,30 @@ impl Plugin for ReferentialIntegrity {
cand: &[Entry<EntrySealed, EntryCommitted>],
_ce: &CreateEvent,
) -> Result<(), OperationError> {
let schema = qs.get_schema();
let ref_types = schema.get_reference_types();
// Fast Path
let mut vsiter = cand.iter().flat_map(|c| {
ref_types
.values()
.filter_map(move |rtype| c.get_ava_set(&rtype.name))
});
// Could check len first?
let mut i = Vec::new();
vsiter.try_for_each(|vs| {
if let Some(uuid_iter) = vs.as_ref_uuid_iter() {
uuid_iter.for_each(|u| {
i.push(PartialValue::new_uuid(u))
});
Ok(())
} else {
admin_error!(?vs, "reference value could not convert to reference uuid.");
admin_error!("If you are sure the name/uuid/spn exist, and that this is in error, you should run a verify task.");
Err(OperationError::InvalidAttribute(
"uuid could not become reference value".to_string(),
))
}
})?;
Self::check_uuids_exist(qs, i)
Self::post_modify_inner(qs, cand)
}
#[instrument(
level = "debug",
name = "refint_post_modify",
skip(qs, _pre_cand, _cand, me)
)]
#[instrument(level = "debug", name = "refint_post_modify", skip_all)]
fn post_modify(
qs: &mut QueryServerWriteTransaction,
_pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
_cand: &[Entry<EntrySealed, EntryCommitted>],
me: &ModifyEvent,
cand: &[Entry<EntrySealed, EntryCommitted>],
_me: &ModifyEvent,
) -> Result<(), OperationError> {
let schema = qs.get_schema();
let ref_types = schema.get_reference_types();
let i: Result<Vec<PartialValue>, _> = me.modlist.into_iter().filter_map(|modify| {
if let Modify::Present(a, v) = &modify {
if ref_types.get(a).is_some() {
Some(v)
} else {
None
}
} else {
None
}
})
.map(|v| {
v.to_ref_uuid()
.map(|uuid| PartialValue::new_uuid(uuid))
.ok_or_else(|| {
admin_error!(?v, "reference value could not convert to reference uuid.");
admin_error!("If you are sure the name/uuid/spn exist, and that this is in error, you should run a verify task.");
OperationError::InvalidAttribute(
"uuid could not become reference value".to_string(),
)
})
})
.collect();
let i = i?;
Self::check_uuids_exist(qs, i)
Self::post_modify_inner(qs, cand)
}
#[instrument(level = "debug", name = "refint_post_delete", skip(qs, cand, _ce))]
#[instrument(level = "debug", name = "refint_post_batch_modify", skip_all)]
fn post_batch_modify(
qs: &mut QueryServerWriteTransaction,
_pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
cand: &[Entry<EntrySealed, EntryCommitted>],
_me: &BatchModifyEvent,
) -> Result<(), OperationError> {
Self::post_modify_inner(qs, cand)
}
#[instrument(level = "debug", name = "refint_post_delete", skip_all)]
fn post_delete(
qs: &mut QueryServerWriteTransaction,
cand: &[Entry<EntrySealed, EntryCommitted>],
@ -213,7 +159,7 @@ impl Plugin for ReferentialIntegrity {
})
.unzip();
qs.internal_batch_modify(pre_candidates, candidates)
qs.internal_apply_writable(pre_candidates, candidates)
}
#[instrument(level = "debug", name = "verify", skip(qs))]
@ -263,6 +209,43 @@ impl Plugin for ReferentialIntegrity {
}
}
impl ReferentialIntegrity {
fn post_modify_inner(
qs: &mut QueryServerWriteTransaction,
cand: &[Entry<EntrySealed, EntryCommitted>],
) -> Result<(), OperationError> {
let schema = qs.get_schema();
let ref_types = schema.get_reference_types();
// Fast Path
let mut vsiter = cand.iter().flat_map(|c| {
ref_types
.values()
.filter_map(move |rtype| c.get_ava_set(&rtype.name))
});
// Could check len first?
let mut i = Vec::with_capacity(cand.len() * 2);
vsiter.try_for_each(|vs| {
if let Some(uuid_iter) = vs.as_ref_uuid_iter() {
uuid_iter.for_each(|u| {
i.push(PartialValue::new_uuid(u))
});
Ok(())
} else {
admin_error!(?vs, "reference value could not convert to reference uuid.");
admin_error!("If you are sure the name/uuid/spn exist, and that this is in error, you should run a verify task.");
Err(OperationError::InvalidAttribute(
"uuid could not become reference value".to_string(),
))
}
})?;
Self::check_uuids_exist(qs, i)
}
}
#[cfg(test)]
mod tests {
use kanidm_proto::v1::PluginError;

View file

@ -25,6 +25,24 @@ impl Plugin for SessionConsistency {
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &ModifyEvent,
) -> Result<(), OperationError> {
Self::modify_inner(qs, cand)
}
#[instrument(level = "debug", name = "session_consistency", skip_all)]
fn pre_batch_modify(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &BatchModifyEvent,
) -> Result<(), OperationError> {
Self::modify_inner(qs, cand)
}
}
impl SessionConsistency {
fn modify_inner<T: Clone + std::fmt::Debug>(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, T>>,
) -> Result<(), OperationError> {
let curtime = qs.get_curtime();
let curtime_odt = OffsetDateTime::unix_epoch() + curtime;

View file

@ -19,11 +19,7 @@ impl Plugin for Spn {
}
// hook on pre-create and modify to generate / validate.
#[instrument(
level = "debug",
name = "spn_pre_create_transform",
skip(qs, cand, _ce)
)]
#[instrument(level = "debug", name = "spn_pre_create_transform", skip_all)]
fn pre_create_transform(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
@ -32,68 +28,28 @@ impl Plugin for Spn {
// Always generate the spn and set it. Why? Because the effort
// needed to validate is the same as generation, so we may as well
// just generate and set blindly when required.
// Should we work out what classes dynamically from schema into a filter?
// No - types that are trust replicated are fixed.
let domain_name = qs.get_domain_name();
for e in cand.iter_mut() {
if e.attribute_equality("class", &PVCLASS_GROUP)
|| e.attribute_equality("class", &PVCLASS_ACCOUNT)
{
let spn = e
.generate_spn(domain_name)
.ok_or(OperationError::InvalidEntryState)
.map_err(|e| {
admin_error!(
"Account or group missing name, unable to generate spn!? {:?}",
e
);
e
})?;
trace!("plugin_spn: set spn to {:?}", spn);
e.set_ava("spn", once(spn));
}
}
Ok(())
Self::modify_inner(qs, cand)
}
#[instrument(level = "debug", name = "spn_pre_modify", skip(qs, cand, _me))]
#[instrument(level = "debug", name = "spn_pre_modify", skip_all)]
fn pre_modify(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &ModifyEvent,
) -> Result<(), OperationError> {
// Always generate and set *if* spn was an attribute on any of the mod
// list events.
let domain_name = qs.get_domain_name();
for e in cand.iter_mut() {
if e.attribute_equality("class", &PVCLASS_GROUP)
|| e.attribute_equality("class", &PVCLASS_ACCOUNT)
{
let spn = e
.generate_spn(domain_name)
.ok_or(OperationError::InvalidEntryState)
.map_err(|e| {
admin_error!(
"Account or group missing name, unable to generate spn!? {:?}",
e
);
e
})?;
trace!("plugin_spn: set spn to {:?}", spn);
e.set_ava("spn", once(spn));
}
}
Ok(())
Self::modify_inner(qs, cand)
}
#[instrument(
level = "debug",
name = "spn_post_modify",
skip(qs, pre_cand, cand, _ce)
)]
#[instrument(level = "debug", name = "spn_pre_batch_modify", skip_all)]
fn pre_batch_modify(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
_me: &BatchModifyEvent,
) -> Result<(), OperationError> {
Self::modify_inner(qs, cand)
}
#[instrument(level = "debug", name = "spn_post_modify", skip_all)]
fn post_modify(
qs: &mut QueryServerWriteTransaction,
// List of what we modified that was valid?
@ -101,40 +57,18 @@ impl Plugin for Spn {
cand: &[Entry<EntrySealed, EntryCommitted>],
_ce: &ModifyEvent,
) -> Result<(), OperationError> {
// On modify, if changing domain_name on UUID_DOMAIN_INFO
// trigger the spn regen ... which is expensive. Future
// TODO #157: will be improvements to modify on large txns.
Self::post_modify_inner(qs, pre_cand, cand)
}
let domain_name_changed = cand.iter().zip(pre_cand.iter()).find_map(|(post, pre)| {
let domain_name = post.get_ava_single("domain_name");
if post.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)
&& domain_name != pre.get_ava_single("domain_name")
{
domain_name
} else {
None
}
});
let domain_name = match domain_name_changed {
Some(s) => s,
None => return Ok(()),
};
admin_info!(
"IMPORTANT!!! Changing domain name to \"{:?}\". THIS MAY TAKE A LONG TIME ...",
domain_name
);
// All we do is purge spn, and allow the plugin to recreate. Neat! It's also all still
// within the transaction, just incase!
qs.internal_modify(
&filter!(f_or!([
f_eq("class", PVCLASS_GROUP.clone()),
f_eq("class", PVCLASS_ACCOUNT.clone())
])),
&modlist!([m_purge("spn")]),
)
#[instrument(level = "debug", name = "spn_post_batch_modify", skip_all)]
fn post_batch_modify(
qs: &mut QueryServerWriteTransaction,
// List of what we modified that was valid?
pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
cand: &[Entry<EntrySealed, EntryCommitted>],
_ce: &BatchModifyEvent,
) -> Result<(), OperationError> {
Self::post_modify_inner(qs, pre_cand, cand)
}
#[instrument(level = "debug", name = "spn_verify", skip(qs))]
@ -198,6 +132,76 @@ impl Plugin for Spn {
}
}
impl Spn {
fn modify_inner<T: Clone + std::fmt::Debug>(
qs: &mut QueryServerWriteTransaction,
cand: &mut Vec<Entry<EntryInvalid, T>>,
) -> Result<(), OperationError> {
let domain_name = qs.get_domain_name();
for ent in cand.iter_mut() {
if ent.attribute_equality("class", &PVCLASS_GROUP)
|| ent.attribute_equality("class", &PVCLASS_ACCOUNT)
{
let spn = ent
.generate_spn(domain_name)
.ok_or(OperationError::InvalidEntryState)
.map_err(|e| {
admin_error!(
"Account or group missing name, unable to generate spn!? {:?} entry_id = {:?}",
e, ent.get_uuid()
);
e
})?;
trace!("plugin_spn: set spn to {:?}", spn);
ent.set_ava("spn", once(spn));
}
}
Ok(())
}
fn post_modify_inner(
qs: &mut QueryServerWriteTransaction,
pre_cand: &[Arc<Entry<EntrySealed, EntryCommitted>>],
cand: &[Entry<EntrySealed, EntryCommitted>],
) -> Result<(), OperationError> {
// On modify, if changing domain_name on UUID_DOMAIN_INFO
// trigger the spn regen ... which is expensive. Future
// TODO #157: will be improvements to modify on large txns.
let domain_name_changed = cand.iter().zip(pre_cand.iter()).find_map(|(post, pre)| {
let domain_name = post.get_ava_single("domain_name");
if post.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)
&& domain_name != pre.get_ava_single("domain_name")
{
domain_name
} else {
None
}
});
let domain_name = match domain_name_changed {
Some(s) => s,
None => return Ok(()),
};
admin_info!(
"IMPORTANT!!! Changing domain name to \"{:?}\". THIS MAY TAKE A LONG TIME ...",
domain_name
);
// All we do is purge spn, and allow the plugin to recreate. Neat! It's also all still
// within the transaction, just incase!
qs.internal_modify(
&filter!(f_or!([
f_eq("class", PVCLASS_GROUP.clone()),
f_eq("class", PVCLASS_ACCOUNT.clone())
])),
&modlist!([m_purge("spn")]),
)
}
}
#[cfg(test)]
mod tests {
use crate::prelude::*;

View file

@ -66,6 +66,7 @@ enum Transition {
ModifyPurge(AttrString),
ModifyPresent(AttrString, Box<Value>),
ModifyRemoved(AttrString, Box<PartialValue>),
ModifyAssert(AttrString, Box<PartialValue>),
Recycle,
Revive,
Tombstone(Eattrs),
@ -78,6 +79,7 @@ impl fmt::Display for Transition {
Transition::ModifyPurge(a) => write!(f, "ModifyPurge({})", a),
Transition::ModifyPresent(a, _) => write!(f, "ModifyPresent({})", a),
Transition::ModifyRemoved(a, _) => write!(f, "ModifyRemoved({})", a),
Transition::ModifyAssert(a, _) => write!(f, "ModifyAssert({})", a),
Transition::Recycle => write!(f, "Recycle"),
Transition::Revive => write!(f, "Revive"),
Transition::Tombstone(_) => write!(f, "Tombstone"),
@ -123,6 +125,20 @@ impl State {
attrs.remove(attr);
};
}
(State::Live(ref mut attrs), Transition::ModifyAssert(attr, value)) => {
trace!("Live + ModifyAssert({}) -> Live", attr);
if attrs
.get(attr)
.map(|vs| vs.contains(&value))
.unwrap_or(false)
{
// Valid
} else {
warn!("{} + {:?} -> Assertion not met - REJECTING", attr, value);
return Err(state);
}
}
(State::Live(attrs), Transition::Recycle) => {
trace!("Live + Recycle -> Recycled");
state = State::Recycled(attrs.clone());
@ -275,6 +291,23 @@ impl EntryChangelog {
.for_each(|t| change.s.push(t));
}
pub fn assert_ava(&mut self, cid: &Cid, attr: &str, value: PartialValue) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
#[allow(clippy::expect_used)]
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::ModifyAssert(
AttrString::from(attr),
Box::new(value),
))
}
pub fn purge_ava(&mut self, cid: &Cid, attr: &str) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });

View file

@ -78,7 +78,7 @@ pub struct SchemaReadTransaction {
/// [`Entry`]: ../entry/index.html
/// [`indexed`]: ../value/enum.IndexType.html
/// [`syntax`]: ../value/enum.SyntaxType.html
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Default)]
pub struct SchemaAttribute {
// Is this ... used?
// class: Vec<String>,
@ -89,6 +89,7 @@ pub struct SchemaAttribute {
pub multivalue: bool,
pub unique: bool,
pub phantom: bool,
pub sync_allowed: bool,
pub index: Vec<IndexType>,
pub syntax: SyntaxType,
}
@ -136,6 +137,9 @@ impl SchemaAttribute {
OperationError::InvalidSchemaState("missing unique".to_string())
})?;
let phantom = value.get_ava_single_bool("phantom").unwrap_or(false);
let sync_allowed = value.get_ava_single_bool("sync_allowed").unwrap_or(false);
// index vec
// even if empty, it SHOULD be present ... (is that valid to put an empty set?)
// The get_ava_opt_index handles the optional case for us :)
@ -156,6 +160,7 @@ impl SchemaAttribute {
multivalue,
unique,
phantom,
sync_allowed,
index,
syntax,
})
@ -306,6 +311,7 @@ pub struct SchemaClass {
pub name: AttrString,
pub uuid: Uuid,
pub description: String,
pub sync_allowed: bool,
/// This allows modification of system types to be extended in custom ways
pub systemmay: Vec<AttrString>,
pub may: Vec<AttrString>,
@ -352,6 +358,8 @@ impl SchemaClass {
OperationError::InvalidSchemaState("missing description".to_string())
})?;
let sync_allowed = value.get_ava_single_bool("sync_allowed").unwrap_or(false);
// These are all "optional" lists of strings.
let systemmay = value
.get_ava_iter_iutf8("systemmay")
@ -391,6 +399,7 @@ impl SchemaClass {
name,
uuid,
description,
sync_allowed,
systemmay,
may,
systemmust,
@ -659,6 +668,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality, IndexType::Presence],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -674,6 +684,7 @@ impl<'a> SchemaWriteTransaction<'a> {
// needing to check recycled objects too.
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality, IndexType::Presence],
syntax: SyntaxType::Uuid,
},
@ -689,6 +700,7 @@ impl<'a> SchemaWriteTransaction<'a> {
// needing to check recycled objects too.
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Cid,
},
@ -702,6 +714,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: true,
phantom: false,
sync_allowed: true,
index: vec![IndexType::Equality, IndexType::Presence],
syntax: SyntaxType::Utf8StringIname,
},
@ -717,6 +730,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: true,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::SecurityPrincipalName,
},
@ -730,6 +744,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: true,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -743,6 +758,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: true,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -756,6 +772,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: true,
index: vec![],
syntax: SyntaxType::Utf8String,
},
@ -767,6 +784,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Boolean,
});
@ -777,6 +795,18 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Boolean,
});
self.attributes.insert(AttrString::from("sync_allowed"), SchemaAttribute {
name: AttrString::from("sync_allowed"),
uuid: UUID_SCHEMA_ATTR_SYNC_ALLOWED,
description: String::from("If true, this attribute or class can by synchronised by an external scim import"),
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Boolean,
});
@ -791,6 +821,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Boolean,
},
@ -806,6 +837,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::IndexId,
},
@ -821,6 +853,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::SyntaxId,
},
@ -836,6 +869,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -851,6 +885,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -866,6 +901,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -881,6 +917,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -896,6 +933,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -911,6 +949,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -926,6 +965,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -941,6 +981,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -957,6 +998,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Boolean,
},
@ -973,6 +1015,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality, IndexType::SubString],
syntax: SyntaxType::JsonFilter,
},
@ -988,6 +1031,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::ReferenceUuid,
},
@ -1004,6 +1048,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality, IndexType::SubString],
syntax: SyntaxType::JsonFilter,
},
@ -1019,6 +1064,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1032,6 +1078,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1047,6 +1094,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1063,6 +1111,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1078,6 +1127,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1091,6 +1141,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1105,6 +1156,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::ReferenceUuid,
},
@ -1118,6 +1170,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::ReferenceUuid,
},
@ -1131,6 +1184,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: true,
index: vec![IndexType::Equality],
syntax: SyntaxType::ReferenceUuid,
},
@ -1147,6 +1201,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Uint32,
},
@ -1161,6 +1216,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringIname,
},
@ -1176,6 +1232,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1191,6 +1248,55 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
);
// External Scim Sync
self.attributes.insert(
AttrString::from("sync_external_id"),
SchemaAttribute {
name: AttrString::from("sync_external_id"),
uuid: UUID_SCHEMA_ATTR_SYNC_EXTERNAL_ID,
description: String::from(
"An external string ID of an entry imported from a sync agreement",
),
multivalue: false,
unique: true,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
},
);
self.attributes.insert(
AttrString::from("sync_parent_uuid"),
SchemaAttribute {
name: AttrString::from("sync_parent_uuid"),
uuid: UUID_SCHEMA_ATTR_SYNC_PARENT_UUID,
description: String::from(
"The UUID of the parent sync agreement that created this entry.",
),
multivalue: false,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::ReferenceUuid,
},
);
self.attributes.insert(
AttrString::from("sync_class"),
SchemaAttribute {
name: AttrString::from("sync_class"),
uuid: UUID_SCHEMA_ATTR_SYNC_CLASS,
description: String::from("The set of classes requested by the sync client."),
multivalue: true,
unique: false,
phantom: false,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1202,9 +1308,10 @@ impl<'a> SchemaWriteTransaction<'a> {
name: AttrString::from("password_import"),
uuid: UUID_SCHEMA_ATTR_PASSWORD_IMPORT,
description: String::from("An imported password hash from an external system."),
multivalue: true,
multivalue: false,
unique: false,
phantom: true,
sync_allowed: true,
index: vec![],
syntax: SyntaxType::Utf8String,
},
@ -1220,6 +1327,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1233,6 +1341,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1246,6 +1355,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Uuid,
},
@ -1259,6 +1369,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringInsensitive,
},
@ -1272,6 +1383,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Utf8StringIname,
},
@ -1285,6 +1397,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::SshKey,
},
@ -1298,6 +1411,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::SshKey,
},
@ -1311,6 +1425,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::EmailAddress,
},
@ -1324,6 +1439,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: true,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::EmailAddress,
},
@ -1337,6 +1453,7 @@ impl<'a> SchemaWriteTransaction<'a> {
multivalue: false,
unique: false,
phantom: true,
sync_allowed: false,
index: vec![],
syntax: SyntaxType::Uint32,
},
@ -1349,7 +1466,11 @@ impl<'a> SchemaWriteTransaction<'a> {
name: AttrString::from("attributetype"),
uuid: UUID_SCHEMA_CLASS_ATTRIBUTETYPE,
description: String::from("Definition of a schema attribute"),
systemmay: vec![AttrString::from("phantom"), AttrString::from("index")],
systemmay: vec![
AttrString::from("phantom"),
AttrString::from("sync_allowed"),
AttrString::from("index"),
],
systemmust: vec![
AttrString::from("class"),
AttrString::from("attributename"),
@ -1369,6 +1490,7 @@ impl<'a> SchemaWriteTransaction<'a> {
uuid: UUID_SCHEMA_CLASS_CLASSTYPE,
description: String::from("Definition of a schema classtype"),
systemmay: vec![
AttrString::from("sync_allowed"),
AttrString::from("systemmay"),
AttrString::from("may"),
AttrString::from("systemmust"),
@ -1535,14 +1657,31 @@ impl<'a> SchemaWriteTransaction<'a> {
},
);
self.classes.insert(
AttrString::from("system"),
SchemaClass {
name: AttrString::from("system"),
uuid: UUID_SCHEMA_CLASS_SYSTEM,
description: String::from("A class denoting that a type is system generated and protected. It has special internal behaviour."),
.. Default::default()
},
);
AttrString::from("system"),
SchemaClass {
name: AttrString::from("system"),
uuid: UUID_SCHEMA_CLASS_SYSTEM,
description: String::from("A class denoting that a type is system generated and protected. It has special internal behaviour."),
.. Default::default()
},
);
self.classes.insert(
AttrString::from("sync_object"),
SchemaClass {
name: AttrString::from("sync_object"),
uuid: UUID_SCHEMA_CLASS_SYNC_OBJECT,
description: String::from("A class denoting that an entry is synchronised from an external source. This entry may not be modifiable."),
systemmust: vec![
AttrString::from("uuid"),
AttrString::from("sync_parent_uuid")
],
systemmay: vec![
AttrString::from("sync_external_id"),
AttrString::from("sync_class"),
],
.. Default::default()
},
);
let r = self.validate();
if r.is_empty() {
@ -1912,11 +2051,9 @@ mod tests {
name: AttrString::from("single_value"),
uuid: Uuid::new_v4(),
description: String::from(""),
multivalue: false,
unique: false,
phantom: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8StringInsensitive,
..Default::default()
};
let r1 = single_value_string.validate_ava("single_value", &(vs_iutf8!["test"] as _));
@ -1939,10 +2076,9 @@ mod tests {
uuid: Uuid::new_v4(),
description: String::from(""),
multivalue: true,
unique: false,
phantom: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Utf8String,
..Default::default()
};
let rvs = vs_utf8!["test1".to_string(), "test2".to_string()] as _;
@ -1955,10 +2091,9 @@ mod tests {
uuid: Uuid::new_v4(),
description: String::from(""),
multivalue: true,
unique: false,
phantom: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::Boolean,
..Default::default()
};
// Since valueset now disallows such shenanigans at a type level, this can't occur
@ -1987,11 +2122,9 @@ mod tests {
name: AttrString::from("sv_syntax"),
uuid: Uuid::new_v4(),
description: String::from(""),
multivalue: false,
unique: false,
phantom: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::SyntaxId,
..Default::default()
};
let rvs = vs_syntax![SyntaxType::try_from("UTF8STRING").unwrap()] as _;
@ -2010,11 +2143,9 @@ mod tests {
name: AttrString::from("sv_index"),
uuid: Uuid::new_v4(),
description: String::from(""),
multivalue: false,
unique: false,
phantom: false,
index: vec![IndexType::Equality],
syntax: SyntaxType::IndexId,
..Default::default()
};
//
let rvs = vs_index![IndexType::try_from("EQUALITY").unwrap()] as _;

View file

@ -0,0 +1,311 @@
use super::QueryServerWriteTransaction;
use crate::prelude::*;
// use std::collections::BTreeMap;
use crate::access::AccessControlsTransaction;
use crate::server::Plugins;
use hashbrown::HashMap;
/// A map from target entry uuid to the validated modification list that
/// should be applied to exactly that entry.
pub type ModSetValid = HashMap<Uuid, ModifyList<ModifyValid>>;

/// An event requesting a batch of per-entry modifications. Unlike a normal
/// modify (one modlist applied to every matching entry), each uuid in the
/// modset carries its own modification list.
pub struct BatchModifyEvent {
    // The identity performing the operation, used for access control checks.
    pub ident: Identity,
    // Per-uuid modification lists to apply.
    pub modset: ModSetValid,
}
impl<'a> QueryServerWriteTransaction<'a> {
    /// This function behaves different to modify. Modify applies the same
    /// modification operation en-mass to 1 -> N entries. This takes a set of modifications
    /// that define a precise entry to apply a change to and only modifies that.
    ///
    /// modify is for all entries matching this condition, do this change.
    ///
    /// batch_modify is for entry X apply mod A, for entry Y apply mod B etc. It allows you
    /// to do per-entry mods.
    ///
    /// The drawback is you need to know ahead of time what uuids you are affecting. This
    /// has parallels to scim, so it's not a significant issue.
    ///
    /// Otherwise, we follow the same pattern here as modify, and inside the transform
    /// the same modlists are used.
    #[instrument(level = "debug", skip_all)]
    pub fn batch_modify(&mut self, me: &BatchModifyEvent) -> Result<(), OperationError> {
        // ⚠️ =========
        // Effectively this is the same as modify but instead of apply modlist
        // we do it by uuid.

        // Get the candidates.
        // Modify applies a modlist to a filter, so we need to internal search
        // then apply.
        if !me.ident.is_internal() {
            security_info!(name = %me.ident, "batch modify initiator");
        }

        // Validate input.

        // Is the modlist non zero?
        if me.modset.is_empty() {
            request_error!("empty modify request");
            return Err(OperationError::EmptyRequest);
        }

        // Build an OR filter over every requested uuid so one search can
        // retrieve all the target entries at once.
        let filter_or = me
            .modset
            .keys()
            .copied()
            .map(|u| f_eq("uuid", PartialValue::new_uuid(u)))
            .collect();

        let filter = filter_all!(f_or(filter_or))
            .validate(self.get_schema())
            .map_err(OperationError::SchemaViolation)?;

        // This also checks access controls due to use of the impersonation.
        let pre_candidates = self
            .impersonate_search_valid(filter.clone(), filter.clone(), &me.ident)
            .map_err(|e| {
                admin_error!("error in pre-candidate selection {:?}", e);
                e
            })?;

        if pre_candidates.is_empty() {
            if me.ident.is_internal() {
                trace!("no candidates match filter ... continuing {:?}", filter);
                return Ok(());
            } else {
                request_error!("no candidates match modset request, failure {:?}", filter);
                return Err(OperationError::NoMatchingEntries);
            }
        };

        // Every requested uuid must have resolved to an entry - a partial
        // match means some targets do not exist (or are not visible).
        if pre_candidates.len() != me.modset.len() {
            error!("Inconsistent modify, some uuids were not found in request.");
            return Err(OperationError::MissingEntries);
        }

        trace!("pre_candidates -> {:?}", pre_candidates);
        trace!("modset -> {:?}", me.modset);

        // Are we allowed to make the changes we want to?
        // modify_allow_operation
        let access = self.get_accesscontrols();
        let op_allow = access
            .batch_modify_allow_operation(me, &pre_candidates)
            .map_err(|e| {
                admin_error!("Unable to check batch modify access {:?}", e);
                e
            })?;
        if !op_allow {
            return Err(OperationError::AccessDenied);
        }

        // Clone a set of writeables.
        // Apply the modlist -> Remember, we have a set of origs
        // and the new modified ents.
        // =========
        // The primary difference to modify is here - notice we do per-uuid mods.
        let mut candidates = pre_candidates
            .iter()
            .map(|er| {
                let u = er.get_uuid();
                let mut ent_mut = er.as_ref().clone().invalidate(self.cid.clone());
                me.modset
                    .get(&u)
                    .ok_or_else(|| {
                        error!("No entry for uuid {} was found, aborting", u);
                        OperationError::NoMatchingEntries
                    })
                    .and_then(|modlist| {
                        ent_mut
                            .apply_modlist(modlist)
                            // Return if success
                            .map(|()| ent_mut)
                            // Error log otherwise.
                            .map_err(|e| {
                                error!("Modification failed for {}", u);
                                e
                            })
                    })
            })
            .collect::<Result<Vec<EntryInvalidCommitted>, _>>()?;

        // Did any of the candidates now become masked?
        if candidates.iter().any(|e| e.mask_recycled_ts().is_none()) {
            admin_warn!("Refusing to apply modifications that are attempting to bypass replication state machine.");
            return Err(OperationError::AccessDenied);
        }

        // Pre mod plugins
        // We should probably supply the pre-post cands here.
        Plugins::run_pre_batch_modify(self, &mut candidates, me).map_err(|e| {
            admin_error!("Pre-Modify operation failed (plugin), {:?}", e);
            e
        })?;

        // Validate against schema and seal each candidate before it can be
        // written to the backend.
        let norm_cand = candidates
            .into_iter()
            .map(|entry| {
                entry
                    .validate(&self.schema)
                    .map_err(|e| {
                        admin_error!("Schema Violation in validation of modify_pre_apply {:?}", e);
                        OperationError::SchemaViolation(e)
                    })
                    .map(|entry| entry.seal(&self.schema))
            })
            .collect::<Result<Vec<EntrySealedCommitted>, _>>()?;

        // Backend Modify
        self.be_txn
            .modify(&self.cid, &pre_candidates, &norm_cand)
            .map_err(|e| {
                admin_error!("Modify operation failed (backend), {:?}", e);
                e
            })?;

        // Post Plugins
        //
        // memberOf actually wants the pre cand list and the norm_cand list to see what
        // changed. Could be optimised, but this is correct still ...
        Plugins::run_post_batch_modify(self, &pre_candidates, &norm_cand, me).map_err(|e| {
            admin_error!("Post-Modify operation failed (plugin), {:?}", e);
            e
        })?;

        // We have finished all plugs and now have a successful operation - flag if
        // schema or acp requires reload. Remember, this is a modify, so we need to check
        // pre and post cands.
        if !self.changed_schema.get() {
            self.changed_schema.set(
                norm_cand
                    .iter()
                    .chain(pre_candidates.iter().map(|e| e.as_ref()))
                    .any(|e| {
                        e.attribute_equality("class", &PVCLASS_CLASSTYPE)
                            || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
                    }),
            )
        }
        if !self.changed_acp.get() {
            self.changed_acp.set(
                norm_cand
                    .iter()
                    .chain(pre_candidates.iter().map(|e| e.as_ref()))
                    .any(|e| e.attribute_equality("class", &PVCLASS_ACP)),
            )
        }
        if !self.changed_oauth2.get() {
            self.changed_oauth2.set(
                norm_cand
                    .iter()
                    .chain(pre_candidates.iter().map(|e| e.as_ref()))
                    .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)),
            )
        }
        if !self.changed_domain.get() {
            self.changed_domain.set(
                norm_cand
                    .iter()
                    .chain(pre_candidates.iter().map(|e| e.as_ref()))
                    .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)),
            )
        }

        // Record every touched uuid (pre and post state) for change tracking.
        let cu = self.changed_uuid.as_ptr();
        // SAFETY(review): writes through Cell::as_ptr; assumes no other
        // reference into the cell is live at this point, matching the same
        // pattern used by the plain modify path - TODO confirm.
        unsafe {
            (*cu).extend(
                norm_cand
                    .iter()
                    .map(|e| e.get_uuid())
                    .chain(pre_candidates.iter().map(|e| e.get_uuid())),
            );
        }

        trace!(
            schema_reload = ?self.changed_schema,
            acp_reload = ?self.changed_acp,
            oauth2_reload = ?self.changed_oauth2,
            domain_reload = ?self.changed_domain,
        );

        // return
        if me.ident.is_internal() {
            trace!("Modify operation success");
        } else {
            admin_info!("Modify operation success");
        }
        Ok(())
    }

    /// Convenience wrapper over [`Self::batch_modify`]: validates each
    /// (uuid, modlist) pair against the schema and runs the batch as the
    /// internal identity.
    pub fn internal_batch_modify(
        &mut self,
        mods_iter: impl Iterator<Item = (Uuid, ModifyList<ModifyInvalid>)>,
    ) -> Result<(), OperationError> {
        // Validate every modlist; the first schema violation aborts the batch.
        let modset = mods_iter
            .map(|(u, ml)| {
                ml.validate(self.get_schema())
                    .map(|modlist| (u, modlist))
                    .map_err(OperationError::SchemaViolation)
            })
            .collect::<Result<ModSetValid, _>>()?;
        let bme = BatchModifyEvent {
            ident: Identity::from_internal(),
            modset,
        };
        self.batch_modify(&bme)
    }
}
#[cfg(test)]
mod tests {
    use crate::prelude::*;

    // Smoke test: two entries are created, then a single batch applies a
    // different description to each, and both results are verified.
    #[qs_test]
    async fn test_batch_modify_basic(server: &QueryServer) {
        let mut server_txn = server.write(duration_from_epoch_now()).await;

        // Setup entries.
        let uuid_a = Uuid::new_v4();
        let uuid_b = Uuid::new_v4();
        assert!(server_txn
            .internal_create(vec![
                entry_init!(
                    ("class", Value::new_class("object")),
                    ("uuid", Value::Uuid(uuid_a))
                ),
                entry_init!(
                    ("class", Value::new_class("object")),
                    ("uuid", Value::Uuid(uuid_b))
                ),
            ])
            .is_ok());

        // Do a batch mod.
        assert!(server_txn
            .internal_batch_modify(
                [
                    (
                        uuid_a,
                        ModifyList::new_append("description", Value::Utf8("a".into()))
                    ),
                    (
                        uuid_b,
                        ModifyList::new_append("description", Value::Utf8("b".into()))
                    ),
                ]
                .into_iter()
            )
            .is_ok());

        // Now check them - each entry must have received only its own mod.
        let ent_a = server_txn
            .internal_search_uuid(&uuid_a)
            .expect("Failed to get entry.");
        let ent_b = server_txn
            .internal_search_uuid(&uuid_b)
            .expect("Failed to get entry.");

        assert!(ent_a.get_ava_single_utf8("description") == Some("a"));
        assert!(ent_b.get_ava_single_utf8("description") == Some("b"));
    }
}

View file

@ -0,0 +1 @@

View file

@ -0,0 +1 @@

View file

@ -38,6 +38,12 @@ use crate::schema::{
};
use crate::valueset::uuid_to_proto_string;
pub mod batch_modify;
pub mod create;
pub mod delete;
pub mod modify;
pub mod search;
const RESOLVE_FILTER_CACHE_MAX: usize = 4096;
const RESOLVE_FILTER_CACHE_LOCAL: usize = 0;
@ -261,32 +267,27 @@ pub trait QueryServerTransaction<'a> {
})
}
// Should this actually be names_to_uuids and we do batches?
// In the initial design "no", we can always write a batched
// interface later.
//
// The main question is if we need association between the name and
// the request uuid - if we do, we need singular. If we don't, we can
// just do the batching.
//
// Filter conversion likely needs 1:1, due to and/or conversions
// but create/mod likely doesn't due to the nature of the attributes.
//
// In the end, singular is the simple and correct option, so lets do
// that first, and we can add batched (and cache!) later.
//
// Remember, we don't care if the name is invalid, because search
// will validate/normalise the filter we construct for us. COOL!
fn name_to_uuid(&self, name: &str) -> Result<Uuid, OperationError> {
// Is it just a uuid?
Uuid::parse_str(name).or_else(|_| {
let lname = name.to_lowercase();
self.get_be_txn()
.name2uuid(lname.as_str())?
.ok_or(OperationError::NoMatchingEntries) // should we log this?
.ok_or(OperationError::NoMatchingEntries)
})
}
// Similar to name, but where we lookup from external_id instead.
fn sync_external_id_to_uuid(&self, external_id: &str) -> Result<Option<Uuid>, OperationError> {
// Is it just a uuid?
Uuid::parse_str(external_id)
.map(|uuid| Some(uuid))
.or_else(|_| {
let lname = external_id.to_lowercase();
self.get_be_txn().externalid2uuid(lname.as_str())
})
}
fn uuid_to_spn(&self, uuid: Uuid) -> Result<Option<Value>, OperationError> {
let r = self.get_be_txn().uuid2spn(uuid)?;
@ -460,35 +461,18 @@ pub trait QueryServerTransaction<'a> {
SyntaxType::IndexId => Value::new_indexs(value)
.ok_or_else(|| OperationError::InvalidAttribute("Invalid Index syntax".to_string())),
SyntaxType::Uuid => {
// It's a uuid - we do NOT check for existance, because that
// could be revealing or disclosing - it is up to acp to assert
// if we can see the value or not, and it's not up to us to
// assert the filter value exists.
Value::new_uuids(value)
.or_else(|| {
// it's not a uuid, try to resolve it.
// if the value is NOT found, we map to "does not exist" to allow
// the value to continue being evaluated, which of course, will fail
// all subsequent filter tests because it ... well, doesn't exist.
let un = self
.name_to_uuid( value)
.unwrap_or(UUID_DOES_NOT_EXIST);
Some(Value::new_uuid(un))
})
// I think this is unreachable due to how the .or_else works.
.ok_or_else(|| OperationError::InvalidAttribute("Invalid UUID syntax".to_string()))
// Attempt to resolve this name to a uuid. If it's already a uuid, then
// name to uuid will "do the right thing" and give us the Uuid back.
let un = self
.name_to_uuid(value)
.unwrap_or(UUID_DOES_NOT_EXIST);
Ok(Value::Uuid(un))
}
SyntaxType::ReferenceUuid => {
// See comments above.
Value::new_refer_s(value)
.or_else(|| {
let un = self
.name_to_uuid( value)
.unwrap_or(UUID_DOES_NOT_EXIST);
Some(Value::new_refer(un))
})
// I think this is unreachable due to how the .or_else works.
.ok_or_else(|| OperationError::InvalidAttribute("Invalid Reference syntax".to_string()))
let un = self
.name_to_uuid(value)
.unwrap_or(UUID_DOES_NOT_EXIST);
Ok(Value::Refer(un))
}
SyntaxType::JsonFilter => Value::new_json_filter_s(value)
.ok_or_else(|| OperationError::InvalidAttribute("Invalid Filter syntax".to_string())),
@ -553,31 +537,8 @@ pub trait QueryServerTransaction<'a> {
OperationError::InvalidAttribute("Invalid Index syntax".to_string())
}),
SyntaxType::Uuid => {
PartialValue::new_uuids(value)
.or_else(|| {
// it's not a uuid, try to resolve it.
// if the value is NOT found, we map to "does not exist" to allow
// the value to continue being evaluated, which of course, will fail
// all subsequent filter tests because it ... well, doesn't exist.
let un = self.name_to_uuid(value).unwrap_or(UUID_DOES_NOT_EXIST);
Some(PartialValue::new_uuid(un))
})
// I think this is unreachable due to how the .or_else works.
.ok_or_else(|| {
OperationError::InvalidAttribute("Invalid UUID syntax".to_string())
})
// This avoids having unreachable code:
// Ok(PartialValue::new_uuids(value)
// .unwrap_or_else(|| {
// // it's not a uuid, try to resolve it.
// // if the value is NOT found, we map to "does not exist" to allow
// // the value to continue being evaluated, which of course, will fail
// // all subsequent filter tests because it ... well, doesn't exist.
// let un = self
// .name_to_uuid( value)
// .unwrap_or(*UUID_DOES_NOT_EXIST);
// PartialValue::new_uuid(un)
// }))
let un = self.name_to_uuid(value).unwrap_or(UUID_DOES_NOT_EXIST);
Ok(PartialValue::Uuid(un))
}
// ⚠️ Any types here need to also be added to update_attributes in
// schema.rs for reference type / cache awareness during referential
@ -586,19 +547,8 @@ pub trait QueryServerTransaction<'a> {
| SyntaxType::OauthScopeMap
| SyntaxType::Session
| SyntaxType::Oauth2Session => {
// See comments above.
PartialValue::new_refer_s(value)
.or_else(|| {
let un = self.name_to_uuid(value).unwrap_or(UUID_DOES_NOT_EXIST);
Some(PartialValue::new_refer(un))
})
// I think this is unreachable due to how the .or_else works.
// See above case for how to avoid having unreachable code
.ok_or_else(|| {
OperationError::InvalidAttribute(
"Invalid Reference syntax".to_string(),
)
})
let un = self.name_to_uuid(value).unwrap_or(UUID_DOES_NOT_EXIST);
Ok(PartialValue::Refer(un))
}
SyntaxType::JsonFilter => {
PartialValue::new_json_filter_s(value).ok_or_else(|| {
@ -1205,7 +1155,10 @@ impl<'a> QueryServerWriteTransaction<'a> {
security_info!(name = %ce.ident, "create initiator");
}
// Log the request
if ce.entries.is_empty() {
request_error!("create: empty create request");
return Err(OperationError::EmptyRequest);
}
// TODO #67: Do we need limits on number of creates, or do we constraint
// based on request size in the frontend?
@ -1852,9 +1805,12 @@ impl<'a> QueryServerWriteTransaction<'a> {
.map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
.collect();
candidates
.iter_mut()
.for_each(|er| er.apply_modlist(&me.modlist));
candidates.iter_mut().try_for_each(|er| {
er.apply_modlist(&me.modlist).map_err(|e| {
error!("Modification failed for {:?}", er.get_uuid());
e
})
})?;
trace!("modify: candidates -> {:?}", candidates);
@ -2001,9 +1957,9 @@ impl<'a> QueryServerWriteTransaction<'a> {
}
}
/// Used in conjunction with internal_batch_modify, to get a pre/post
/// Used in conjunction with internal_apply_writable, to get a pre/post
/// pair, where post is pre-configured with metadata to allow
/// modificiation before submit back to internal_batch_modify
/// modification before submit back to internal_apply_writable
#[instrument(level = "debug", skip_all)]
pub(crate) fn internal_search_writeable(
&self,
@ -2030,7 +1986,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
/// probably want modify instead.
#[allow(clippy::needless_pass_by_value)]
#[instrument(level = "debug", skip_all)]
pub(crate) fn internal_batch_modify(
pub(crate) fn internal_apply_writable(
&self,
pre_candidates: Vec<Arc<EntrySealedCommitted>>,
candidates: Vec<Entry<EntryInvalid, EntryCommitted>>,
@ -2041,7 +1997,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
}
if pre_candidates.len() != candidates.len() {
admin_error!("internal_batch_modify - cand lengths differ");
admin_error!("internal_apply_writable - cand lengths differ");
return Err(OperationError::InvalidRequestState);
}
@ -2051,7 +2007,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
e.validate(&self.schema)
.map_err(|e| {
admin_error!(
"Schema Violation in internal_batch_modify validate: {:?}",
"Schema Violation in internal_apply_writable validate: {:?}",
e
);
OperationError::SchemaViolation(e)
@ -2376,8 +2332,6 @@ impl<'a> QueryServerWriteTransaction<'a> {
&mut self,
entries: Vec<Entry<EntryInit, EntryNew>>,
) -> Result<(), OperationError> {
// Start the audit scope
// Create the CreateEvent
let ce = CreateEvent::new_internal(entries);
self.create(&ce)
}
@ -2418,6 +2372,22 @@ impl<'a> QueryServerWriteTransaction<'a> {
self.modify(&me)
}
pub fn internal_modify_uuid(
&mut self,
target_uuid: Uuid,
modlist: &ModifyList<ModifyInvalid>,
) -> Result<(), OperationError> {
let filter = filter!(f_eq("uuid", PartialValue::new_uuid(target_uuid)));
let f_valid = filter
.validate(self.get_schema())
.map_err(OperationError::SchemaViolation)?;
let m_valid = modlist
.validate(self.get_schema())
.map_err(OperationError::SchemaViolation)?;
let me = ModifyEvent::new_internal(f_valid, m_valid);
self.modify(&me)
}
pub fn impersonate_modify_valid(
&mut self,
f_valid: Filter<FilterValid>,
@ -3245,8 +3215,6 @@ impl<'a> QueryServerWriteTransaction<'a> {
}
}
// Auth requests? How do we structure these ...
#[cfg(test)]
mod tests {
use std::sync::Arc;
@ -3457,6 +3425,44 @@ mod tests {
assert!(server_txn.commit().is_ok());
}
    // Verifies m_assert semantics: a modify whose assertion does not hold
    // must fail with ModifyAssertionFailed, while a matching assertion lets
    // the rest of the modlist apply.
    #[qs_test]
    async fn test_modify_assert(server: &QueryServer) {
        let mut server_txn = server.write(duration_from_epoch_now()).await;

        let t_uuid = Uuid::new_v4();
        // r_uuid intentionally never matches any entry.
        let r_uuid = Uuid::new_v4();

        assert!(server_txn
            .internal_create(vec![entry_init!(
                ("class", Value::new_class("object")),
                ("uuid", Value::Uuid(t_uuid))
            ),])
            .is_ok());

        // This assertion will FAIL
        assert!(matches!(
            server_txn.internal_modify_uuid(
                t_uuid,
                &ModifyList::new_list(vec![
                    m_assert("uuid", &PartialValue::Uuid(r_uuid)),
                    m_pres("description", &Value::Utf8("test".into()))
                ])
            ),
            Err(OperationError::ModifyAssertionFailed)
        ));

        // This assertion will PASS
        assert!(server_txn
            .internal_modify_uuid(
                t_uuid,
                &ModifyList::new_list(vec![
                    m_assert("uuid", &PartialValue::Uuid(t_uuid)),
                    m_pres("description", &Value::Utf8("test".into()))
                ])
            )
            .is_ok());
    }
#[qs_test]
async fn test_modify_invalid_class(server: &QueryServer) {
// Test modifying an entry and adding an extra class, that would cause the entry
@ -3917,20 +3923,17 @@ mod tests {
async fn test_name_to_uuid(server: &QueryServer) {
let mut server_txn = server.write(duration_from_epoch_now()).await;
let e1 = entry_init!(
("class", Value::new_class("object")),
("class", Value::new_class("person")),
("name", Value::new_iname("testperson1")),
(
"uuid",
Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid")
),
("description", Value::new_utf8s("testperson1")),
("displayname", Value::new_utf8s("testperson1"))
);
let ce = CreateEvent::new_internal(vec![e1]);
let cr = server_txn.create(&ce);
assert!(cr.is_ok());
let t_uuid = Uuid::new_v4();
assert!(server_txn
.internal_create(vec![entry_init!(
("class", Value::new_class("object")),
("class", Value::new_class("person")),
("name", Value::new_iname("testperson1")),
("uuid", Value::Uuid(t_uuid)),
("description", Value::new_utf8s("testperson1")),
("displayname", Value::new_utf8s("testperson1"))
),])
.is_ok());
// Name doesn't exist
let r1 = server_txn.name_to_uuid("testpers");
@ -3940,10 +3943,38 @@ mod tests {
assert!(r2.is_err());
// Name does exist
let r3 = server_txn.name_to_uuid("testperson1");
assert!(r3.is_ok());
assert!(r3 == Ok(t_uuid));
// Name is not syntax normalised (but exists)
let r4 = server_txn.name_to_uuid("tEsTpErSoN1");
assert!(r4.is_ok());
assert!(r4 == Ok(t_uuid));
}
    // Verifies sync_external_id_to_uuid: misses return Ok(None), hits return
    // the target uuid, and matching is case-insensitive on the stored value.
    #[qs_test]
    async fn test_external_id_to_uuid(server: &QueryServer) {
        let mut server_txn = server.write(duration_from_epoch_now()).await;

        let t_uuid = Uuid::new_v4();
        assert!(server_txn
            .internal_create(vec![entry_init!(
                ("class", Value::new_class("object")),
                ("class", Value::new_class("extensibleobject")),
                ("uuid", Value::Uuid(t_uuid)),
                ("sync_external_id", Value::new_iutf8("uid=testperson"))
            ),])
            .is_ok());

        // Name doesn't exist
        let r1 = server_txn.sync_external_id_to_uuid("tobias");
        assert!(r1 == Ok(None));
        // Name doesn't exist (not syntax normalised)
        let r2 = server_txn.sync_external_id_to_uuid("tObIAs");
        assert!(r2 == Ok(None));
        // Name does exist
        let r3 = server_txn.sync_external_id_to_uuid("uid=testperson");
        assert!(r3 == Ok(Some(t_uuid)));
        // Name is not syntax normalised (but exists)
        let r4 = server_txn.sync_external_id_to_uuid("uId=TeStPeRsOn");
        assert!(r4 == Ok(Some(t_uuid)));
    }
#[qs_test]

View file

@ -0,0 +1 @@

View file

@ -0,0 +1 @@

View file

@ -163,9 +163,11 @@ impl fmt::Display for IndexType {
Deserialize,
Serialize,
TryFromPrimitive,
Default,
)]
#[repr(u16)]
pub enum SyntaxType {
#[default]
Utf8String = 0,
Utf8StringInsensitive = 1,
Uuid = 2,
@ -826,6 +828,10 @@ impl PartialEq for Value {
| (Value::SecretValue(_), Value::SecretValue(_)) => false,
// Specifically related to migrations, we allow the invalid comparison.
(Value::Iutf8(_), Value::Iname(_)) | (Value::Iname(_), Value::Iutf8(_)) => false,
// When upgrading between uuid -> name -> spn we have to allow some invalid types.
(Value::Uuid(_), Value::Iname(_))
| (Value::Iname(_), Value::Spn(_, _))
| (Value::Uuid(_), Value::Spn(_, _)) => false,
(l, r) => {
error!(?l, ?r, "mismatched value types");
debug_assert!(false);