Mirror of https://github.com/kanidm/kanidm.git

20221224 cleanup (#1300)

* Cleanup
* schema
* Remove some Cell
* Fix mut
* clippy

This commit is contained in:
parent a82fd0aea2
commit 27cb3d8510
Changed files:

Cargo.lock (generated)
Cargo.toml
examples/
kanidm_tools/src/cli/
kanidmd/core/
kanidmd/lib/src/
    be/
    constants/
    credential/
    entry.rs, event.rs, filter.rs
    idm/: account.rs, applinks.rs, authsession.rs, credupdatesession.rs, delayed.rs, group.rs, ldap.rs, mod.rs, oauth2.rs, scim.rs, server.rs, serviceaccount.rs
    lib.rs, macros.rs, modify.rs
    plugins/: attrunique.rs, base.rs, domain.rs, dyngroup.rs, gidnumber.rs, jwskeygen.rs, memberof.rs, password_import.rs, refint.rs, spn.rs
    schema.rs
    server/: access.rs, batch_modify.rs, create.rs, delete.rs, identity.rs, migrations.rs, mod.rs, modify.rs, recycle.rs, search.rs
    value.rs, valueset/
orca/src/
@@ -2379,6 +2379,7 @@ dependencies = [
  "async-trait",
  "chrono",
  "compact_jwt",
+ "cron",
  "futures-util",
  "http-types",
  "kanidm_proto",
@@ -2389,7 +2390,6 @@ dependencies = [
  "profiles",
  "rand 0.8.5",
  "regex",
- "saffron",
  "serde",
  "serde_json",
  "sketching",
@@ -3762,16 +3762,6 @@ version = "1.0.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09"
 
-[[package]]
-name = "saffron"
-version = "0.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03fb9a628596fc7590eb7edbf7b0613287be78df107f5f97b118aad59fb2eea9"
-dependencies = [
- "chrono",
- "nom 5.1.2",
-]
-
 [[package]]
 name = "same-file"
 version = "1.0.6"

@@ -114,7 +114,6 @@ regex = "1.7.0"
 reqwest = { version = "0.11.13", default-features = false, features=["cookies", "json", "gzip", "native-tls"] }
 rpassword = "^7.2.0"
 rusqlite = "^0.28.0"
-saffron = "^0.1.0"
 serde = "^1.0.151"
 serde_cbor = { version = "0.12.0-dev", package = "serde_cbor_2" }
 serde_json = "^1.0.91"

@@ -13,3 +13,6 @@ log_level = "verbose"
 domain = "localhost"
 origin = "https://localhost:8443"
 
+[online_backup]
+path = "/tmp/kanidm/backups/"
+schedule = "@hourly"

@@ -65,13 +65,30 @@ origin = "https://idm.example.com:8443"
 # Defaults to "WriteReplica".
 # role = "WriteReplica"
 #
 # This section if uncommented will enable online - automatic backups of your database.
 # [online_backup]
 #
 # The path to the output folder for online backups
 # Defaults to "" (no path set)
 # path = "/var/lib/kanidm/backups/"
-# The schedule to run online backups - see https://crontab.guru/
-# every day at 22:00 UTC (default)
-# schedule = "00 22 * * *"
-# four times a day at 3 minutes past the hour, every 6th hours
-# schedule = "03 */6 * * *"
+#
+# The schedule to run online backups. All times are interpretted in UTC.
+# The format of the cron expression is:
+#
+#   sec  min  hour  day of month  month  day of week  year
+#
+# - to run a 6:09 pm every day.
+#   "0 9 6 * * * *"
+#
+# - to run at midnight daily
+#   @daily
+#
+# - to run every hour
+#   @hourly
+#
+# Defaults to "@daily"
+# schedule = "@daily"
+# Number of backups to keep (default 7)
+# versions = 7
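For reference, the seven-field expression format documented above is what the `cron` crate (which this commit adopts in place of saffron) parses. A minimal sketch of checking a schedule, using only the `Schedule::from_str` and `upcoming` API that the diff itself relies on:

    use std::str::FromStr;
    use chrono::Utc;
    use cron::Schedule;

    fn main() {
        // "sec min hour day-of-month month day-of-week year":
        // every day at 22:00:00 UTC.
        let schedule = Schedule::from_str("0 0 22 * * * *").expect("valid cron expression");
        for next in schedule.upcoming(Utc).take(3) {
            println!("next run: {}", next);
        }
        // Shorthands such as "@daily" and "@hourly" parse the same way.
    }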
@@ -265,7 +265,7 @@ impl LoginOpt {
 
             #[allow(clippy::expect_used)]
             mechs
-                .get(selection as usize)
+                .get(selection)
                 .expect("can not fail - bounds already checked.")
         }
     };
@@ -306,7 +306,7 @@ impl LoginOpt {
 
             #[allow(clippy::expect_used)]
             allowed
-                .get(selection as usize)
+                .get(selection)
                 .expect("can not fail - bounds already checked.")
         }
     };
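The change above just drops a now-redundant `as usize` cast; `selection` already has the right type. The surrounding pattern (index validated earlier, so `.get(...).expect(...)` cannot fire) reduces to this toy sketch:

    fn main() {
        let mechs = ["password", "passkey", "backup code"];
        // The input loop has already bounds-checked `selection`.
        let selection: usize = 1;
        assert!(selection < mechs.len());
        let chosen = mechs
            .get(selection)
            .expect("can not fail - bounds already checked.");
        println!("selected mech: {}", chosen);
    }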
@@ -15,6 +15,7 @@ repository.workspace = true
 [dependencies]
 async-trait.workspace = true
 chrono.workspace = true
+cron.workspace = true
 compact_jwt.workspace = true
 futures-util.workspace = true
 http-types.workspace = true
@@ -25,7 +26,6 @@ libc.workspace = true
 openssl.workspace = true
 rand.workspace = true
 regex.workspace = true
-saffron.workspace = true
 serde = { workspace = true, features = ["derive"] }
 serde_json.workspace = true
 sketching.workspace = true

@@ -25,6 +25,7 @@ use kanidmd_lib::{
         AuthEvent, AuthResult, CredentialStatusEvent, RadiusAuthTokenEvent, ReadBackupCodeEvent,
         UnixGroupTokenEvent, UnixUserAuthEvent, UnixUserTokenEvent,
     },
+    idm::ldap::{LdapBoundToken, LdapResponseState, LdapServer},
     idm::oauth2::{
         AccessTokenIntrospectRequest, AccessTokenIntrospectResponse, AccessTokenRequest,
         AccessTokenResponse, AuthorisationRequest, AuthorisePermitSuccess, AuthoriseResponse,
@@ -32,7 +33,6 @@ use kanidmd_lib::{
     },
     idm::server::{IdmServer, IdmServerTransaction},
     idm::serviceaccount::ListApiTokenEvent,
-    ldap::{LdapBoundToken, LdapResponseState, LdapServer},
 };
 
 // ===========================================================
@@ -78,7 +78,7 @@ impl QueryServerReadV1 {
     ) -> Result<SearchResponse, OperationError> {
         // Begin a read
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let ident = idms_prox_read
             .validate_and_parse_token_to_ident(uat.as_deref(), ct)
             .map_err(|e| {
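This `let` to `let mut` change repeats through the rest of the file. A minimal model of why it is needed (the type below is hypothetical; the real guards live in kanidmd_lib): once the interior-mutability cells are removed elsewhere in this commit, "read" methods that touch caches take `&mut self`, so the guard binding itself must be mutable.

    struct ProxyReadTxn {
        name_cache: Vec<String>, // was behind an UnsafeCell before this commit
    }

    impl ProxyReadTxn {
        // A logical read that may populate a cache now needs &mut self.
        fn resolve(&mut self, name: &str) -> bool {
            if !self.name_cache.iter().any(|n| n == name) {
                self.name_cache.push(name.to_string());
            }
            true
        }
    }

    fn main() {
        let mut txn = ProxyReadTxn { name_cache: Vec::new() }; // `let mut`, as above
        assert!(txn.resolve("admin"));
    }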
@@ -88,7 +88,7 @@ impl QueryServerReadV1 {
 
         // Make an event from the request
         let search =
-            SearchEvent::from_message(ident, &req, &idms_prox_read.qs_read).map_err(|e| {
+            SearchEvent::from_message(ident, &req, &mut idms_prox_read.qs_read).map_err(|e| {
                 admin_error!(?e, "Failed to begin search");
                 e
             })?;
@@ -97,7 +97,7 @@ impl QueryServerReadV1 {
 
         let entries = idms_prox_read.qs_read.search_ext(&search)?;
 
-        SearchResult::new(&idms_prox_read.qs_read, &entries).map(SearchResult::response)
+        SearchResult::new(&mut idms_prox_read.qs_read, &entries).map(SearchResult::response)
     }
 
     #[instrument(
@@ -174,7 +174,7 @@ impl QueryServerReadV1 {
 
         // Scope to limit the read txn.
         {
-            let idms_prox_read = self.idms.proxy_read().await;
+            let mut idms_prox_read = self.idms.proxy_read().await;
             idms_prox_read
                 .qs_read
                 .get_be_txn()
@@ -287,7 +287,7 @@ impl QueryServerReadV1 {
         // TODO #62: Move this to IdmServer!!!
         // Begin a read
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         // Make an event from the whoami request. This will process the event and
         // generate a selfuuid search.
         //
@@ -314,7 +314,7 @@ impl QueryServerReadV1 {
 
         match entries.pop() {
             Some(e) if entries.is_empty() => {
-                WhoamiResult::new(&idms_prox_read.qs_read, &e).map(WhoamiResult::response)
+                WhoamiResult::new(&mut idms_prox_read.qs_read, &e).map(WhoamiResult::response)
             }
             Some(_) => Err(OperationError::InvalidState), /* Somehow matched multiple entries... */
             _ => Err(OperationError::NoMatchingEntries),
@@ -333,7 +333,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<UserAuthToken, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         // Make an event from the whoami request. This will process the event and
         // generate a selfuuid search.
         //
@@ -362,7 +362,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<Vec<ProtoEntry>, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let ident = idms_prox_read
             .validate_and_parse_token_to_ident(uat.as_deref(), ct)
             .map_err(|e| {
@@ -374,7 +374,7 @@ impl QueryServerReadV1 {
             ident,
             &filter,
             attrs.as_deref(),
-            &idms_prox_read.qs_read,
+            &mut idms_prox_read.qs_read,
         ) {
             Ok(s) => s,
             Err(e) => {
@@ -386,7 +386,7 @@ impl QueryServerReadV1 {
         trace!(?srch, "Begin event");
 
         match idms_prox_read.qs_read.search_ext(&srch) {
-            Ok(entries) => SearchResult::new(&idms_prox_read.qs_read, &entries)
+            Ok(entries) => SearchResult::new(&mut idms_prox_read.qs_read, &entries)
                 .map(|ok_sr| ok_sr.into_proto_array()),
             Err(e) => Err(e),
         }
@@ -405,7 +405,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<Vec<ProtoEntry>, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
 
         let ident = idms_prox_read
             .validate_and_parse_token_to_ident(uat.as_deref(), ct)
@@ -430,7 +430,7 @@ impl QueryServerReadV1 {
         trace!(?srch, "Begin event");
 
         match idms_prox_read.qs_read.search_ext(&srch) {
-            Ok(entries) => SearchResult::new(&idms_prox_read.qs_read, &entries)
+            Ok(entries) => SearchResult::new(&mut idms_prox_read.qs_read, &entries)
                 .map(|ok_sr| ok_sr.into_proto_array()),
             Err(e) => Err(e),
         }
@@ -448,7 +448,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<Option<String>, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let ident = idms_prox_read
             .validate_and_parse_token_to_ident(uat.as_deref(), ct)
             .map_err(|e| {
@@ -648,7 +648,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<Vec<String>, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let ident = idms_prox_read
             .validate_and_parse_token_to_ident(uat.as_deref(), ct)
             .map_err(|e| {
@@ -711,7 +711,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<Option<String>, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let ident = idms_prox_read
             .validate_and_parse_token_to_ident(uat.as_deref(), ct)
             .map_err(|e| {
@@ -775,7 +775,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<Vec<ApiToken>, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let ident = idms_prox_read
             .validate_and_parse_token_to_ident(uat.as_deref(), ct)
             .map_err(|e| {
@@ -807,7 +807,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<Vec<UatStatus>, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let ident = idms_prox_read
             .validate_and_parse_token_to_ident(uat.as_deref(), ct)
             .map_err(|e| {
@@ -1141,7 +1141,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<Option<String>, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let ident = idms_prox_read
             .validate_and_parse_token_to_ident(uat.as_deref(), ct)
             .map_err(|e| {
@@ -1150,15 +1150,18 @@ impl QueryServerReadV1 {
             })?;
 
         // Make an event from the request
-        let srch =
-            match SearchEvent::from_internal_message(ident, &filter, None, &idms_prox_read.qs_read)
-            {
-                Ok(s) => s,
-                Err(e) => {
-                    admin_error!("Failed to begin oauth2 basic secret read: {:?}", e);
-                    return Err(e);
-                }
-            };
+        let srch = match SearchEvent::from_internal_message(
+            ident,
+            &filter,
+            None,
+            &mut idms_prox_read.qs_read,
+        ) {
+            Ok(s) => s,
+            Err(e) => {
+                admin_error!("Failed to begin oauth2 basic secret read: {:?}", e);
+                return Err(e);
+            }
+        };
 
         trace!(?srch, "Begin event");
 
@@ -1191,7 +1194,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<AuthoriseResponse, Oauth2Error> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let (ident, uat) = idms_prox_read
             .validate_and_parse_uat(uat.as_deref(), ct)
             .and_then(|uat| {
@@ -1220,7 +1223,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<AuthorisePermitSuccess, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let (ident, uat) = idms_prox_read
             .validate_and_parse_uat(uat.as_deref(), ct)
             .and_then(|uat| {
@@ -1248,7 +1251,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<Url, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let (ident, uat) = idms_prox_read
             .validate_and_parse_uat(uat.as_deref(), ct)
             .and_then(|uat| {
@@ -1276,7 +1279,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<AccessTokenResponse, Oauth2Error> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         // Now we can send to the idm server for authorisation checking.
         idms_prox_read.check_oauth2_token_exchange(client_authz.as_deref(), &token_req, ct)
     }
@@ -1293,7 +1296,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<AccessTokenIntrospectResponse, Oauth2Error> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         // Now we can send to the idm server for introspection checking.
         idms_prox_read.check_oauth2_token_introspect(&client_authz, &intr_req, ct)
     }
@@ -1310,7 +1313,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<OidcToken, Oauth2Error> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         idms_prox_read.oauth2_openid_userinfo(&client_id, &client_authz, ct)
     }
 
@@ -1353,7 +1356,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<Vec<AppLink>, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
         let ident = idms_prox_read
             .validate_and_parse_token_to_ident(uat.as_deref(), ct)
             .map_err(|e| {
@@ -1386,10 +1389,12 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<(), OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
+
+        // parse_token_to_ident
         idms_prox_read
             .validate_and_parse_uat(uat.as_deref(), ct)
             .and_then(|uat| idms_prox_read.process_uat_to_identity(&uat, ct))
             .map(|_| ())
             .map_err(|e| {
                 admin_error!("Invalid token: {:?}", e);

@@ -189,7 +189,7 @@ impl QueryServerReadV1 {
         eventid: Uuid,
     ) -> Result<ScimSyncState, OperationError> {
         let ct = duration_from_epoch_now();
-        let idms_prox_read = self.idms.proxy_read().await;
+        let mut idms_prox_read = self.idms.proxy_read().await;
 
         let ident = idms_prox_read.validate_and_parse_sync_token_to_ident(bearer.as_deref(), ct)?;
 

@@ -82,7 +82,7 @@ impl QueryServerWriteV1 {
             target_uuid,
             proto_ml,
             filter,
-            &idms_prox_write.qs_write,
+            &mut idms_prox_write.qs_write,
         ) {
             Ok(m) => m,
             Err(e) => {
@@ -171,7 +171,7 @@ impl QueryServerWriteV1 {
             e
         })?;
 
-        let crt = match CreateEvent::from_message(ident, &req, &idms_prox_write.qs_write) {
+        let crt = match CreateEvent::from_message(ident, &req, &mut idms_prox_write.qs_write) {
             Ok(c) => c,
             Err(e) => {
                 admin_warn!(err = ?e, "Failed to begin create");
@@ -207,7 +207,7 @@ impl QueryServerWriteV1 {
             e
         })?;
 
-        let mdf = match ModifyEvent::from_message(ident, &req, &idms_prox_write.qs_write) {
+        let mdf = match ModifyEvent::from_message(ident, &req, &mut idms_prox_write.qs_write) {
             Ok(m) => m,
             Err(e) => {
                 admin_error!(err = ?e, "Failed to begin modify");
@@ -242,7 +242,7 @@ impl QueryServerWriteV1 {
             admin_error!(err = ?e, "Invalid identity");
             e
         })?;
-        let del = match DeleteEvent::from_message(ident, &req, &idms_prox_write.qs_write) {
+        let del = match DeleteEvent::from_message(ident, &req, &mut idms_prox_write.qs_write) {
             Ok(d) => d,
             Err(e) => {
                 admin_error!(err = ?e, "Failed to begin delete");
@@ -281,18 +281,23 @@ impl QueryServerWriteV1 {
         })?;
 
         // Transform the ProtoEntry to a Modlist
-        let modlist = ModifyList::from_patch(&update, &idms_prox_write.qs_write).map_err(|e| {
-            admin_error!(err = ?e, "Invalid Patch Request");
-            e
-        })?;
+        let modlist =
+            ModifyList::from_patch(&update, &mut idms_prox_write.qs_write).map_err(|e| {
+                admin_error!(err = ?e, "Invalid Patch Request");
+                e
+            })?;
 
-        let mdf =
-            ModifyEvent::from_internal_parts(ident, &modlist, &filter, &idms_prox_write.qs_write)
-                .map_err(|e| {
-                    admin_error!(err = ?e, "Failed to begin modify");
-                    e
-                })?;
+        let mdf = ModifyEvent::from_internal_parts(
+            ident,
+            &modlist,
+            &filter,
+            &mut idms_prox_write.qs_write,
+        )
+        .map_err(|e| {
+            admin_error!(err = ?e, "Failed to begin modify");
+            e
+        })?;
 
         trace!(?mdf, "Begin modify event");
 
         idms_prox_write
@@ -320,7 +325,7 @@ impl QueryServerWriteV1 {
             admin_error!(err = ?e, "Invalid identity");
             e
         })?;
-        let del = match DeleteEvent::from_parts(ident, &filter, &idms_prox_write.qs_write) {
+        let del = match DeleteEvent::from_parts(ident, &filter, &mut idms_prox_write.qs_write) {
             Ok(d) => d,
             Err(e) => {
                 admin_error!(err = ?e, "Failed to begin delete");
@@ -943,7 +948,7 @@ impl QueryServerWriteV1 {
             target_uuid,
             &proto_ml,
             filter,
-            &idms_prox_write.qs_write,
+            &mut idms_prox_write.qs_write,
         ) {
             Ok(m) => m,
             Err(e) => {
@@ -1419,7 +1424,7 @@ impl QueryServerWriteV1 {
     )]
     pub async fn handle_purgetombstoneevent(&self, msg: PurgeTombstoneEvent) {
         trace!(?msg, "Begin purge tombstone event");
-        let idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await;
+        let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await;
 
         let res = idms_prox_write
             .qs_write
@@ -1437,7 +1442,7 @@ impl QueryServerWriteV1 {
     )]
     pub async fn handle_purgerecycledevent(&self, msg: PurgeRecycledEvent) {
         trace!(?msg, "Begin purge recycled event");
-        let idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await;
+        let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await;
         let res = idms_prox_write
             .qs_write
             .purge_recycled()

@@ -27,7 +27,7 @@ pub struct OnlineBackup {
 }
 
 fn default_online_backup_schedule() -> String {
-    "00 22 * * *".to_string()
+    "@daily".to_string()
 }
 
 fn default_online_backup_versions() -> usize {
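The hunk only shows the default functions; for context, this is the usual serde pattern such defaults plug into. The struct shape below is a sketch (it assumes serde's `default = "fn"` attribute and a derive-enabled serde dependency), not the exact definition from config.rs:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    pub struct OnlineBackup {
        pub path: String,
        // Falls back to "@daily" when `schedule` is absent from the TOML.
        #[serde(default = "default_online_backup_schedule")]
        pub schedule: String,
        #[serde(default = "default_online_backup_versions")]
        pub versions: usize,
    }

    fn default_online_backup_schedule() -> String {
        "@daily".to_string()
    }

    fn default_online_backup_versions() -> usize {
        7
    }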
@@ -3,11 +3,10 @@
 
 use std::fs;
 use std::path::Path;
+use std::str::FromStr;
 
 use chrono::Utc;
-
-use saffron::parse::{CronExpr, English};
-use saffron::Cron;
+use cron::Schedule;
 
 use tokio::sync::broadcast;
 use tokio::time::{interval, sleep, Duration};
@@ -61,23 +60,22 @@ impl IntervalActor {
         mut rx: broadcast::Receiver<CoreAction>,
     ) -> Result<tokio::task::JoinHandle<()>, ()> {
         let outpath = cfg.path.to_owned();
-        let schedule = cfg.schedule.to_owned();
         let versions = cfg.versions;
 
         // Cron expression handling
-        let cron_expr = schedule.as_str().parse::<CronExpr>().map_err(|e| {
+        let cron_expr = Schedule::from_str(cfg.schedule.as_str()).map_err(|e| {
             error!("Online backup schedule parse error: {}", e);
+            error!("valid formats are:");
+            error!("sec min hour day of month month day of week year");
+            error!("@hourly | @daily | @weekly");
         })?;
 
-        info!(
-            "Online backup schedule parsed as: {}",
-            cron_expr.describe(English::default())
-        );
+        info!("Online backup schedule parsed as: {}", cron_expr);
 
-        if !Cron::new(cron_expr.clone()).any() {
+        if cron_expr.upcoming(Utc).next().is_none() {
             error!(
                 "Online backup schedule error: '{}' will not match any date.",
-                schedule
+                cron_expr
             );
             return Err(());
         }
@@ -106,11 +104,7 @@ impl IntervalActor {
         }
 
         let handle = tokio::spawn(async move {
-            let ct = Utc::now();
-            let cron = Cron::new(cron_expr.clone());
-
-            let cron_iter = cron.clone().iter_after(ct);
-            for next_time in cron_iter {
+            for next_time in cron_expr.upcoming(Utc) {
                 // We add 1 second to the `wait_time` in order to get "even" timestampes
                 // for example: 1 + 17:05:59Z --> 17:06:00Z
                 let wait_seconds = 1 + (next_time - Utc::now()).num_seconds() as u64;
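Pulled out of the actor, the new scheduling loop reduces to the pattern below. This is a sketch under the same imports the diff uses; shutdown handling and the backup call itself are elided:

    use std::str::FromStr;
    use chrono::Utc;
    use cron::Schedule;
    use tokio::time::{sleep, Duration};

    #[tokio::main]
    async fn main() {
        let cron_expr = Schedule::from_str("@hourly").expect("valid schedule");
        for next_time in cron_expr.upcoming(Utc).take(1) {
            // +1s rounds the wakeup to an "even" timestamp,
            // e.g. 17:05:59Z -> 17:06:00Z, as the comment above notes.
            let wait_seconds = 1 + (next_time - Utc::now()).num_seconds() as u64;
            sleep(Duration::from_secs(wait_seconds)).await;
            // ... run the online backup here ...
        }
    }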
@@ -5,7 +5,7 @@ use std::str::FromStr;
 use crate::actors::v1_read::QueryServerReadV1;
 use futures_util::sink::SinkExt;
 use futures_util::stream::StreamExt;
-use kanidmd_lib::ldap::{LdapBoundToken, LdapResponseState};
+use kanidmd_lib::idm::ldap::{LdapBoundToken, LdapResponseState};
 use kanidmd_lib::prelude::*;
 use ldap3_proto::proto::LdapMsg;
 use ldap3_proto::LdapCodec;

@@ -38,8 +38,8 @@ use compact_jwt::JwsSigner;
 use kanidm_proto::messages::{AccountChangeMessage, MessageStatus};
 use kanidm_proto::v1::OperationError;
 use kanidmd_lib::be::{Backend, BackendConfig, BackendTransaction, FsType};
+use kanidmd_lib::idm::ldap::LdapServer;
 use kanidmd_lib::idm::server::{IdmServer, IdmServerDelayed};
-use kanidmd_lib::ldap::LdapServer;
 use kanidmd_lib::prelude::*;
 use kanidmd_lib::schema::Schema;
 use kanidmd_lib::status::StatusActor;
@@ -171,7 +171,7 @@ macro_rules! dbscan_setup_be {
 
 pub fn dbscan_list_indexes_core(config: &Configuration) {
     let be = dbscan_setup_be!(config);
-    let be_rotxn = be.read();
+    let mut be_rotxn = be.read();
 
     match be_rotxn.list_indexes() {
         Ok(mut idx_list) => {
@@ -188,7 +188,7 @@ pub fn dbscan_list_indexes_core(config: &Configuration) {
 
 pub fn dbscan_list_id2entry_core(config: &Configuration) {
     let be = dbscan_setup_be!(config);
-    let be_rotxn = be.read();
+    let mut be_rotxn = be.read();
 
     match be_rotxn.list_id2entry() {
         Ok(mut id_list) => {
@@ -210,7 +210,7 @@ pub fn dbscan_list_index_analysis_core(config: &Configuration) {
 
 pub fn dbscan_list_index_core(config: &Configuration, index_name: &str) {
     let be = dbscan_setup_be!(config);
-    let be_rotxn = be.read();
+    let mut be_rotxn = be.read();
 
     match be_rotxn.list_index_content(index_name) {
         Ok(mut idx_list) => {
@@ -227,7 +227,7 @@ pub fn dbscan_list_index_core(config: &Configuration, index_name: &str) {
 
 pub fn dbscan_get_id2entry_core(config: &Configuration, id: u64) {
     let be = dbscan_setup_be!(config);
-    let be_rotxn = be.read();
+    let mut be_rotxn = be.read();
 
     match be_rotxn.get_id2entry(id) {
         Ok((id, value)) => println!("{:>8}: {}", id, value),
@@ -254,7 +254,7 @@ pub fn backup_server_core(config: &Configuration, dst_path: &str) {
         }
     };
 
-    let be_ro_txn = be.read();
+    let mut be_ro_txn = be.read();
     let r = be_ro_txn.backup(dst_path);
     match r {
         Ok(_) => info!("Backup success!"),
@@ -286,7 +286,7 @@ pub async fn restore_server_core(config: &Configuration, dst_path: &str) {
         }
     };
 
-    let be_wr_txn = be.write();
+    let mut be_wr_txn = be.write();
     let r = be_wr_txn.restore(dst_path).and_then(|_| be_wr_txn.commit());
 
     if r.is_err() {
@@ -308,7 +308,7 @@ pub async fn restore_server_core(config: &Configuration, dst_path: &str) {
 
     info!("Start reindex phase ...");
 
-    let qs_write = qs.write(duration_from_epoch_now()).await;
+    let mut qs_write = qs.write(duration_from_epoch_now()).await;
     let r = qs_write.reindex().and_then(|_| qs_write.commit());
 
     match r {
@@ -342,7 +342,7 @@ pub async fn reindex_server_core(config: &Configuration) {
     };
 
     // Reindex only the core schema attributes to bootstrap the process.
-    let be_wr_txn = be.write();
+    let mut be_wr_txn = be.write();
    let r = be_wr_txn.reindex().and_then(|_| be_wr_txn.commit());
 
     // Now that's done, setup a minimal qs and reindex from that.
@@ -365,7 +365,7 @@ pub async fn reindex_server_core(config: &Configuration) {
 
     eprintln!("Start Index Phase 2 ...");
 
-    let qs_write = qs.write(duration_from_epoch_now()).await;
+    let mut qs_write = qs.write(duration_from_epoch_now()).await;
     let r = qs_write.reindex().and_then(|_| qs_write.commit());
 
     match r {

@@ -4,7 +4,6 @@
 //! is to persist content safely to disk, load that content, and execute queries
 //! utilising indexes in the most effective way possible.
 
-use std::cell::UnsafeCell;
 use std::fs;
 use std::ops::DerefMut;
 use std::sync::Arc;
@@ -22,7 +21,6 @@ use uuid::Uuid;
 use crate::be::dbentry::{DbBackup, DbEntry};
 use crate::entry::{Entry, EntryCommitted, EntryNew, EntrySealed};
 use crate::filter::{Filter, FilterPlan, FilterResolved, FilterValidResolved};
-use crate::identity::Limits;
 use crate::prelude::*;
 use crate::repl::cid::Cid;
 use crate::repl::ruv::{
@@ -49,6 +47,38 @@ pub use crate::be::idl_sqlite::FsType;
 const FILTER_SEARCH_TEST_THRESHOLD: usize = 0;
 const FILTER_EXISTS_TEST_THRESHOLD: usize = 0;
 
+#[derive(Debug, Clone)]
+/// Limits on the resources a single event can consume. These are defined per-event
+/// as they are derived from the userAuthToken based on that individual session
+pub struct Limits {
+    pub unindexed_allow: bool,
+    pub search_max_results: usize,
+    pub search_max_filter_test: usize,
+    pub filter_max_elements: usize,
+}
+
+impl Default for Limits {
+    fn default() -> Self {
+        Limits {
+            unindexed_allow: false,
+            search_max_results: 128,
+            search_max_filter_test: 256,
+            filter_max_elements: 32,
+        }
+    }
+}
+
+impl Limits {
+    pub fn unlimited() -> Self {
+        Limits {
+            unindexed_allow: true,
+            search_max_results: usize::MAX,
+            search_max_filter_test: usize::MAX,
+            filter_max_elements: usize::MAX,
+        }
+    }
+}
+
 #[derive(Debug, Clone)]
 pub enum IdList {
     AllIds,
@@ -120,9 +150,9 @@ pub struct Backend {
 }
 
 pub struct BackendReadTransaction<'a> {
-    idlayer: UnsafeCell<IdlArcSqliteReadTransaction<'a>>,
+    idlayer: IdlArcSqliteReadTransaction<'a>,
     idxmeta: CowCellReadTxn<IdxMeta>,
-    ruv: UnsafeCell<ReplicationUpdateVectorReadTransaction<'a>>,
+    ruv: ReplicationUpdateVectorReadTransaction<'a>,
 }
 
 unsafe impl<'a> Sync for BackendReadTransaction<'a> {}
@@ -130,9 +160,9 @@ unsafe impl<'a> Sync for BackendReadTransaction<'a> {}
 unsafe impl<'a> Send for BackendReadTransaction<'a> {}
 
 pub struct BackendWriteTransaction<'a> {
-    idlayer: UnsafeCell<IdlArcSqliteWriteTransaction<'a>>,
+    idlayer: IdlArcSqliteWriteTransaction<'a>,
     idxmeta: CowCellReadTxn<IdxMeta>,
-    ruv: UnsafeCell<ReplicationUpdateVectorWriteTransaction<'a>>,
+    ruv: ReplicationUpdateVectorWriteTransaction<'a>,
     idxmeta_wr: CowCellWriteTxn<'a, IdxMeta>,
 }
 
@@ -160,12 +190,10 @@ impl IdRawEntry {
 
 pub trait BackendTransaction {
     type IdlLayerType: IdlArcSqliteTransaction;
-    #[allow(clippy::mut_from_ref)]
-    fn get_idlayer(&self) -> &mut Self::IdlLayerType;
+    fn get_idlayer(&mut self) -> &mut Self::IdlLayerType;
 
     type RuvType: ReplicationUpdateVectorTransaction;
-    #[allow(clippy::mut_from_ref)]
-    fn get_ruv(&self) -> &mut Self::RuvType;
+    fn get_ruv(&mut self) -> &mut Self::RuvType;
 
     fn get_idxmeta_ref(&self) -> &IdxMeta;
 
@@ -174,7 +202,7 @@ pub trait BackendTransaction {
     #[allow(clippy::cognitive_complexity)]
     #[instrument(level = "debug", name = "be::filter2idl", skip_all)]
     fn filter2idl(
-        &self,
+        &mut self,
         filt: &FilterResolved,
         thres: usize,
     ) -> Result<(IdList, FilterPlan), OperationError> {
@@ -534,7 +562,7 @@ pub trait BackendTransaction {
 
     #[instrument(level = "debug", name = "be::search", skip_all)]
     fn search(
-        &self,
+        &mut self,
         erl: &Limits,
         filt: &Filter<FilterValidResolved>,
     ) -> Result<Vec<Arc<EntrySealedCommitted>>, OperationError> {
@@ -629,7 +657,7 @@ pub trait BackendTransaction {
     /// refint and attr uniqueness.
     #[instrument(level = "debug", name = "be::exists", skip_all)]
     fn exists(
-        &self,
+        &mut self,
         erl: &Limits,
         filt: &Filter<FilterValidResolved>,
     ) -> Result<bool, OperationError> {
@@ -688,12 +716,12 @@ pub trait BackendTransaction {
         } // end match idl
     }
 
-    fn verify(&self) -> Vec<Result<(), ConsistencyError>> {
+    fn verify(&mut self) -> Vec<Result<(), ConsistencyError>> {
         self.get_idlayer().verify()
     }
 
     fn verify_entry_index(
-        &self,
+        &mut self,
         e: &Entry<EntrySealed, EntryCommitted>,
     ) -> Result<(), ConsistencyError> {
         // First, check our references in name2uuid, uuid2spn and uuid2rdn
@@ -766,7 +794,7 @@ pub trait BackendTransaction {
         Ok(())
     }
 
-    fn verify_indexes(&self) -> Vec<Result<(), ConsistencyError>> {
+    fn verify_indexes(&mut self) -> Vec<Result<(), ConsistencyError>> {
         let idl = IdList::AllIds;
         let entries = match self.get_idlayer().get_identry(&idl) {
             Ok(s) => s,
@@ -785,7 +813,7 @@ pub trait BackendTransaction {
         }
     }
 
-    fn verify_ruv(&self, results: &mut Vec<Result<(), ConsistencyError>>) {
+    fn verify_ruv(&mut self, results: &mut Vec<Result<(), ConsistencyError>>) {
         // The way we verify this is building a whole second RUV and then comparing it.
         let idl = IdList::AllIds;
         let entries = match self.get_idlayer().get_identry(&idl) {
@@ -800,7 +828,7 @@ pub trait BackendTransaction {
         self.get_ruv().verify(&entries, results);
     }
 
-    fn backup(&self, dst_path: &str) -> Result<(), OperationError> {
+    fn backup(&mut self, dst_path: &str) -> Result<(), OperationError> {
         // load all entries into RAM, may need to change this later
         // if the size of the database compared to RAM is an issue
         let idl = IdList::AllIds;
@@ -847,19 +875,19 @@ pub trait BackendTransaction {
         })
     }
 
-    fn name2uuid(&self, name: &str) -> Result<Option<Uuid>, OperationError> {
+    fn name2uuid(&mut self, name: &str) -> Result<Option<Uuid>, OperationError> {
         self.get_idlayer().name2uuid(name)
     }
 
-    fn externalid2uuid(&self, name: &str) -> Result<Option<Uuid>, OperationError> {
+    fn externalid2uuid(&mut self, name: &str) -> Result<Option<Uuid>, OperationError> {
         self.get_idlayer().externalid2uuid(name)
     }
 
-    fn uuid2spn(&self, uuid: Uuid) -> Result<Option<Value>, OperationError> {
+    fn uuid2spn(&mut self, uuid: Uuid) -> Result<Option<Value>, OperationError> {
         self.get_idlayer().uuid2spn(uuid)
     }
 
-    fn uuid2rdn(&self, uuid: Uuid) -> Result<Option<String>, OperationError> {
+    fn uuid2rdn(&mut self, uuid: Uuid) -> Result<Option<String>, OperationError> {
         self.get_idlayer().uuid2rdn(uuid)
     }
 }
@@ -868,25 +896,12 @@ impl<'a> BackendTransaction for BackendReadTransaction<'a> {
     type IdlLayerType = IdlArcSqliteReadTransaction<'a>;
     type RuvType = ReplicationUpdateVectorReadTransaction<'a>;
 
-    #[allow(clippy::mut_from_ref)]
-    fn get_idlayer(&self) -> &mut IdlArcSqliteReadTransaction<'a> {
-        // OKAY here be the cursed thing. We know that in our application
-        // that during a transaction, that we are the only holder of the
-        // idlayer, so we KNOW it can be mut, and we know every thing it
-        // returns is a copy anyway. But if we permeate that mut up, it prevents
-        // reference holding of read-only structures in loops, which was forcing
-        // a lot of clones.
-        //
-        // Instead we make everything immutable, and use interior mutability
-        // to the idlayer here since we know and can assert it is correct
-        // that during this inner mutable phase, that nothing will be
-        // conflicting during this cache operation.
-        unsafe { &mut (*self.idlayer.get()) }
+    fn get_idlayer(&mut self) -> &mut IdlArcSqliteReadTransaction<'a> {
+        &mut self.idlayer
     }
 
-    #[allow(clippy::mut_from_ref)]
-    fn get_ruv(&self) -> &mut ReplicationUpdateVectorReadTransaction<'a> {
-        unsafe { &mut (*self.ruv.get()) }
+    fn get_ruv(&mut self) -> &mut ReplicationUpdateVectorReadTransaction<'a> {
        &mut self.ruv
     }
 
     fn get_idxmeta_ref(&self) -> &IdxMeta {
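The removed comment documented the old interior-mutability trick: hand a `&mut` out of a shared reference via UnsafeCell, and promise by convention that the transaction is the only holder. The shape of the change, reduced to a toy type so it stands alone:

    use std::cell::UnsafeCell;

    // Before: &mut from &self, sound only while the caller upholds the
    // "only holder" promise documented in the removed comment.
    struct BeforeTxn {
        idlayer: UnsafeCell<Vec<u8>>,
    }

    impl BeforeTxn {
        #[allow(clippy::mut_from_ref)]
        fn get_idlayer(&self) -> &mut Vec<u8> {
            unsafe { &mut *self.idlayer.get() }
        }
    }

    // After: store the value directly and let the borrow checker prove
    // exclusivity instead.
    struct AfterTxn {
        idlayer: Vec<u8>,
    }

    impl AfterTxn {
        fn get_idlayer(&mut self) -> &mut Vec<u8> {
            &mut self.idlayer
        }
    }

    fn main() {
        let before = BeforeTxn { idlayer: UnsafeCell::new(Vec::new()) };
        before.get_idlayer().push(1);
        let mut after = AfterTxn { idlayer: vec![1, 2, 3] };
        after.get_idlayer().push(4);
        assert_eq!(after.get_idlayer().len(), 4);
    }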
@@ -895,22 +910,22 @@ impl<'a> BackendTransaction for BackendReadTransaction<'a> {
 }
 
 impl<'a> BackendReadTransaction<'a> {
-    pub fn list_indexes(&self) -> Result<Vec<String>, OperationError> {
+    pub fn list_indexes(&mut self) -> Result<Vec<String>, OperationError> {
         self.get_idlayer().list_idxs()
     }
 
-    pub fn list_id2entry(&self) -> Result<Vec<(u64, String)>, OperationError> {
+    pub fn list_id2entry(&mut self) -> Result<Vec<(u64, String)>, OperationError> {
         self.get_idlayer().list_id2entry()
     }
 
     pub fn list_index_content(
-        &self,
+        &mut self,
         index_name: &str,
     ) -> Result<Vec<(String, IDLBitRange)>, OperationError> {
         self.get_idlayer().list_index_content(index_name)
     }
 
-    pub fn get_id2entry(&self, id: u64) -> Result<(u64, String), OperationError> {
+    pub fn get_id2entry(&mut self, id: u64) -> Result<(u64, String), OperationError> {
         self.get_idlayer().get_id2entry(id)
     }
 }
@@ -919,14 +934,12 @@ impl<'a> BackendTransaction for BackendWriteTransaction<'a> {
     type IdlLayerType = IdlArcSqliteWriteTransaction<'a>;
     type RuvType = ReplicationUpdateVectorWriteTransaction<'a>;
 
-    #[allow(clippy::mut_from_ref)]
-    fn get_idlayer(&self) -> &mut IdlArcSqliteWriteTransaction<'a> {
-        unsafe { &mut (*self.idlayer.get()) }
+    fn get_idlayer(&mut self) -> &mut IdlArcSqliteWriteTransaction<'a> {
+        &mut self.idlayer
     }
 
-    #[allow(clippy::mut_from_ref)]
-    fn get_ruv(&self) -> &mut ReplicationUpdateVectorWriteTransaction<'a> {
-        unsafe { &mut (*self.ruv.get()) }
+    fn get_ruv(&mut self) -> &mut ReplicationUpdateVectorWriteTransaction<'a> {
        &mut self.ruv
     }
 
     fn get_idxmeta_ref(&self) -> &IdxMeta {
@@ -937,7 +950,7 @@ impl<'a> BackendTransaction for BackendWriteTransaction<'a> {
 impl<'a> BackendWriteTransaction<'a> {
     #[instrument(level = "debug", name = "be::create", skip_all)]
     pub fn create(
-        &self,
+        &mut self,
         cid: &Cid,
         entries: Vec<Entry<EntrySealed, EntryNew>>,
     ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
@@ -959,10 +972,9 @@ impl<'a> BackendWriteTransaction<'a> {
             }
         })?;
 
-        let idlayer = self.get_idlayer();
         // Now, assign id's to all the new entries.
 
-        let mut id_max = idlayer.get_id2entry_max_id()?;
+        let mut id_max = self.idlayer.get_id2entry_max_id()?;
         let c_entries: Vec<_> = entries
             .into_iter()
             .map(|e| {
@@ -977,9 +989,9 @@ impl<'a> BackendWriteTransaction<'a> {
 
         self.get_ruv().insert_change(cid, ruv_idl)?;
 
-        idlayer.write_identries(c_entries.iter())?;
+        self.idlayer.write_identries(c_entries.iter())?;
 
-        idlayer.set_id2entry_max_id(id_max);
+        self.idlayer.set_id2entry_max_id(id_max);
 
         // Now update the indexes as required.
         for e in c_entries.iter() {
@@ -991,7 +1003,7 @@ impl<'a> BackendWriteTransaction<'a> {
 
     #[instrument(level = "debug", name = "be::modify", skip_all)]
     pub fn modify(
-        &self,
+        &mut self,
         cid: &Cid,
         pre_entries: &[Arc<EntrySealedCommitted>],
         post_entries: &[EntrySealedCommitted],
@@ -1031,7 +1043,7 @@ impl<'a> BackendWriteTransaction<'a> {
     }
 
     #[instrument(level = "debug", name = "be::reap_tombstones", skip_all)]
-    pub fn reap_tombstones(&self, cid: &Cid) -> Result<usize, OperationError> {
+    pub fn reap_tombstones(&mut self, cid: &Cid) -> Result<usize, OperationError> {
         // We plan to clear the RUV up to this cid. So we need to build an IDL
         // of all the entries we need to examine.
         let idl = self.get_ruv().trim_up_to(cid).map_err(|e| {
@@ -1142,7 +1154,7 @@ impl<'a> BackendWriteTransaction<'a> {
     // TODO: Can this be improved?
     #[allow(clippy::cognitive_complexity)]
     fn entry_index(
-        &self,
+        &mut self,
         pre: Option<&Entry<EntrySealed, EntryCommitted>>,
         post: Option<&Entry<EntrySealed, EntryCommitted>>,
     ) -> Result<(), OperationError> {
@@ -1175,8 +1187,6 @@ impl<'a> BackendWriteTransaction<'a> {
         // and can trigger correct actions.
         //
 
-        let idlayer = self.get_idlayer();
-
         let mask_pre = pre.and_then(|e| e.mask_recycled_ts());
         let mask_pre = if !uuid_same {
             // Okay, so if the uuids are different this is probably from
@@ -1204,23 +1214,23 @@ impl<'a> BackendWriteTransaction<'a> {
 
             // Write the changes out to the backend
             if let Some(rem) = n2u_rem {
-                idlayer.write_name2uuid_rem(rem)?
+                self.idlayer.write_name2uuid_rem(rem)?
             }
 
             if let Some(rem) = eid2u_rem {
-                idlayer.write_externalid2uuid_rem(rem)?
+                self.idlayer.write_externalid2uuid_rem(rem)?
             }
 
             match u2s_act {
                 None => {}
-                Some(Ok(k)) => idlayer.write_uuid2spn(uuid, Some(k))?,
-                Some(Err(_)) => idlayer.write_uuid2spn(uuid, None)?,
+                Some(Ok(k)) => self.idlayer.write_uuid2spn(uuid, Some(k))?,
+                Some(Err(_)) => self.idlayer.write_uuid2spn(uuid, None)?,
             }
 
             match u2r_act {
                 None => {}
-                Some(Ok(k)) => idlayer.write_uuid2rdn(uuid, Some(k))?,
-                Some(Err(_)) => idlayer.write_uuid2rdn(uuid, None)?,
+                Some(Ok(k)) => self.idlayer.write_uuid2rdn(uuid, Some(k))?,
+                Some(Err(_)) => self.idlayer.write_uuid2rdn(uuid, None)?,
             }
             // Return none, mask_pre is now completed.
             None
@@ -1247,29 +1257,29 @@ impl<'a> BackendWriteTransaction<'a> {
 
         // Write the changes out to the backend
         if let Some(add) = n2u_add {
-            idlayer.write_name2uuid_add(e_uuid, add)?
+            self.idlayer.write_name2uuid_add(e_uuid, add)?
         }
         if let Some(rem) = n2u_rem {
-            idlayer.write_name2uuid_rem(rem)?
+            self.idlayer.write_name2uuid_rem(rem)?
         }
 
         if let Some(add) = eid2u_add {
-            idlayer.write_externalid2uuid_add(e_uuid, add)?
+            self.idlayer.write_externalid2uuid_add(e_uuid, add)?
         }
         if let Some(rem) = eid2u_rem {
-            idlayer.write_externalid2uuid_rem(rem)?
+            self.idlayer.write_externalid2uuid_rem(rem)?
         }
 
         match u2s_act {
             None => {}
-            Some(Ok(k)) => idlayer.write_uuid2spn(e_uuid, Some(k))?,
-            Some(Err(_)) => idlayer.write_uuid2spn(e_uuid, None)?,
+            Some(Ok(k)) => self.idlayer.write_uuid2spn(e_uuid, Some(k))?,
+            Some(Err(_)) => self.idlayer.write_uuid2spn(e_uuid, None)?,
         }
 
         match u2r_act {
             None => {}
-            Some(Ok(k)) => idlayer.write_uuid2rdn(e_uuid, Some(k))?,
-            Some(Err(_)) => idlayer.write_uuid2rdn(e_uuid, None)?,
+            Some(Ok(k)) => self.idlayer.write_uuid2rdn(e_uuid, Some(k))?,
+            Some(Err(_)) => self.idlayer.write_uuid2rdn(e_uuid, None)?,
         }
 
         // Extremely Cursed - Okay, we know that self.idxmeta will NOT be changed
@@ -1287,10 +1297,10 @@ impl<'a> BackendWriteTransaction<'a> {
             match act {
                 Ok((attr, itype, idx_key)) => {
                     trace!("Adding {:?} idx -> {:?}: {:?}", itype, attr, idx_key);
-                    match idlayer.get_idl(attr, itype, &idx_key)? {
+                    match self.idlayer.get_idl(attr, itype, &idx_key)? {
                         Some(mut idl) => {
                             idl.insert_id(e_id);
-                            idlayer.write_idl(attr, itype, &idx_key, &idl)
+                            self.idlayer.write_idl(attr, itype, &idx_key, &idl)
                         }
                         None => {
                             warn!(
@@ -1303,10 +1313,10 @@ impl<'a> BackendWriteTransaction<'a> {
                 }
                 Err((attr, itype, idx_key)) => {
                     trace!("Removing {:?} idx -> {:?}: {:?}", itype, attr, idx_key);
-                    match idlayer.get_idl(attr, itype, &idx_key)? {
+                    match self.idlayer.get_idl(attr, itype, &idx_key)? {
                         Some(mut idl) => {
                             idl.remove_id(e_id);
-                            idlayer.write_idl(attr, itype, &idx_key, &idl)
+                            self.idlayer.write_idl(attr, itype, &idx_key, &idl)
                         }
                         None => {
                             warn!(
@@ -1323,7 +1333,7 @@ impl<'a> BackendWriteTransaction<'a> {
     }
 
     #[allow(dead_code)]
-    fn missing_idxs(&self) -> Result<Vec<(AttrString, IndexType)>, OperationError> {
+    fn missing_idxs(&mut self) -> Result<Vec<(AttrString, IndexType)>, OperationError> {
         let idx_table_list = self.get_idlayer().list_idxs()?;
 
         // Turn the vec to a real set
@@ -1348,28 +1358,27 @@ impl<'a> BackendWriteTransaction<'a> {
         Ok(missing)
     }
 
-    fn create_idxs(&self) -> Result<(), OperationError> {
-        let idlayer = self.get_idlayer();
+    fn create_idxs(&mut self) -> Result<(), OperationError> {
         // Create name2uuid and uuid2name
         trace!("Creating index -> name2uuid");
-        idlayer.create_name2uuid()?;
+        self.idlayer.create_name2uuid()?;
 
         trace!("Creating index -> externalid2uuid");
-        idlayer.create_externalid2uuid()?;
+        self.idlayer.create_externalid2uuid()?;
 
         trace!("Creating index -> uuid2spn");
-        idlayer.create_uuid2spn()?;
+        self.idlayer.create_uuid2spn()?;
 
         trace!("Creating index -> uuid2rdn");
-        idlayer.create_uuid2rdn()?;
+        self.idlayer.create_uuid2rdn()?;
 
         self.idxmeta
             .idxkeys
             .keys()
-            .try_for_each(|ikey| idlayer.create_idx(&ikey.attr, ikey.itype))
+            .try_for_each(|ikey| self.idlayer.create_idx(&ikey.attr, ikey.itype))
     }
 
-    pub fn upgrade_reindex(&self, v: i64) -> Result<(), OperationError> {
+    pub fn upgrade_reindex(&mut self, v: i64) -> Result<(), OperationError> {
         let dbv = self.get_db_index_version();
         admin_debug!(?dbv, ?v, "upgrade_reindex");
         if dbv < v {
@@ -1384,10 +1393,9 @@ impl<'a> BackendWriteTransaction<'a> {
         }
     }
 
-    pub fn reindex(&self) -> Result<(), OperationError> {
-        let idlayer = self.get_idlayer();
+    pub fn reindex(&mut self) -> Result<(), OperationError> {
         // Purge the idxs
-        unsafe { idlayer.purge_idxs()? };
+        unsafe { self.idlayer.purge_idxs()? };
 
         // Using the index metadata on the txn, create all our idx tables
         self.create_idxs()?;
@@ -1396,7 +1404,7 @@ impl<'a> BackendWriteTransaction<'a> {
         // Future idea: Do this in batches of X amount to limit memory
         // consumption.
         let idl = IdList::AllIds;
-        let entries = idlayer.get_identry(&idl).map_err(|e| {
+        let entries = self.idlayer.get_identry(&idl).map_err(|e| {
             admin_error!(err = ?e, "get_identry failure");
             e
         })?;
@@ -1420,10 +1428,10 @@ impl<'a> BackendWriteTransaction<'a> {
         })?;
         limmediate_warning!(" reindexed {} entries ✅\n", count);
         limmediate_warning!("Optimising Indexes ... ");
-        idlayer.optimise_dirty_idls();
+        self.idlayer.optimise_dirty_idls();
         limmediate_warning!("done ✅\n");
         limmediate_warning!("Calculating Index Optimisation Slopes ... ");
-        idlayer.analyse_idx_slopes().map_err(|e| {
+        self.idlayer.analyse_idx_slopes().map_err(|e| {
             admin_error!(err = ?e, "index optimisation failed");
             e
         })?;
@@ -1432,13 +1440,13 @@ impl<'a> BackendWriteTransaction<'a> {
     }
 
     #[cfg(test)]
-    pub fn purge_idxs(&self) -> Result<(), OperationError> {
+    pub fn purge_idxs(&mut self) -> Result<(), OperationError> {
         unsafe { self.get_idlayer().purge_idxs() }
     }
 
     #[cfg(test)]
     pub fn load_test_idl(
-        &self,
+        &mut self,
         attr: &String,
         itype: IndexType,
         idx_key: &String,
@@ -1446,11 +1454,11 @@ impl<'a> BackendWriteTransaction<'a> {
         self.get_idlayer().get_idl(attr, itype, idx_key)
     }
 
-    fn is_idx_slopeyness_generated(&self) -> Result<bool, OperationError> {
+    fn is_idx_slopeyness_generated(&mut self) -> Result<bool, OperationError> {
         self.get_idlayer().is_idx_slopeyness_generated()
     }
 
-    fn get_idx_slope(&self, ikey: &IdxKey) -> Result<IdxSlope, OperationError> {
+    fn get_idx_slope(&mut self, ikey: &IdxKey) -> Result<IdxSlope, OperationError> {
         // Do we have the slopeyness?
         let slope = self
             .get_idlayer()
@@ -1460,7 +1468,7 @@ impl<'a> BackendWriteTransaction<'a> {
         Ok(slope)
     }
 
-    pub fn restore(&self, src_path: &str) -> Result<(), OperationError> {
+    pub fn restore(&mut self, src_path: &str) -> Result<(), OperationError> {
         let idlayer = self.get_idlayer();
         // load all entries into RAM, may need to change this later
         // if the size of the database compared to RAM is an issue
@@ -1560,24 +1568,20 @@ impl<'a> BackendWriteTransaction<'a> {
             idxmeta_wr,
         } = self;
 
-        // Unwrap the Cell we have finished with it.
-        let idlayer = idlayer.into_inner();
-        let ruv = ruv.into_inner();
-
         idlayer.commit().map(|()| {
             ruv.commit();
             idxmeta_wr.commit();
         })
     }
 
-    fn reset_db_s_uuid(&self) -> Result<Uuid, OperationError> {
+    fn reset_db_s_uuid(&mut self) -> Result<Uuid, OperationError> {
         // The value is missing. Generate a new one and store it.
         let nsid = Uuid::new_v4();
         self.get_idlayer().write_db_s_uuid(nsid)?;
         Ok(nsid)
     }
 
-    pub fn get_db_s_uuid(&self) -> Uuid {
+    pub fn get_db_s_uuid(&mut self) -> Uuid {
         #[allow(clippy::expect_used)]
         match self
             .get_idlayer()
@@ -1591,14 +1595,14 @@ impl<'a> BackendWriteTransaction<'a> {
 
     /// This generates a new domain UUID and stores it into the database,
     /// returning the new UUID
-    fn reset_db_d_uuid(&self) -> Result<Uuid, OperationError> {
+    fn reset_db_d_uuid(&mut self) -> Result<Uuid, OperationError> {
         let nsid = Uuid::new_v4();
         self.get_idlayer().write_db_d_uuid(nsid)?;
         Ok(nsid)
     }
 
     /// This pulls the domain UUID from the database
-    pub fn get_db_d_uuid(&self) -> Uuid {
+    pub fn get_db_d_uuid(&mut self) -> Uuid {
         #[allow(clippy::expect_used)]
         match self
             .get_idlayer()
@@ -1610,11 +1614,11 @@ impl<'a> BackendWriteTransaction<'a> {
         }
     }
 
-    pub fn set_db_ts_max(&self, ts: Duration) -> Result<(), OperationError> {
+    pub fn set_db_ts_max(&mut self, ts: Duration) -> Result<(), OperationError> {
         self.get_idlayer().set_db_ts_max(ts)
     }
 
-    pub fn get_db_ts_max(&self, ts: Duration) -> Result<Duration, OperationError> {
+    pub fn get_db_ts_max(&mut self, ts: Duration) -> Result<Duration, OperationError> {
         // if none, return ts. If found, return it.
         match self.get_idlayer().get_db_ts_max()? {
             Some(dts) => Ok(dts),
@@ -1622,11 +1626,11 @@ impl<'a> BackendWriteTransaction<'a> {
         }
     }
 
-    fn get_db_index_version(&self) -> i64 {
+    fn get_db_index_version(&mut self) -> i64 {
         self.get_idlayer().get_db_index_version()
     }
 
-    fn set_db_index_version(&self, v: i64) -> Result<(), OperationError> {
+    fn set_db_index_version(&mut self, v: i64) -> Result<(), OperationError> {
         self.get_idlayer().set_db_index_version(v)
     }
 }
@@ -1730,24 +1734,24 @@ impl Backend {
 
     pub fn read(&self) -> BackendReadTransaction {
         BackendReadTransaction {
-            idlayer: UnsafeCell::new(self.idlayer.read()),
+            idlayer: self.idlayer.read(),
             idxmeta: self.idxmeta.read(),
-            ruv: UnsafeCell::new(self.ruv.read()),
+            ruv: self.ruv.read(),
         }
     }
 
     pub fn write(&self) -> BackendWriteTransaction {
         BackendWriteTransaction {
-            idlayer: UnsafeCell::new(self.idlayer.write()),
+            idlayer: self.idlayer.write(),
             idxmeta: self.idxmeta.read(),
-            ruv: UnsafeCell::new(self.ruv.write()),
+            ruv: self.ruv.write(),
             idxmeta_wr: self.idxmeta.write(),
         }
     }
 
     // Should this actually call the idlayer directly?
     pub fn reset_db_s_uuid(&self) -> Uuid {
-        let wr = self.write();
+        let mut wr = self.write();
         #[allow(clippy::expect_used)]
         let sid = wr
             .reset_db_s_uuid()
@@ -1778,11 +1782,11 @@ mod tests {
     use idlset::v2::IDLBitRange;
 
     use super::super::entry::{Entry, EntryInit, EntryNew};
+    use super::Limits;
     use super::{
         Backend, BackendConfig, BackendTransaction, BackendWriteTransaction, DbBackup, IdList,
         IdxKey, OperationError,
     };
-    use crate::identity::Limits;
     use crate::prelude::*;
     use crate::repl::cid::Cid;
     use crate::value::{IndexType, PartialValue, Value};

@@ -40,11 +40,14 @@ lazy_static! {
 pub static ref CLASS_ACCESS_CONTROL_PROFILE: Value = Value::new_class("access_control_profile");
 pub static ref CLASS_ACCESS_CONTROL_SEARCH: Value = Value::new_class("access_control_search");
 pub static ref CLASS_ACCOUNT: Value = Value::new_class("account");
 pub static ref CLASS_ATTRIBUTETYPE: Value = Value::new_class("attributetype");
 pub static ref CLASS_CLASS: Value = Value::new_class("class");
 pub static ref CLASS_DOMAIN_INFO: Value = Value::new_class("domain_info");
 pub static ref CLASS_DYNGROUP: Value = Value::new_class("dyngroup");
 pub static ref CLASS_GROUP: Value = Value::new_class("group");
 pub static ref CLASS_MEMBEROF: Value = Value::new_class("memberof");
 pub static ref CLASS_OBJECT: Value = Value::new_class("object");
 pub static ref CLASS_PERSON: Value = Value::new_class("person");
 pub static ref CLASS_RECYCLED: Value = Value::new_class("recycled");
 pub static ref CLASS_SERVICE_ACCOUNT: Value = Value::new_class("service_account");
 pub static ref CLASS_SYNC_OBJECT: Value = Value::new_class("sync_object");

@@ -215,18 +215,16 @@ impl TryFrom<&str> for Password {
             let c = cost.parse::<usize>().map_err(|_| ())?;
 
             let s = ab64_to_b64!(salt);
-            let s =
-                base64::decode_config(&s, base64::STANDARD.decode_allow_trailing_bits(true))
-                    .map_err(|e| {
-                        error!(?e, "Invalid base64 in oldap pbkdf2-sha1");
-                    })?;
+            let s = base64::decode_config(s, base64::STANDARD.decode_allow_trailing_bits(true))
+                .map_err(|e| {
+                    error!(?e, "Invalid base64 in oldap pbkdf2-sha1");
+                })?;
 
             let h = ab64_to_b64!(hash);
-            let h =
-                base64::decode_config(&h, base64::STANDARD.decode_allow_trailing_bits(true))
-                    .map_err(|e| {
-                        error!(?e, "Invalid base64 in oldap pbkdf2-sha1");
-                    })?;
+            let h = base64::decode_config(h, base64::STANDARD.decode_allow_trailing_bits(true))
+                .map_err(|e| {
+                    error!(?e, "Invalid base64 in oldap pbkdf2-sha1");
+                })?;
 
             // This is just sha1 in a trenchcoat.
             if value.strip_prefix("{PBKDF2}").is_some()
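The decode path above leans on the 0.13-era `base64` config API (an assumption based on the calls shown; newer base64 releases use an Engine API instead). A small self-contained sketch of the call, with an illustrative input:

    fn main() {
        // decode_allow_trailing_bits tolerates the non-canonical final
        // characters that openldap's adapted base64 can produce.
        let config = base64::STANDARD.decode_allow_trailing_bits(true);
        let salt = base64::decode_config("c2FsdA==", config).expect("valid base64");
        assert_eq!(salt, b"salt");
    }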
@@ -16,7 +16,7 @@ impl CryptoPolicy {
     }
 
     pub fn time_target(t: Duration) -> Self {
-        let r = match Password::bench_pbkdf2((PBKDF2_MIN_NIST_COST * 10) as usize) {
+        let r = match Password::bench_pbkdf2(PBKDF2_MIN_NIST_COST * 10) {
             Some(bt) => {
                 let ubt = bt.as_nanos() as usize;
 
@@ -33,12 +33,12 @@ impl CryptoPolicy {
                 // eprintln!("Maybe rounds -> {}", r);
 
                 if r < PBKDF2_MIN_NIST_COST {
-                    PBKDF2_MIN_NIST_COST as usize
+                    PBKDF2_MIN_NIST_COST
                 } else {
-                    r as usize
+                    r
                 }
             }
-            None => PBKDF2_MIN_NIST_COST as usize,
+            None => PBKDF2_MIN_NIST_COST,
         };
 
         CryptoPolicy { pbkdf2_cost: r }

@@ -47,7 +47,7 @@ use crate::be::dbvalue::DbValueSetV2;
 use crate::be::{IdxKey, IdxSlope};
 use crate::credential::Credential;
 use crate::filter::{Filter, FilterInvalid, FilterResolved, FilterValidResolved};
-use crate::ldap::ldap_vattr_map;
+use crate::idm::ldap::ldap_vattr_map;
 use crate::modify::{Modify, ModifyInvalid, ModifyList, ModifyValid};
 use crate::prelude::*;
 use crate::repl::cid::Cid;
@@ -279,7 +279,7 @@ impl Entry<EntryInit, EntryNew> {
     /// [`Entry`] type.
     pub fn from_proto_entry(
         e: &ProtoEntry,
-        qs: &QueryServerWriteTransaction,
+        qs: &mut QueryServerWriteTransaction,
     ) -> Result<Self, OperationError> {
         trace!("from_proto_entry");
         // Why not the trait? In the future we may want to extend
@@ -319,7 +319,7 @@ impl Entry<EntryInit, EntryNew> {
     #[instrument(level = "debug", skip_all)]
     pub fn from_proto_entry_str(
         es: &str,
-        qs: &QueryServerWriteTransaction,
+        qs: &mut QueryServerWriteTransaction,
     ) -> Result<Self, OperationError> {
         if cfg!(test) {
             if es.len() > 256 {
@@ -1725,7 +1725,7 @@ impl Entry<EntryReduced, EntryCommitted> {
     }
 
     /// Transform this reduced entry into a JSON protocol form that can be sent to clients.
-    pub fn to_pe(&self, qs: &QueryServerReadTransaction) -> Result<ProtoEntry, OperationError> {
+    pub fn to_pe(&self, qs: &mut QueryServerReadTransaction) -> Result<ProtoEntry, OperationError> {
         // Turn values -> Strings.
         let attrs: Result<_, _> = self
             .attrs
@@ -1738,7 +1738,7 @@ impl Entry<EntryReduced, EntryCommitted> {
     /// Transform this reduced entry into an LDAP form that can be sent to clients.
     pub fn to_ldap(
         &self,
-        qs: &QueryServerReadTransaction,
+        qs: &mut QueryServerReadTransaction,
         basedn: &str,
         // Did the client request all attributes?
         all_attrs: bool,
@@ -2298,7 +2298,7 @@ impl<VALID, STATE> Entry<VALID, STATE> {
 
     /// Determine if this entry is recycled or a tombstone, and map that to "None". This allows
     /// filter_map to effectively remove entries that should not be considered as "alive".
-    pub(crate) fn mask_recycled_ts(&self) -> Option<&Self> {
+    pub fn mask_recycled_ts(&self) -> Option<&Self> {
         // Only when cls has ts/rc then None, else lways Some(self).
         match self.attrs.get("class") {
             Some(cls) => {
@@ -2316,7 +2316,7 @@ impl<VALID, STATE> Entry<VALID, STATE> {
 
     /// Determine if this entry is recycled, and map that to "None". This allows
     /// filter_map to effectively remove entries that are recycled in some cases.
-    pub(crate) fn mask_recycled(&self) -> Option<&Self> {
+    pub fn mask_recycled(&self) -> Option<&Self> {
         // Only when cls has ts/rc then None, else lways Some(self).
         match self.attrs.get("class") {
             Some(cls) => {
@@ -2332,7 +2332,7 @@ impl<VALID, STATE> Entry<VALID, STATE> {
 
     /// Determine if this entry is a tombstone, and map that to "None". This allows
     /// filter_map to effectively remove entries that are tombstones in some cases.
-    pub(crate) fn mask_tombstone(&self) -> Option<&Self> {
+    pub fn mask_tombstone(&self) -> Option<&Self> {
         // Only when cls has ts/rc then None, else lways Some(self).
         match self.attrs.get("class") {
             Some(cls) => {
||||
|
|
|
@@ -28,7 +28,6 @@ use uuid::Uuid;

use crate::entry::{Entry, EntryCommitted, EntryInit, EntryNew, EntryReduced};
use crate::filter::{Filter, FilterInvalid, FilterValid};
- use crate::identity::Limits;
use crate::modify::{ModifyInvalid, ModifyList, ModifyValid};
use crate::prelude::*;
use crate::schema::SchemaTransaction;

@@ -41,7 +40,7 @@ pub struct SearchResult {

impl SearchResult {
pub fn new(
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
entries: &[Entry<EntryReduced, EntryCommitted>],
) -> Result<Self, OperationError> {
let entries: Result<_, _> = entries

@@ -83,7 +82,7 @@ impl SearchEvent {
pub fn from_message(
ident: Identity,
req: &SearchRequest,
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let f = Filter::from_ro(&ident, &req.filter, qs)?;
// We do need to do this twice to account for the ignore_hidden

@@ -106,7 +105,7 @@ impl SearchEvent {
ident: Identity,
filter: &Filter<FilterInvalid>,
attrs: Option<&[String]>,
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let r_attrs: Option<BTreeSet<AttrString>> = attrs.map(|vs| {
vs.iter()

@@ -280,7 +279,7 @@ impl SearchEvent {
}

pub(crate) fn new_ext_impersonate_uuid(
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
ident: Identity,
lf: &LdapFilter,
attrs: Option<BTreeSet<AttrString>>,

@@ -340,7 +339,7 @@ impl CreateEvent {
pub fn from_message(
ident: Identity,
req: &CreateRequest,
-     qs: &QueryServerWriteTransaction,
+     qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let rentries: Result<Vec<_>, _> = req
.entries

@@ -430,7 +429,7 @@ impl DeleteEvent {
pub fn from_message(
ident: Identity,
req: &DeleteRequest,
-     qs: &QueryServerWriteTransaction,
+     qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let f = Filter::from_rw(&ident, &req.filter, qs)?;
let filter_orig = f

@@ -447,7 +446,7 @@ impl DeleteEvent {
pub fn from_parts(
ident: Identity,
f: &Filter<FilterInvalid>,
-     qs: &QueryServerWriteTransaction,
+     qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let filter_orig = f
.validate(qs.get_schema())

@@ -525,7 +524,7 @@ impl ModifyEvent {
pub fn from_message(
ident: Identity,
req: &ModifyRequest,
-     qs: &QueryServerWriteTransaction,
+     qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let f = Filter::from_rw(&ident, &req.filter, qs)?;
let m = ModifyList::from(&req.modlist, qs)?;

@@ -549,7 +548,7 @@ impl ModifyEvent {
target_uuid: Uuid,
proto_ml: &ProtoModifyList,
filter: Filter<FilterInvalid>,
-     qs: &QueryServerWriteTransaction,
+     qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let f_uuid = filter_all!(f_eq("uuid", PartialValue::Uuid(target_uuid)));
// Add any supplemental conditions we have.

@@ -707,7 +706,7 @@ pub struct WhoamiResult {

impl WhoamiResult {
pub fn new(
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
e: &Entry<EntryReduced, EntryCommitted>,
) -> Result<Self, OperationError> {
Ok(WhoamiResult {
@@ -25,8 +25,7 @@ use serde::Deserialize;
use uuid::Uuid;

use crate::be::{IdxKey, IdxKeyRef, IdxKeyToRef, IdxMeta, IdxSlope};
- use crate::identity::IdentityId;
- use crate::ldap::ldap_attr_filter_map;
+ use crate::idm::ldap::ldap_attr_filter_map;
use crate::prelude::*;
use crate::schema::SchemaTransaction;
use crate::value::{IndexType, PartialValue};

@@ -495,7 +494,7 @@ impl Filter<FilterInvalid> {
pub fn from_ro(
ev: &Identity,
f: &ProtoFilter,
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let depth = FILTER_DEPTH_MAX;
let mut elems = ev.limits.filter_max_elements;

@@ -510,7 +509,7 @@ impl Filter<FilterInvalid> {
pub fn from_rw(
ev: &Identity,
f: &ProtoFilter,
-     qs: &QueryServerWriteTransaction,
+     qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let depth = FILTER_DEPTH_MAX;
let mut elems = ev.limits.filter_max_elements;

@@ -525,7 +524,7 @@ impl Filter<FilterInvalid> {
pub fn from_ldap_ro(
ev: &Identity,
f: &LdapFilter,
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let depth = FILTER_DEPTH_MAX;
let mut elems = ev.limits.filter_max_elements;

@@ -725,7 +724,7 @@ impl FilterComp {

fn from_ro(
f: &ProtoFilter,
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
depth: usize,
elems: &mut usize,
) -> Result<Self, OperationError> {

@@ -777,7 +776,7 @@ impl FilterComp {

fn from_rw(
f: &ProtoFilter,
-     qs: &QueryServerWriteTransaction,
+     qs: &mut QueryServerWriteTransaction,
depth: usize,
elems: &mut usize,
) -> Result<Self, OperationError> {

@@ -830,7 +829,7 @@ impl FilterComp {

fn from_ldap_ro(
f: &LdapFilter,
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
depth: usize,
elems: &mut usize,
) -> Result<Self, OperationError> {
@@ -1561,15 +1560,14 @@ mod tests {

#[test]
fn test_lessthan_entry_filter() {
- let e: Entry<EntrySealed, EntryNew> = unsafe {
-     Entry::unsafe_from_entry_str(
-         r#"{
-             "attrs": {
-                 "userid": ["william"],
-                 "uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-                 "gidnumber": ["1000"]
-             }
-         }"#,
+ let e = unsafe {
+     entry_init!(
+         ("userid", Value::new_iutf8("william")),
+         (
+             "uuid",
+             Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+         ),
+         ("gidnumber", Value::Uint32(1000))
+     )
+     .into_sealed_new()
};

@@ -1586,15 +1584,14 @@ mod tests {

#[test]
fn test_or_entry_filter() {
- let e: Entry<EntrySealed, EntryNew> = unsafe {
-     Entry::unsafe_from_entry_str(
-         r#"{
-             "attrs": {
-                 "userid": ["william"],
-                 "uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-                 "uidnumber": ["1000"]
-             }
-         }"#,
+ let e = unsafe {
+     entry_init!(
+         ("userid", Value::new_iutf8("william")),
+         (
+             "uuid",
+             Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+         ),
+         ("gidnumber", Value::Uint32(1000))
+     )
+     .into_sealed_new()
};

@@ -1602,7 +1599,7 @@ mod tests {
let f_t1a = unsafe {
filter_resolved!(f_or!([
f_eq("userid", PartialValue::new_iutf8("william")),
- f_eq("uidnumber", PartialValue::new_iutf8("1000")),
+ f_eq("gidnumber", PartialValue::Uint32(1000)),
]))
};
assert!(e.entry_match_no_index(&f_t1a));

@@ -1610,7 +1607,7 @@ mod tests {
let f_t2a = unsafe {
filter_resolved!(f_or!([
f_eq("userid", PartialValue::new_iutf8("william")),
- f_eq("uidnumber", PartialValue::new_iutf8("1001")),
+ f_eq("gidnumber", PartialValue::Uint32(1000)),
]))
};
assert!(e.entry_match_no_index(&f_t2a));

@@ -1618,7 +1615,7 @@ mod tests {
let f_t3a = unsafe {
filter_resolved!(f_or!([
f_eq("userid", PartialValue::new_iutf8("alice")),
- f_eq("uidnumber", PartialValue::new_iutf8("1000")),
+ f_eq("gidnumber", PartialValue::Uint32(1000)),
]))
};
assert!(e.entry_match_no_index(&f_t3a));

@@ -1626,7 +1623,7 @@ mod tests {
let f_t4a = unsafe {
filter_resolved!(f_or!([
f_eq("userid", PartialValue::new_iutf8("alice")),
- f_eq("uidnumber", PartialValue::new_iutf8("1001")),
+ f_eq("gidnumber", PartialValue::Uint32(1001)),
]))
};
assert!(!e.entry_match_no_index(&f_t4a));

@@ -1634,15 +1631,14 @@ mod tests {

#[test]
fn test_and_entry_filter() {
- let e: Entry<EntrySealed, EntryNew> = unsafe {
-     Entry::unsafe_from_entry_str(
-         r#"{
-             "attrs": {
-                 "userid": ["william"],
-                 "uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-                 "uidnumber": ["1000"]
-             }
-         }"#,
+ let e = unsafe {
+     entry_init!(
+         ("userid", Value::new_iutf8("william")),
+         (
+             "uuid",
+             Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+         ),
+         ("gidnumber", Value::Uint32(1000))
+     )
+     .into_sealed_new()
};

@@ -1650,7 +1646,7 @@ mod tests {
let f_t1a = unsafe {
filter_resolved!(f_and!([
f_eq("userid", PartialValue::new_iutf8("william")),
- f_eq("uidnumber", PartialValue::new_iutf8("1000")),
+ f_eq("gidnumber", PartialValue::Uint32(1000)),
]))
};
assert!(e.entry_match_no_index(&f_t1a));

@@ -1658,7 +1654,7 @@ mod tests {
let f_t2a = unsafe {
filter_resolved!(f_and!([
f_eq("userid", PartialValue::new_iutf8("william")),
- f_eq("uidnumber", PartialValue::new_iutf8("1001")),
+ f_eq("gidnumber", PartialValue::Uint32(1001)),
]))
};
assert!(!e.entry_match_no_index(&f_t2a));

@@ -1666,7 +1662,7 @@ mod tests {
let f_t3a = unsafe {
filter_resolved!(f_and!([
f_eq("userid", PartialValue::new_iutf8("alice")),
- f_eq("uidnumber", PartialValue::new_iutf8("1000")),
+ f_eq("gidnumber", PartialValue::Uint32(1000)),
]))
};
assert!(!e.entry_match_no_index(&f_t3a));

@@ -1674,7 +1670,7 @@ mod tests {
let f_t4a = unsafe {
filter_resolved!(f_and!([
f_eq("userid", PartialValue::new_iutf8("alice")),
- f_eq("uidnumber", PartialValue::new_iutf8("1001")),
+ f_eq("gidnumber", PartialValue::Uint32(1001)),
]))
};
assert!(!e.entry_match_no_index(&f_t4a));

@@ -1682,15 +1678,14 @@ mod tests {

#[test]
fn test_not_entry_filter() {
- let e1: Entry<EntrySealed, EntryNew> = unsafe {
-     Entry::unsafe_from_entry_str(
-         r#"{
-             "attrs": {
-                 "userid": ["william"],
-                 "uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-                 "uidnumber": ["1000"]
-             }
-         }"#,
+ let e1 = unsafe {
+     entry_init!(
+         ("userid", Value::new_iutf8("william")),
+         (
+             "uuid",
+             Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+         ),
+         ("gidnumber", Value::Uint32(1000))
+     )
+     .into_sealed_new()
};
@@ -1707,64 +1702,60 @@ mod tests {

#[test]
fn test_nested_entry_filter() {
- let e1: Entry<EntrySealed, EntryNew> = unsafe {
-     Entry::unsafe_from_entry_str(
-         r#"{
-             "attrs": {
-                 "class": ["person"],
-                 "uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-                 "uidnumber": ["1000"]
-             }
-         }"#,
+ let e1 = unsafe {
+     entry_init!(
+         ("class", CLASS_PERSON.clone()),
+         (
+             "uuid",
+             Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+         ),
+         ("gidnumber", Value::Uint32(1000))
+     )
+     .into_sealed_new()
};

- let e2: Entry<EntrySealed, EntryNew> = unsafe {
-     Entry::unsafe_from_entry_str(
-         r#"{
-             "attrs": {
-                 "class": ["person"],
-                 "uuid": ["4b6228ab-1dbe-42a4-a9f5-f6368222438e"],
-                 "uidnumber": ["1001"]
-             }
-         }"#,
+ let e2 = unsafe {
+     entry_init!(
+         ("class", CLASS_PERSON.clone()),
+         (
+             "uuid",
+             Value::Uuid(uuid::uuid!("4b6228ab-1dbe-42a4-a9f5-f6368222438e"))
+         ),
+         ("gidnumber", Value::Uint32(1001))
+     )
+     .into_sealed_new()
};

- let e3: Entry<EntrySealed, EntryNew> = unsafe {
-     Entry::unsafe_from_entry_str(
-         r#"{
-             "attrs": {
-                 "class": ["person"],
-                 "uuid": ["7b23c99d-c06b-4a9a-a958-3afa56383e1d"],
-                 "uidnumber": ["1002"]
-             }
-         }"#,
+ let e3 = unsafe {
+     entry_init!(
+         ("class", CLASS_PERSON.clone()),
+         (
+             "uuid",
+             Value::Uuid(uuid::uuid!("7b23c99d-c06b-4a9a-a958-3afa56383e1d"))
+         ),
+         ("gidnumber", Value::Uint32(1002))
+     )
+     .into_sealed_new()
};

- let e4: Entry<EntrySealed, EntryNew> = unsafe {
-     Entry::unsafe_from_entry_str(
-         r#"{
-             "attrs": {
-                 "class": ["group"],
-                 "uuid": ["21d816b5-1f6a-4696-b7c1-6ed06d22ed81"],
-                 "uidnumber": ["1000"]
-             }
-         }"#,
+ let e4 = unsafe {
+     entry_init!(
+         ("class", CLASS_GROUP.clone()),
+         (
+             "uuid",
+             Value::Uuid(uuid::uuid!("21d816b5-1f6a-4696-b7c1-6ed06d22ed81"))
+         ),
+         ("gidnumber", Value::Uint32(1000))
+     )
+     .into_sealed_new()
};

let f_t1a = unsafe {
filter_resolved!(f_and!([
- f_eq("class", PartialValue::new_class("person")),
+ f_eq("class", PVCLASS_PERSON.clone()),
f_or!([
- f_eq("uidnumber", PartialValue::new_iutf8("1001")),
- f_eq("uidnumber", PartialValue::new_iutf8("1000"))
+ f_eq("gidnumber", PartialValue::Uint32(1001)),
+ f_eq("gidnumber", PartialValue::Uint32(1000))
])
]))
};
@@ -1809,33 +1800,36 @@ mod tests {
let time_p3 = time_p2 + Duration::from_secs(CHANGELOG_MAX_AGE * 2);

let mut server_txn = server.write(time_p1).await;
- let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
-     r#"{
-         "attrs": {
-             "class": ["object", "person", "account"],
-             "name": ["testperson1"],
-             "uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
-             "description": ["testperson"],
-             "displayname": ["testperson1"]
-         }
-     }"#,
+ let e1 = entry_init!(
+     ("class", CLASS_OBJECT.clone()),
+     ("class", CLASS_PERSON.clone()),
+     ("class", CLASS_ACCOUNT.clone()),
+     ("name", Value::new_iname("testperson1")),
+     (
+         "uuid",
+         Value::Uuid(uuid::uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"))
+     ),
+     ("description", Value::new_utf8s("testperson1")),
+     ("displayname", Value::new_utf8s("testperson1"))
+ );
- let e2: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
-     r#"{
-         "attrs": {
-             "class": ["object", "person"],
-             "name": ["testperson2"],
-             "uuid": ["a67c0c71-0b35-4218-a6b0-22d23d131d27"],
-             "description": ["testperson"],
-             "displayname": ["testperson2"]
-         }
-     }"#,
+ let e2 = entry_init!(
+     ("class", CLASS_OBJECT.clone()),
+     ("class", CLASS_PERSON.clone()),
+     ("name", Value::new_iname("testperson2")),
+     (
+         "uuid",
+         Value::Uuid(uuid::uuid!("a67c0c71-0b35-4218-a6b0-22d23d131d27"))
+     ),
+     ("description", Value::new_utf8s("testperson2")),
+     ("displayname", Value::new_utf8s("testperson2"))
+ );

// We need to add these and then push through the state machine.
let e_ts = entry_init!(
- ("class", Value::new_class("object")),
- ("class", Value::new_class("person")),
+ ("class", CLASS_OBJECT.clone()),
+ ("class", CLASS_PERSON.clone()),
("name", Value::new_iname("testperson3")),
(
"uuid",

@@ -1861,11 +1855,11 @@ mod tests {
assert!(server_txn.commit().is_ok());

// Now, establish enough time for the recycled items to be purged.
- let server_txn = server.write(time_p2).await;
+ let mut server_txn = server.write(time_p2).await;
assert!(server_txn.purge_recycled().is_ok());
assert!(server_txn.commit().is_ok());

- let server_txn = server.write(time_p3).await;
+ let mut server_txn = server.write(time_p3).await;
assert!(server_txn.purge_tombstones().is_ok());

// ===== ✅ now ready to test!

@@ -1902,7 +1896,7 @@ mod tests {

#[qs_test]
async fn test_filter_depth_limits(server: &QueryServer) {
- let r_txn = server.read().await;
+ let mut r_txn = server.read().await;

let mut inv_proto = ProtoFilter::Pres("class".to_string());
for _i in 0..(FILTER_DEPTH_MAX + 1) {

@@ -1917,26 +1911,26 @@ mod tests {
let ev = Identity::from_internal();

// Test proto + read
- let res = Filter::from_ro(&ev, &inv_proto, &r_txn);
+ let res = Filter::from_ro(&ev, &inv_proto, &mut r_txn);
assert!(res == Err(OperationError::ResourceLimit));

// ldap
- let res = Filter::from_ldap_ro(&ev, &inv_ldap, &r_txn);
+ let res = Filter::from_ldap_ro(&ev, &inv_ldap, &mut r_txn);
assert!(res == Err(OperationError::ResourceLimit));

// Can only have one db conn at a time.
std::mem::drop(r_txn);

// proto + write
- let wr_txn = server.write(duration_from_epoch_now()).await;
- let res = Filter::from_rw(&ev, &inv_proto, &wr_txn);
+ let mut wr_txn = server.write(duration_from_epoch_now()).await;
+ let res = Filter::from_rw(&ev, &inv_proto, &mut wr_txn);
assert!(res == Err(OperationError::ResourceLimit));
}

#[qs_test]
async fn test_filter_max_element_limits(server: &QueryServer) {
const LIMIT: usize = 4;
- let r_txn = server.read().await;
+ let mut r_txn = server.read().await;

let inv_proto = ProtoFilter::And(
(0..(LIMIT * 2))

@@ -1954,19 +1948,19 @@ mod tests {
ev.limits.filter_max_elements = LIMIT;

// Test proto + read
- let res = Filter::from_ro(&ev, &inv_proto, &r_txn);
+ let res = Filter::from_ro(&ev, &inv_proto, &mut r_txn);
assert!(res == Err(OperationError::ResourceLimit));

// ldap
- let res = Filter::from_ldap_ro(&ev, &inv_ldap, &r_txn);
+ let res = Filter::from_ldap_ro(&ev, &inv_ldap, &mut r_txn);
assert!(res == Err(OperationError::ResourceLimit));

// Can only have one db conn at a time.
std::mem::drop(r_txn);

// proto + write
- let wr_txn = server.write(duration_from_epoch_now()).await;
- let res = Filter::from_rw(&ev, &inv_proto, &wr_txn);
+ let mut wr_txn = server.write(duration_from_epoch_now()).await;
+ let res = Filter::from_rw(&ev, &inv_proto, &mut wr_txn);
assert!(res == Err(OperationError::ResourceLimit));
}
}
@@ -160,7 +160,7 @@ impl Account {
#[instrument(level = "trace", skip_all)]
pub(crate) fn try_from_entry_ro(
value: &Entry<EntrySealed, EntryCommitted>,
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let groups = Group::try_from_account_entry_ro(value, qs)?;
try_from_entry!(value, groups)

@@ -623,7 +623,7 @@ pub struct ListUserAuthTokenEvent {

impl<'a> IdmServerProxyReadTransaction<'a> {
pub fn account_list_user_auth_tokens(
-     &self,
+     &mut self,
lte: &ListUserAuthTokenEvent,
) -> Result<Vec<UatStatus>, OperationError> {
// Make an event from the request

@@ -680,7 +680,6 @@ impl<'a> IdmServerProxyReadTransaction<'a> {

#[cfg(test)]
mod tests {
- use crate::event::{CreateEvent, ModifyEvent};
use crate::prelude::*;
use async_std::task;
use kanidm_proto::v1::{AuthType, UiHint};
@@ -3,7 +3,7 @@ use crate::prelude::*;
use kanidm_proto::internal::AppLink;

impl<'a> IdmServerProxyReadTransaction<'a> {
- pub fn list_applinks(&self, ident: &Identity) -> Result<Vec<AppLink>, OperationError> {
+ pub fn list_applinks(&mut self, ident: &Identity) -> Result<Vec<AppLink>, OperationError> {
// From the member-of of the ident.
let ident_mo = match ident.get_memberof() {
Some(mo) => mo,

@@ -75,7 +75,6 @@ impl<'a> IdmServerProxyReadTransaction<'a> {
#[cfg(test)]
mod tests {
- // use crate::prelude::*;
use crate::event::{CreateEvent, ModifyEvent};
use async_std::task;
use kanidm_proto::internal::AppLink;

@@ -135,7 +134,7 @@ mod tests {
assert!(idms_prox_write.commit().is_ok());

// Now do an applink query, they will not be there.
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

let ident = idms_prox_read
.qs_read

@@ -161,7 +160,7 @@ mod tests {
assert!(idms_prox_write.qs_write.modify(&me_inv_m).is_ok());
assert!(idms_prox_write.commit().is_ok());

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

let ident = idms_prox_read
.qs_read
@@ -24,7 +24,6 @@ use webauthn_rs::prelude::{

use crate::credential::totp::Totp;
use crate::credential::{BackupCodes, Credential, CredentialType, Password};
- use crate::identity::IdentityId;
use crate::idm::account::Account;
use crate::idm::delayed::{
AuthSessionRecord, BackupCodeRemoval, DelayedAction, PasswordUpgrade, WebauthnCounterIncrement,

@@ -15,7 +15,6 @@ use webauthn_rs::prelude::{
RegisterPublicKeyCredential,
};

- use crate::access::AccessControlsTransaction;
use crate::credential::totp::{Totp, TOTP_DEFAULT_STEP};
use crate::credential::{BackupCodes, Credential};
use crate::idm::account::Account;

@@ -1,4 +1,4 @@
- use crate::identity::{AccessScope, IdentityId};
+ use crate::prelude::*;
use time::OffsetDateTime;
use uuid::Uuid;
use webauthn_rs::prelude::AuthenticationResult;

@@ -85,7 +85,7 @@ impl Group {

pub fn try_from_account_entry_ro(
value: &Entry<EntrySealed, EntryCommitted>,
-     qs: &QueryServerReadTransaction,
+     qs: &mut QueryServerReadTransaction,
) -> Result<Vec<Self>, OperationError> {
try_from_account_e!(value, qs)
}
@@ -61,7 +61,7 @@ pub struct LdapServer {
impl LdapServer {
pub fn new(idms: &IdmServer) -> Result<Self, OperationError> {
// let ct = duration_from_epoch_now();
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());
// This is the rootdse path.
// get the domain_info item
let domain_entry = idms_prox_read

@@ -261,7 +261,7 @@ impl LdapServer {
admin_info!(attr = ?k_attrs, "LDAP Search Request Mapped Attrs");

let ct = duration_from_epoch_now();
- let idm_read = idms.proxy_read().await;
+ let mut idm_read = idms.proxy_read().await;
// Now start the txn - we need it for resolving filter components.

// join the filter, with ext_filter

@@ -302,12 +302,16 @@ impl LdapServer {
admin_error!("Invalid identity: {:?}", e);
e
})?;
- let se =
-     SearchEvent::new_ext_impersonate_uuid(&idm_read.qs_read, ident, &lfilter, k_attrs)
-         .map_err(|e| {
-             admin_error!("failed to create search event -> {:?}", e);
-             e
-         })?;
+ let se = SearchEvent::new_ext_impersonate_uuid(
+     &mut idm_read.qs_read,
+     ident,
+     &lfilter,
+     k_attrs,
+ )
+ .map_err(|e| {
+     admin_error!("failed to create search event -> {:?}", e);
+     e
+ })?;

let res = idm_read.qs_read.search_ext(&se).map_err(|e| {
admin_error!("search failure {:?}", e);

@@ -320,9 +324,14 @@ impl LdapServer {
let lres: Result<Vec<_>, _> = res
.into_iter()
.map(|e| {
- e.to_ldap(&idm_read.qs_read, self.basedn.as_str(), all_attrs, &l_attrs)
-     // if okay, wrap in a ldap msg.
-     .map(|r| sr.gen_result_entry(r))
+ e.to_ldap(
+     &mut idm_read.qs_read,
+     self.basedn.as_str(),
+     all_attrs,
+     &l_attrs,
+ )
+ // if okay, wrap in a ldap msg.
+ .map(|r| sr.gen_result_entry(r))
})
.chain(iter::once(Ok(sr.gen_success())))
.collect();

@@ -574,7 +583,7 @@ pub(crate) fn ldap_attr_filter_map(input: &str) -> AttrString {

#[cfg(test)]
mod tests {
- // use crate::prelude::*;
+ use crate::prelude::*;
use std::str::FromStr;

use async_std::task;

@@ -584,10 +593,9 @@ mod tests {
use ldap3_proto::proto::{LdapFilter, LdapOp, LdapSearchScope};
use ldap3_proto::simple::*;

- use crate::event::{CreateEvent, ModifyEvent};
+ use super::{LdapServer, LdapSession};
use crate::idm::event::UnixPasswordChangeEvent;
use crate::idm::serviceaccount::GenerateApiTokenEvent;
- use crate::ldap::{LdapServer, LdapSession};

const TEST_PASSWORD: &'static str = "ntaoeuntnaoeuhraohuercahu😍";

@@ -920,7 +928,14 @@ mod tests {
base: "dc=example,dc=com".to_string(),
scope: LdapSearchScope::Subtree,
filter: LdapFilter::Equality("name".to_string(), "testperson1".to_string()),
- attrs: vec!["name".to_string(), "mail".to_string(), "mail;primary".to_string(), "mail;alternative".to_string(), "emailprimary".to_string(), "emailalternative".to_string()],
+ attrs: vec![
+     "name".to_string(),
+     "mail".to_string(),
+     "mail;primary".to_string(),
+     "mail;alternative".to_string(),
+     "emailprimary".to_string(),
+     "emailalternative".to_string(),
+ ],
};

let sa_uuid = uuid::uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930")
@@ -10,6 +10,7 @@ pub mod credupdatesession;
pub mod delayed;
pub mod event;
pub mod group;
+ pub mod ldap;
pub mod oauth2;
pub mod radius;
pub mod scim;
@@ -29,11 +29,9 @@ use kanidm_proto::v1::{AuthType, UserAuthToken};
use openssl::sha;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;
- use tokio::sync::mpsc::UnboundedSender as Sender;
use tracing::trace;
use url::{Origin, Url};

- use crate::identity::IdentityId;
use crate::idm::account::Account;
use crate::idm::delayed::{DelayedAction, Oauth2ConsentGrant, Oauth2SessionRecord};
use crate::idm::server::{

@@ -540,7 +538,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
}
}

- impl Oauth2ResourceServersReadTransaction {
+ impl<'a> IdmServerProxyReadTransaction<'a> {
pub fn check_oauth2_authorisation(
&self,
ident: &Identity,

@@ -569,13 +567,18 @@ impl Oauth2ResourceServersReadTransaction {
*/

//
- let o2rs = self.inner.rs_set.get(&auth_req.client_id).ok_or_else(|| {
-     admin_warn!(
-         "Invalid oauth2 client_id ({}) Have you configured the oauth2 resource server?",
-         &auth_req.client_id
-     );
-     Oauth2Error::InvalidClientId
- })?;
+ let o2rs = self
+     .oauth2rs
+     .inner
+     .rs_set
+     .get(&auth_req.client_id)
+     .ok_or_else(|| {
+         admin_warn!(
+             "Invalid oauth2 client_id ({}) Have you configured the oauth2 resource server?",
+             &auth_req.client_id
+         );
+         Oauth2Error::InvalidClientId
+     })?;

// redirect_uri must be part of the client_id origin.
if auth_req.redirect_uri.origin() != o2rs.origin {

@@ -808,6 +811,7 @@ impl Oauth2ResourceServersReadTransaction {
})?;

let consent_token = self
+     .oauth2rs
.inner
.fernet
.encrypt_at_time(&consent_data, ct.as_secs());

@@ -821,16 +825,16 @@ impl Oauth2ResourceServersReadTransaction {
}
}

- pub(crate) fn check_oauth2_authorise_permit(
+ pub fn check_oauth2_authorise_permit(
&self,
ident: &Identity,
uat: &UserAuthToken,
consent_token: &str,
ct: Duration,
- async_tx: &Sender<DelayedAction>,
) -> Result<AuthorisePermitSuccess, OperationError> {
// Decode the consent req with our system fernet key. Use a ttl of 5 minutes.
let consent_req: ConsentToken = self
+     .oauth2rs
.inner
.fernet
.decrypt_at_time(consent_token, Some(300), ct.as_secs())

@@ -859,6 +863,7 @@ impl Oauth2ResourceServersReadTransaction {

// Get the resource server config based on this client_id.
let o2rs = self
+     .oauth2rs
.inner
.rs_set
.get(&consent_req.client_id)

@@ -887,7 +892,8 @@ impl Oauth2ResourceServersReadTransaction {
// Everything is DONE! Now submit that it's all happy and the user consented correctly.
// this will let them bypass consent steps in the future.
// Submit that we consented to the delayed action queue
- if async_tx
+ if self
+     .async_tx
.send(DelayedAction::Oauth2ConsentGrant(Oauth2ConsentGrant {
target_uuid: uat.uuid,
oauth2_rs_uuid: o2rs.uuid,

@@ -914,6 +920,7 @@ impl Oauth2ResourceServersReadTransaction {
) -> Result<Url, OperationError> {
// Decode the consent req with our system fernet key. Use a ttl of 5 minutes.
let consent_req: ConsentToken = self
+     .oauth2rs
.inner
.fernet
.decrypt_at_time(consent_token, Some(300), ct.as_secs())

@@ -942,6 +949,7 @@ impl Oauth2ResourceServersReadTransaction {

// Get the resource server config based on this client_id.
let _o2rs = self
+     .oauth2rs
.inner
.rs_set
.get(&consent_req.client_id)

@@ -955,12 +963,10 @@ impl Oauth2ResourceServersReadTransaction {
}

pub fn check_oauth2_token_exchange(
-     &self,
-     idms: &IdmServerProxyReadTransaction<'_>,
+     &mut self,
client_authz: Option<&str>,
token_req: &AccessTokenRequest,
ct: Duration,
-     async_tx: &Sender<DelayedAction>,
) -> Result<AccessTokenResponse, Oauth2Error> {
let (client_id, secret) = if let Some(client_authz) = client_authz {
parse_basic_authz(client_authz)?

@@ -976,11 +982,19 @@ impl Oauth2ResourceServersReadTransaction {
}
};

- // Get the o2rs for the handle.
- let o2rs = self.inner.rs_set.get(&client_id).ok_or_else(|| {
-     admin_warn!("Invalid oauth2 client_id");
-     Oauth2Error::AuthenticationRequired
- })?;
+ // DANGER: Why do we have to do this? During the use of qs for internal search
+ // and other operations we need qs to be mut. But when we borrow oauth2rs here we
+ // cause multiple borrows to occur on struct members that freaks rust out. This *IS*
+ // safe however because no element of the search or write process calls the oauth2rs
+ // excepting for this idm layer within a single thread, meaning that stripping the
+ // lifetime here is safe since we are the sole accessor.
+ let o2rs: &Oauth2RS = unsafe {
+     let s = self.oauth2rs.inner.rs_set.get(&client_id).ok_or_else(|| {
+         admin_warn!("Invalid oauth2 client_id");
+         Oauth2Error::AuthenticationRequired
+     })?;
+     &*(s as *const _)
+ };

// check the secret.
if o2rs.authz_secret != secret {
@@ -993,7 +1007,7 @@ impl Oauth2ResourceServersReadTransaction {
// TODO: add refresh token grant type.
// If it's a refresh token grant, are the consent permissions the same?
if token_req.grant_type == "authorization_code" {
- self.check_oauth2_token_exchange_authorization_code(idms, o2rs, token_req, ct, async_tx)
+ self.check_oauth2_token_exchange_authorization_code(o2rs, token_req, ct)
} else {
admin_warn!("Invalid oauth2 grant_type (should be 'authorization_code')");
Err(Oauth2Error::InvalidRequest)

@@ -1001,12 +1015,10 @@ impl Oauth2ResourceServersReadTransaction {
}

fn check_oauth2_token_exchange_authorization_code(
-     &self,
-     idms: &IdmServerProxyReadTransaction<'_>,
+     &mut self,
o2rs: &Oauth2RS,
token_req: &AccessTokenRequest,
ct: Duration,
-     async_tx: &Sender<DelayedAction>,
) -> Result<AccessTokenResponse, Oauth2Error> {
// Check the token_req is within the valid time, and correctly signed for
// this client.

@@ -1120,12 +1132,12 @@ impl Oauth2ResourceServersReadTransaction {

let iss = o2rs.iss.clone();

- let entry = match idms.qs_read.internal_search_uuid(code_xchg.uat.uuid) {
+ let entry = match self.qs_read.internal_search_uuid(code_xchg.uat.uuid) {
Ok(entry) => entry,
Err(err) => return Err(Oauth2Error::ServerError(err)),
};

- let account = match Account::try_from_entry_ro(&entry, &idms.qs_read) {
+ let account = match Account::try_from_entry_ro(&entry, &mut self.qs_read) {
Ok(account) => account,
Err(err) => return Err(Oauth2Error::ServerError(err)),
};

@@ -1193,7 +1205,7 @@ impl Oauth2ResourceServersReadTransaction {

let refresh_token = None;

- async_tx
+ self.async_tx
.send(DelayedAction::Oauth2SessionRecord(Oauth2SessionRecord {
target_uuid: code_xchg.uat.uuid,
parent_session_id,

@@ -1218,8 +1230,7 @@ impl Oauth2ResourceServersReadTransaction {
}

pub fn check_oauth2_token_introspect(
-     &self,
-     idms: &IdmServerProxyReadTransaction<'_>,
+     &mut self,
client_authz: &str,
intr_req: &AccessTokenIntrospectRequest,
ct: Duration,

@@ -1227,7 +1238,7 @@ impl Oauth2ResourceServersReadTransaction {
let (client_id, secret) = parse_basic_authz(client_authz)?;

// Get the o2rs for the handle.
- let o2rs = self.inner.rs_set.get(&client_id).ok_or_else(|| {
+ let o2rs = self.oauth2rs.inner.rs_set.get(&client_id).ok_or_else(|| {
admin_warn!("Invalid oauth2 client_id");
Oauth2Error::AuthenticationRequired
})?;

@@ -1271,10 +1282,10 @@ impl Oauth2ResourceServersReadTransaction {
security_info!(?uuid, "access token has expired, returning inactive");
return Ok(AccessTokenIntrospectResponse::inactive());
}
- let exp = iat + ((expiry - odt_ct).whole_seconds() as i64);
+ let exp = iat + (expiry - odt_ct).whole_seconds();

// Is the user expired, or the oauth2 session invalid?
- let valid = idms
+ let valid = self
.check_oauth2_account_uuid_valid(uuid, session_id, parent_session_id, iat, ct)
.map_err(|_| admin_error!("Account is not valid"));
@@ -1323,18 +1334,26 @@ impl Oauth2ResourceServersReadTransaction {
}

pub fn oauth2_openid_userinfo(
-     &self,
-     idms: &IdmServerProxyReadTransaction<'_>,
+     &mut self,
client_id: &str,
client_authz: &str,
ct: Duration,
) -> Result<OidcToken, Oauth2Error> {
- let o2rs = self.inner.rs_set.get(client_id).ok_or_else(|| {
-     admin_warn!(
-         "Invalid oauth2 client_id (have you configured the oauth2 resource server?)"
-     );
-     Oauth2Error::InvalidClientId
- })?;
+ // DANGER: Why do we have to do this? During the use of qs for internal search
+ // and other operations we need qs to be mut. But when we borrow oauth2rs here we
+ // cause multiple borrows to occur on struct members that freaks rust out. This *IS*
+ // safe however because no element of the search or write process calls the oauth2rs
+ // excepting for this idm layer within a single thread, meaning that stripping the
+ // lifetime here is safe since we are the sole accessor.
+ let o2rs: &Oauth2RS = unsafe {
+     let s = self.oauth2rs.inner.rs_set.get(client_id).ok_or_else(|| {
+         admin_warn!(
+             "Invalid oauth2 client_id (have you configured the oauth2 resource server?)"
+         );
+         Oauth2Error::InvalidClientId
+     })?;
+     &*(s as *const _)
+ };

let token: Oauth2TokenType = o2rs
.token_fernet

@@ -1368,10 +1387,10 @@ impl Oauth2ResourceServersReadTransaction {
security_info!(?uuid, "access token has expired, returning inactive");
return Err(Oauth2Error::InvalidToken);
}
- let exp = iat + ((expiry - odt_ct).whole_seconds() as i64);
+ let exp = iat + (expiry - odt_ct).whole_seconds();

// Is the user expired, or the oauth2 session invalid?
- let valid = idms
+ let valid = self
.check_oauth2_account_uuid_valid(uuid, session_id, parent_session_id, iat, ct)
.map_err(|_| admin_error!("Account is not valid"));

@@ -1386,7 +1405,7 @@ impl Oauth2ResourceServersReadTransaction {
}
};

- let account = match Account::try_from_entry_ro(&entry, &idms.qs_read) {
+ let account = match Account::try_from_entry_ro(&entry, &mut self.qs_read) {
Ok(account) => account,
Err(err) => return Err(Oauth2Error::ServerError(err)),
};

@@ -1427,7 +1446,7 @@ impl Oauth2ResourceServersReadTransaction {
&self,
client_id: &str,
) -> Result<OidcDiscoveryResponse, OperationError> {
- let o2rs = self.inner.rs_set.get(client_id).ok_or_else(|| {
+ let o2rs = self.oauth2rs.inner.rs_set.get(client_id).ok_or_else(|| {
admin_warn!(
"Invalid oauth2 client_id (have you configured the oauth2 resource server?)"
);

@@ -1507,7 +1526,7 @@ impl Oauth2ResourceServersReadTransaction {
}

pub fn oauth2_openid_publickey(&self, client_id: &str) -> Result<JwkKeySet, OperationError> {
- let o2rs = self.inner.rs_set.get(client_id).ok_or_else(|| {
+ let o2rs = self.oauth2rs.inner.rs_set.get(client_id).ok_or_else(|| {
admin_warn!(
"Invalid oauth2 client_id (have you configured the oauth2 resource server?)"
);

@@ -1592,7 +1611,7 @@ fn extra_claims_for_account(
account.groups.iter().map(|x| x.to_proto().uuid).collect(),
);
}
- return extra_claims;
+ extra_claims
}

#[cfg(test)]

@@ -1607,7 +1626,6 @@ mod tests {
use kanidm_proto::v1::{AuthType, UserAuthToken};
use openssl::sha;

- use crate::event::{CreateEvent, DeleteEvent, ModifyEvent};
use crate::idm::delayed::DelayedAction;
use crate::idm::oauth2::{AuthoriseResponse, Oauth2Error};
use crate::idm::server::{IdmServer, IdmServerTransaction};
@@ -1778,7 +1796,7 @@ mod tests {
let (secret, uat, ident, _) =
setup_oauth2_resource_server(idms, ct, true, false, false);

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

// Get an ident/uat for now.

@@ -2120,7 +2138,7 @@ mod tests {
+ Duration::from_secs(TEST_CURRENT_TIME + UAT_EXPIRE - 1),
);

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

// == Setup the authorisation request
let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble");

@@ -2288,7 +2306,7 @@ mod tests {
setup_oauth2_resource_server(idms, ct, true, false, false);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

// == Setup the authorisation request
let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble");

@@ -2383,7 +2401,7 @@ mod tests {

// start a new read
// check again.
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());
let intr_response = idms_prox_read
.check_oauth2_token_introspect(&client_authz.unwrap(), &intr_request, ct)
.expect("Failed to inspect token");

@@ -2403,7 +2421,7 @@ mod tests {
setup_oauth2_resource_server(idms, ct, true, false, false);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

// == Setup the authorisation request
let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble");

@@ -2466,7 +2484,7 @@ mod tests {
// Okay, now we have the token, we can check behaviours with the revoke interface.

// First, assert it is valid, similar to the introspect api.
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());
let intr_request = AccessTokenIntrospectRequest {
token: oauth2_token.access_token.clone(),
token_type_hint: None,

@@ -2509,7 +2527,7 @@ mod tests {
assert!(idms_prox_write.commit().is_ok());

// Check our token is still valid.
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());
let intr_response = idms_prox_read
.check_oauth2_token_introspect(
client_authz.as_deref().unwrap(),

@@ -2532,7 +2550,7 @@ mod tests {
assert!(idms_prox_write.commit().is_ok());

// Check it is still valid - this is because we are still in the GRACE window.
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());
let intr_response = idms_prox_read
.check_oauth2_token_introspect(
client_authz.as_deref().unwrap(),

@@ -2548,7 +2566,7 @@ mod tests {
let ct = ct + GRACE_WINDOW;

// Assert it is now invalid.
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());
let intr_response = idms_prox_read
.check_oauth2_token_introspect(
client_authz.as_deref().unwrap(),

@@ -2584,7 +2602,7 @@ mod tests {
setup_oauth2_resource_server(idms, ct, true, false, false);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

// == Setup the authorisation request
let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble");

@@ -2927,7 +2945,7 @@ mod tests {
setup_oauth2_resource_server(idms, ct, true, false, false);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble");

@@ -3065,7 +3083,7 @@ mod tests {
setup_oauth2_resource_server(idms, ct, true, false, true);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble");

@@ -3162,7 +3180,7 @@ mod tests {
setup_oauth2_resource_server(idms, ct, true, false, true);
let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret)));

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble");

@@ -3312,7 +3330,7 @@ mod tests {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let (secret, uat, ident, _) =
setup_oauth2_resource_server(idms, ct, false, true, false);
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());
// The public key url should offer an rs key
// discovery should offer RS256
let discovery = idms_prox_read

@@ -3464,7 +3482,7 @@ mod tests {
assert!(idms_prox_write.commit().is_ok());

// == Now try the authorise again, should be in the permitted state.
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

// We need to reload our identity
let ident = idms_prox_read

@@ -3516,7 +3534,7 @@ mod tests {

// And do the workflow once more to see if we need to consent again.

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

// We need to reload our identity
let ident = idms_prox_read

@@ -3583,7 +3601,7 @@ mod tests {

// And do the workflow once more to see if we need to consent again.

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

// We need to reload our identity
let ident = idms_prox_read

@@ -3736,7 +3754,7 @@ mod tests {
let (secret, uat, ident, _) =
setup_oauth2_resource_server(idms, ct, false, false, false);

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

// Get an ident/uat for now.

@@ -3816,7 +3834,7 @@ mod tests {
let (secret, uat, ident, _) =
setup_oauth2_resource_server(idms, ct, false, false, false);

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

// Get an ident/uat for now.
@@ -12,7 +12,6 @@ use crate::idm::server::{IdmServerProxyReadTransaction, IdmServerProxyWriteTrans
use crate::prelude::*;
use crate::value::Session;

- use crate::access::AccessControlsTransaction;
use crate::schema::{SchemaClass, SchemaTransaction};

// Internals of a Scim Sync token

@@ -778,7 +777,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
}

fn scim_attr_to_values(
-     &self,
+     &mut self,
scim_attr_name: &str,
scim_attr: &ScimAttr,
) -> Result<Vec<Value>, OperationError> {

@@ -888,7 +887,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
}

fn scim_entry_to_mod(
-     &self,
+     &mut self,
scim_ent: &ScimEntry,
sync_uuid: Uuid,
sync_allow_class_set: &BTreeMap<String, SchemaClass>,

@@ -1190,7 +1189,10 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
}

impl<'a> IdmServerProxyReadTransaction<'a> {
- pub fn scim_sync_get_state(&self, ident: &Identity) -> Result<ScimSyncState, OperationError> {
+ pub fn scim_sync_get_state(
+     &mut self,
+     ident: &Identity,
+ ) -> Result<ScimSyncState, OperationError> {
// We must be *extra* careful in these functions since we do *internal* searches
// which are *bypassing* normal access checks!

@@ -1232,8 +1234,6 @@ impl<'a> IdmServerProxyReadTransaction<'a> {

#[cfg(test)]
mod tests {
- use crate::event::CreateEvent;
- use crate::event::ModifyEvent;
use crate::idm::server::{IdmServerProxyWriteTransaction, IdmServerTransaction};
use crate::prelude::*;
use base64urlsafedata::Base64UrlSafeData;

@@ -1292,7 +1292,7 @@ mod tests {
assert!(idms_prox_write.commit().is_ok());

// Do a get_state to get the current "state cookie" if any.
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());

let ident = idms_prox_read
.validate_and_parse_sync_token_to_ident(Some(sync_token.as_str()), ct)

@@ -1347,7 +1347,7 @@ mod tests {
assert!(idms_prox_write.commit().is_ok());

// -- Check the happy path.
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());
let ident = idms_prox_read
.validate_and_parse_sync_token_to_ident(Some(sync_token.as_str()), ct)
.expect("Failed to validate sync token");

@@ -1369,7 +1369,7 @@ mod tests {
assert!(idms_prox_write.commit().is_ok());

// Must fail
- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());
let fail = idms_prox_read
.validate_and_parse_sync_token_to_ident(Some(sync_token.as_str()), ct);
assert!(matches!(fail, Err(OperationError::NotAuthenticated)));

@@ -1394,7 +1394,7 @@ mod tests {
assert!(idms_prox_write.qs_write.modify(&me_inv_m).is_ok());
assert!(idms_prox_write.commit().is_ok());

- let idms_prox_read = task::block_on(idms.proxy_read());
+ let mut idms_prox_read = task::block_on(idms.proxy_read());
let fail = idms_prox_read
.validate_and_parse_sync_token_to_ident(Some(sync_token.as_str()), ct);
assert!(matches!(fail, Err(OperationError::NotAuthenticated)));

@@ -1667,7 +1667,7 @@ mod tests {
.is_ok());

let ct = Duration::from_secs(TEST_CURRENT_TIME);
- let idms_prox_write = task::block_on(idms.proxy_write(ct));
+ let mut idms_prox_write = task::block_on(idms.proxy_write(ct));

let ent = idms_prox_write
.qs_write
@@ -25,9 +25,9 @@ use url::Url;
use webauthn_rs::prelude::{Webauthn, WebauthnBuilder};

use super::event::ReadBackupCodeEvent;
+ use super::ldap::{LdapBoundToken, LdapSession};
use crate::credential::policy::CryptoPolicy;
use crate::credential::softlock::CredSoftLock;
- use crate::identity::{AccessScope, IdentType, IdentUser, Limits};
use crate::idm::account::Account;
use crate::idm::authsession::AuthSession;
use crate::idm::credupdatesession::CredentialUpdateSessionMutex;

@@ -44,17 +44,14 @@ use crate::idm::event::{
UnixPasswordChangeEvent, UnixUserAuthEvent, UnixUserTokenEvent,
};
use crate::idm::oauth2::{
-     AccessTokenIntrospectRequest, AccessTokenIntrospectResponse, AccessTokenRequest,
-     AccessTokenResponse, AuthorisationRequest, AuthorisePermitSuccess, AuthoriseResponse,
-     JwkKeySet, Oauth2Error, Oauth2ResourceServers, Oauth2ResourceServersReadTransaction,
-     Oauth2ResourceServersWriteTransaction, OidcDiscoveryResponse, OidcToken,
+     Oauth2ResourceServers, Oauth2ResourceServersReadTransaction,
+     Oauth2ResourceServersWriteTransaction,
};
use crate::idm::radius::RadiusAccount;
use crate::idm::scim::{ScimSyncToken, SyncAccount};
use crate::idm::serviceaccount::ServiceAccount;
use crate::idm::unix::{UnixGroup, UnixUserAccount};
use crate::idm::AuthState;
- use crate::ldap::{LdapBoundToken, LdapSession};
use crate::prelude::*;
use crate::utils::{password_from_random, readable_password_from_random, uuid_from_duration, Sid};
use crate::value::{Oauth2Session, Session};

@@ -117,8 +114,8 @@ pub struct IdmServerCredUpdateTransaction<'a> {
pub struct IdmServerProxyReadTransaction<'a> {
pub qs_read: QueryServerReadTransaction<'a>,
uat_jwt_validator: CowCellReadTxn<JwsValidator>,
- oauth2rs: Oauth2ResourceServersReadTransaction,
- async_tx: Sender<DelayedAction>,
+ pub(crate) oauth2rs: Oauth2ResourceServersReadTransaction,
+ pub(crate) async_tx: Sender<DelayedAction>,
}

pub struct IdmServerProxyWriteTransaction<'a> {

@@ -160,7 +157,7 @@ impl IdmServer {

// Get the domain name, as the relying party id.
let (rp_id, rp_name, fernet_private_key, es256_private_key, pw_badlist_set, oauth2rs_set) = {
- let qs_read = task::block_on(qs.read());
+ let mut qs_read = task::block_on(qs.read());
(
qs_read.get_domain_name().to_string(),
qs_read.get_domain_display_name().to_string(),

@@ -390,7 +387,7 @@ pub enum Token {
pub trait IdmServerTransaction<'a> {
type QsTransactionType: QueryServerTransaction<'a>;

- fn get_qs_txn(&self) -> &Self::QsTransactionType;
+ fn get_qs_txn(&mut self) -> &mut Self::QsTransactionType;

fn get_uat_validator_txn(&self) -> &JwsValidator;

@@ -404,7 +401,7 @@ pub trait IdmServerTransaction<'a> {
/// and validation method.
#[instrument(level = "info", skip_all)]
fn validate_and_parse_token_to_ident(
-     &self,
+     &mut self,
token: Option<&str>,
ct: Duration,
) -> Result<Identity, OperationError> {

@@ -416,7 +413,7 @@ pub trait IdmServerTransaction<'a> {

#[instrument(level = "info", skip_all)]
fn validate_and_parse_token_to_uat(
-     &self,
+     &mut self,
token: Option<&str>,
ct: Duration,
) -> Result<UserAuthToken, OperationError> {

@@ -430,7 +427,7 @@ pub trait IdmServerTransaction<'a> {
}

fn validate_and_parse_token_to_token(
-     &self,
+     &mut self,
token: Option<&str>,
ct: Duration,
) -> Result<Token, OperationError> {

@@ -574,7 +571,7 @@ pub trait IdmServerTransaction<'a> {
}

fn check_oauth2_account_uuid_valid(
-     &self,
+     &mut self,
uuid: Uuid,
session_id: Uuid,
parent_session_id: Uuid,

@@ -637,7 +634,7 @@ pub trait IdmServerTransaction<'a> {
/// relevant session information is injected.
#[instrument(level = "debug", skip_all)]
fn process_uat_to_identity(
-     &self,
+     &mut self,
uat: &UserAuthToken,
ct: Duration,
) -> Result<Identity, OperationError> {

@@ -701,7 +698,7 @@ pub trait IdmServerTransaction<'a> {

#[instrument(level = "debug", skip_all)]
fn process_apit_to_identity(
-     &self,
+     &mut self,
apit: &ApiToken,
entry: Arc<EntrySealedCommitted>,
ct: Duration,

@@ -726,7 +723,7 @@ pub trait IdmServerTransaction<'a> {

#[instrument(level = "debug", skip_all)]
fn validate_ldap_session(
-     &self,
+     &mut self,
session: &LdapSession,
ct: Duration,
) -> Result<Identity, OperationError> {

@@ -786,7 +783,7 @@ pub trait IdmServerTransaction<'a> {

#[instrument(level = "info", skip_all)]
fn validate_and_parse_sync_token_to_ident(
-     &self,
+     &mut self,
token: Option<&str>,
ct: Duration,
) -> Result<Identity, OperationError> {

@@ -871,8 +868,8 @@ pub trait IdmServerTransaction<'a> {
impl<'a> IdmServerTransaction<'a> for IdmServerAuthTransaction<'a> {
type QsTransactionType = QueryServerReadTransaction<'a>;

- fn get_qs_txn(&self) -> &Self::QsTransactionType {
-     &self.qs_read
+ fn get_qs_txn(&mut self) -> &mut Self::QsTransactionType {
+     &mut self.qs_read
}

fn get_uat_validator_txn(&self) -> &JwsValidator {

@@ -1414,8 +1411,8 @@ impl<'a> IdmServerAuthTransaction<'a> {
impl<'a> IdmServerTransaction<'a> for IdmServerProxyReadTransaction<'a> {
type QsTransactionType = QueryServerReadTransaction<'a>;

- fn get_qs_txn(&self) -> &Self::QsTransactionType {
-     &self.qs_read
+ fn get_qs_txn(&mut self) -> &mut Self::QsTransactionType {
+     &mut self.qs_read
}

fn get_uat_validator_txn(&self) -> &JwsValidator {
@@ -1512,87 +1509,13 @@ impl<'a> IdmServerProxyReadTransaction<'a> {

account.to_backupcodesview()
}

-pub fn check_oauth2_authorisation(
-&self,
-ident: &Identity,
-uat: &UserAuthToken,
-auth_req: &AuthorisationRequest,
-ct: Duration,
-) -> Result<AuthoriseResponse, Oauth2Error> {
-self.oauth2rs
-.check_oauth2_authorisation(ident, uat, auth_req, ct)
-}

-pub fn check_oauth2_authorise_permit(
-&self,
-ident: &Identity,
-uat: &UserAuthToken,
-consent_req: &str,
-ct: Duration,
-) -> Result<AuthorisePermitSuccess, OperationError> {
-self.oauth2rs
-.check_oauth2_authorise_permit(ident, uat, consent_req, ct, &self.async_tx)
-}

-pub fn check_oauth2_authorise_reject(
-&self,
-ident: &Identity,
-uat: &UserAuthToken,
-consent_req: &str,
-ct: Duration,
-) -> Result<Url, OperationError> {
-self.oauth2rs
-.check_oauth2_authorise_reject(ident, uat, consent_req, ct)
-}

-pub fn check_oauth2_token_exchange(
-&self,
-client_authz: Option<&str>,
-token_req: &AccessTokenRequest,
-ct: Duration,
-) -> Result<AccessTokenResponse, Oauth2Error> {
-self.oauth2rs
-.check_oauth2_token_exchange(self, client_authz, token_req, ct, &self.async_tx)
-}

-pub fn check_oauth2_token_introspect(
-&self,
-client_authz: &str,
-intr_req: &AccessTokenIntrospectRequest,
-ct: Duration,
-) -> Result<AccessTokenIntrospectResponse, Oauth2Error> {
-self.oauth2rs
-.check_oauth2_token_introspect(self, client_authz, intr_req, ct)
-}

-pub fn oauth2_openid_userinfo(
-&self,
-client_id: &str,
-client_authz: &str,
-ct: Duration,
-) -> Result<OidcToken, Oauth2Error> {
-self.oauth2rs
-.oauth2_openid_userinfo(self, client_id, client_authz, ct)
-}

-pub fn oauth2_openid_discovery(
-&self,
-client_id: &str,
-) -> Result<OidcDiscoveryResponse, OperationError> {
-self.oauth2rs.oauth2_openid_discovery(client_id)
-}

-pub fn oauth2_openid_publickey(&self, client_id: &str) -> Result<JwkKeySet, OperationError> {
-self.oauth2rs.oauth2_openid_publickey(client_id)
-}
}

impl<'a> IdmServerTransaction<'a> for IdmServerProxyWriteTransaction<'a> {
type QsTransactionType = QueryServerWriteTransaction<'a>;

-fn get_qs_txn(&self) -> &Self::QsTransactionType {
-&self.qs_write
+fn get_qs_txn(&mut self) -> &mut Self::QsTransactionType {
+&mut self.qs_write
}

fn get_uat_validator_txn(&self) -> &JwsValidator {
@@ -2318,7 +2241,6 @@ mod tests {

use crate::credential::policy::CryptoPolicy;
use crate::credential::{Credential, Password};
use crate::event::{CreateEvent, ModifyEvent};
use crate::idm::account::DestroySessionTokenEvent;
use crate::idm::delayed::{AuthSessionRecord, DelayedAction};
use crate::idm::event::{AuthEvent, AuthResult};

@@ -3700,7 +3622,7 @@ mod tests {
assert!(Ok(true) == r);
idms_delayed.check_is_empty_or_panic();

-let idms_prox_read = task::block_on(idms.proxy_read());
+let mut idms_prox_read = task::block_on(idms.proxy_read());

// Check it's valid.
idms_prox_read

@@ -3730,7 +3652,7 @@ mod tests {
let session_b = Uuid::new_v4();

// Assert no sessions present
-let idms_prox_read = task::block_on(idms.proxy_read());
+let mut idms_prox_read = task::block_on(idms.proxy_read());
let admin = idms_prox_read
.qs_read
.internal_search_uuid(UUID_ADMIN)

@@ -3753,7 +3675,7 @@ mod tests {
assert!(Ok(true) == r);

// Check it was written, and check
-let idms_prox_read = task::block_on(idms.proxy_read());
+let mut idms_prox_read = task::block_on(idms.proxy_read());
let admin = idms_prox_read
.qs_read
.internal_search_uuid(UUID_ADMIN)

@@ -3786,7 +3708,7 @@ mod tests {
let r = task::block_on(idms.delayed_action(expiry_a, da));
assert!(Ok(true) == r);

-let idms_prox_read = task::block_on(idms.proxy_read());
+let mut idms_prox_read = task::block_on(idms.proxy_read());
let admin = idms_prox_read
.qs_read
.internal_search_uuid(UUID_ADMIN)

@@ -3842,7 +3764,7 @@ mod tests {
.expect("Embedded jwk not found");
let uat_inner = uat_inner.into_inner();

-let idms_prox_read = task::block_on(idms.proxy_read());
+let mut idms_prox_read = task::block_on(idms.proxy_read());

// Check it's valid.
idms_prox_read

@@ -3864,7 +3786,7 @@ mod tests {
assert!(idms_prox_write.commit().is_ok());

// Now check again with the session destroyed.
-let idms_prox_read = task::block_on(idms.proxy_read());
+let mut idms_prox_read = task::block_on(idms.proxy_read());

// Now, within gracewindow, it's still valid.
idms_prox_read

@@ -4001,7 +3923,7 @@ mod tests {
assert!(matches!(da, DelayedAction::AuthSessionRecord(_)));
idms_delayed.check_is_empty_or_panic();

-let idms_prox_read = task::block_on(idms.proxy_read());
+let mut idms_prox_read = task::block_on(idms.proxy_read());

// Check it's valid.
idms_prox_read

@@ -4036,7 +3958,7 @@ mod tests {
assert!(matches!(da, DelayedAction::AuthSessionRecord(_)));
idms_delayed.check_is_empty_or_panic();

-let idms_prox_read = task::block_on(idms.proxy_read());
+let mut idms_prox_read = task::block_on(idms.proxy_read());
assert!(idms_prox_read
.validate_and_parse_token_to_ident(Some(token.as_str()), ct)
.is_err());
@@ -308,7 +308,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {

impl<'a> IdmServerProxyReadTransaction<'a> {
pub fn service_account_list_api_token(
-&self,
+&mut self,
lte: &ListApiTokenEvent,
) -> Result<Vec<ApiToken>, OperationError> {
// Make an event from the request
@@ -38,15 +38,12 @@ pub mod credential;
pub mod entry;
pub mod event;
pub mod filter;
pub mod identity;
pub mod ldap;
pub mod modify;
pub mod utils;
pub mod value;
pub mod valueset;
#[macro_use]
mod plugins;
mod access;
pub mod idm;
mod repl;
pub mod schema;

@@ -57,32 +54,36 @@ pub mod testkit;
/// A prelude of imports that should be imported by all other Kanidm modules to
/// help make imports cleaner.
pub mod prelude {
-pub use kanidm_proto::v1::{ConsistencyError, OperationError};
+pub use kanidm_proto::v1::{ConsistencyError, OperationError, SchemaError};
pub use sketching::{
admin_debug, admin_error, admin_info, admin_warn, filter_error, filter_info, filter_trace,
filter_warn, perf_trace, request_error, request_info, request_trace, request_warn,
security_access, security_critical, security_error, security_info, tagged_event, EventTag,
};
pub use smartstring::alias::String as AttrString;
pub use std::time::Duration;
pub use url::Url;
pub use uuid::{uuid, Uuid};

pub use crate::be::Limits;
pub use crate::constants::*;
pub use crate::entry::{
Entry, EntryCommitted, EntryInit, EntryInitNew, EntryInvalid, EntryInvalidCommitted,
EntryInvalidNew, EntryNew, EntryReduced, EntryReducedCommitted, EntrySealed,
EntrySealedCommitted, EntrySealedNew, EntryTuple, EntryValid,
};
pub use crate::event::{CreateEvent, DeleteEvent, ExistsEvent, ModifyEvent, SearchEvent};
pub use crate::filter::{
f_and, f_andnot, f_eq, f_id, f_inc, f_lt, f_or, f_pres, f_self, f_spn_name, f_sub, Filter,
-FilterInvalid, FC,
+FilterInvalid, FilterValid, FC,
};
-pub use crate::identity::{AccessScope, IdentType, Identity, IdentityId};
pub use crate::idm::server::{IdmServer, IdmServerDelayed};
pub use crate::modify::{
m_assert, m_pres, m_purge, m_remove, Modify, ModifyInvalid, ModifyList, ModifyValid,
};
+pub use crate::server::access::AccessControlsTransaction;
+pub use crate::server::batch_modify::BatchModifyEvent;
+pub use crate::server::identity::{AccessScope, IdentType, IdentUser, Identity, IdentityId};
pub use crate::server::{
QueryServer, QueryServerReadTransaction, QueryServerTransaction,
QueryServerWriteTransaction,
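The prelude hunk above mostly re-points re-exports at their new homes under `server::`. A small, self-contained sketch of the pattern as a standalone lib.rs (layout illustrative only, not kanidm's real module tree):

// Editor's sketch of the prelude re-export pattern; `server::identity` is a
// toy stand-in for the moved module.
mod server {
    pub mod identity {
        pub struct Identity;
    }
}

pub mod prelude {
    pub use crate::server::identity::Identity;
    pub use std::time::Duration;
}

use prelude::*;

// one glob import gives downstream code everything it needs
fn demo(_i: Identity, _d: Duration) {}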
@@ -146,7 +146,7 @@ macro_rules! run_create_test {
let r = qs_write.create(&ce);
trace!("test result: {:?}", r);
assert!(r == $expect);
-$check(&qs_write);
+$check(&mut qs_write);
match r {
Ok(_) => {
qs_write.commit().expect("commit failure!");

@@ -199,7 +199,7 @@ macro_rules! run_modify_test {
{
let mut qs_write = async_std::task::block_on(qs.write(duration_from_epoch_now()));
let r = qs_write.modify(&me);
-$check(&mut qs_write);
+$check(&mut qs_write);
trace!("test result: {:?}", r);
assert!(r == $expect);
match r {
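These macro hunks now hand the write transaction to the `$check` closure as `&mut`, which is why every test callback in the plugin files below changes type. The shape, with a toy type standing in for the real transaction:

// Editor's sketch; QsTxn is an illustrative stand-in for
// QueryServerWriteTransaction.
struct QsTxn;
impl QsTxn {
    fn internal_search(&mut self) {}
}

// The check closure receives &mut so it can run searches that now
// require mutable access.
fn run_check<F: FnOnce(&mut QsTxn)>(qs: &mut QsTxn, check: F) {
    check(qs);
}

fn demo() {
    let mut qs = QsTxn;
    run_check(&mut qs, |qs| qs.internal_search());
}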
@@ -57,7 +57,10 @@ pub fn m_assert(a: &str, v: &PartialValue) -> Modify {
}

impl Modify {
-pub fn from(m: &ProtoModify, qs: &QueryServerWriteTransaction) -> Result<Self, OperationError> {
+pub fn from(
+m: &ProtoModify,
+qs: &mut QueryServerWriteTransaction,
+) -> Result<Self, OperationError> {
Ok(match m {
ProtoModify::Present(a, v) => Modify::Present(a.into(), qs.clone_value(a, v)?),
ProtoModify::Removed(a, v) => Modify::Removed(a.into(), qs.clone_partialvalue(a, v)?),

@@ -124,7 +127,7 @@ impl ModifyList<ModifyInvalid> {

pub fn from(
ml: &ProtoModifyList,
-qs: &QueryServerWriteTransaction,
+qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
// For each ProtoModify, do a from.
let inner: Result<Vec<_>, _> = ml.mods.iter().map(|pm| Modify::from(pm, qs)).collect();

@@ -139,7 +142,7 @@ impl ModifyList<ModifyInvalid> {

pub fn from_patch(
pe: &ProtoEntry,
-qs: &QueryServerWriteTransaction,
+qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let mut mods = Vec::new();
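Threading `&mut QueryServerWriteTransaction` through these `from`-style constructors still works inside the iterator chain above, because an `FnMut` closure reborrows the mutable reference on each call. A toy sketch of the same shape (names illustrative, not kanidm's API):

// Editor's sketch of &mut threading through a fallible-ish constructor.
struct Qs {
    counter: u64,
}
impl Qs {
    // e.g. touching an internal cache is what forces &mut here
    fn clone_value(&mut self, raw: &str) -> String {
        self.counter += 1;
        raw.to_string()
    }
}

struct Modify(String);
impl Modify {
    fn from(raw: &str, qs: &mut Qs) -> Self {
        Modify(qs.clone_value(raw))
    }
}

fn from_list(raws: &[&str], qs: &mut Qs) -> Vec<Modify> {
    // the closure reborrows `qs` mutably on each call, matching the
    // `ml.mods.iter().map(|pm| Modify::from(pm, qs)).collect()` shape
    raws.iter().map(|r| Modify::from(r, qs)).collect()
}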
@@ -54,7 +54,7 @@ fn get_cand_attr_set<VALID, STATE>(
}

fn enforce_unique<STATE>(
-qs: &QueryServerWriteTransaction,
+qs: &mut QueryServerWriteTransaction,
cand: &[Entry<EntryInvalid, STATE>],
attr: &str,
) -> Result<(), OperationError> {

@@ -325,9 +325,9 @@ mod tests {
preload,
create,
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq("name", PartialValue::new_iname("testperson"))))
.expect("Internal search failure");

@@ -420,7 +420,7 @@ mod tests {
preload,
create,
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq("name", PartialValue::new_iname("testperson"))))
.expect("Internal search failure");
@@ -110,7 +110,7 @@ mod tests {
// test we can create and generate the id
#[qs_test]
async fn test_domain_generate_uuid(server: &QueryServer) {
-let server_txn = server.write(duration_from_epoch_now()).await;
+let mut server_txn = server.write(duration_from_epoch_now()).await;
let e_dom = server_txn
.internal_search_uuid(UUID_DOMAIN_INFO)
.expect("must not fail");
@@ -16,7 +16,7 @@ pub struct DynGroup;
impl DynGroup {
#[allow(clippy::too_many_arguments)]
fn apply_dyngroup_change(
-qs: &QueryServerWriteTransaction,
+qs: &mut QueryServerWriteTransaction,
ident: &Identity,
pre_candidates: &mut Vec<Arc<EntrySealedCommitted>>,
candidates: &mut Vec<EntryInvalidCommitted>,

@@ -93,7 +93,7 @@ impl DynGroup {
}

#[instrument(level = "debug", name = "dyngroup_reload", skip(qs))]
-pub fn reload(qs: &QueryServerWriteTransaction) -> Result<(), OperationError> {
+pub fn reload(qs: &mut QueryServerWriteTransaction) -> Result<(), OperationError> {
let ident_internal = Identity::from_internal();
// Internal search all our definitions.
let filt = filter!(f_eq("class", PVCLASS_DYNGROUP.clone()));

@@ -102,9 +102,7 @@ impl DynGroup {
e
})?;

-let dyn_groups = qs.get_dyngroup_cache();
-dyn_groups.insts.clear();
+let mut reload_groups = BTreeMap::default();

for nd_group in entries.into_iter() {
let scope_f: ProtoFilter = nd_group

@@ -122,12 +120,15 @@ impl DynGroup {

let uuid = nd_group.get_uuid();

-if dyn_groups.insts.insert(uuid, scope_i).is_some() {
+if reload_groups.insert(uuid, scope_i).is_some() {
admin_error!("dyngroup cache uuid conflict {}", uuid);
return Err(OperationError::InvalidState);
}
}

+let dyn_groups = qs.get_dyngroup_cache();
+std::mem::swap(&mut reload_groups, &mut dyn_groups.insts);

Ok(())
}
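The reload now builds the replacement map first and swaps it into the cache as the last step, so the cache borrow never overlaps the `qs` searches above it. A toy sketch of the same move:

// Editor's sketch; Cache and the entry tuples are illustrative stand-ins.
use std::collections::BTreeMap;

struct Cache {
    insts: BTreeMap<u64, String>,
}

// Build the new map with no borrow of the cache outstanding, then take
// the &mut borrow only for the final swap.
fn reload(cache: &mut Cache, entries: Vec<(u64, String)>) -> Result<(), ()> {
    let mut reload_groups = BTreeMap::default();
    for (uuid, scope) in entries {
        if reload_groups.insert(uuid, scope).is_some() {
            return Err(()); // duplicate uuid conflict
        }
    }
    std::mem::swap(&mut reload_groups, &mut cache.insts);
    Ok(())
}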
@@ -145,27 +146,33 @@ impl DynGroup {
.iter()
.partition(|entry| entry.attribute_equality("class", &PVCLASS_DYNGROUP));

-let dyn_groups = qs.get_dyngroup_cache();
+// DANGER: Why do we have to do this? During the use of qs for internal search
+// and other operations we need qs to be mut. But when we borrow dyn groups here we
+// cause multiple borrows to occur on struct members that freaks rust out. This *IS*
+// safe however because no element of the search or write process calls the dyngroup
+// cache excepting for this plugin within a single thread, meaning that stripping the
+// lifetime here is safe since we are the sole accessor.
+let dyn_groups: &mut DynGroupCache = unsafe { &mut *(qs.get_dyngroup_cache() as *mut _) };

// For any other entries, check if they SHOULD trigger
// a dyn group inclusion. We do this FIRST because the new
// dyn groups will see the created entries on an internal search
// so we don't need to reference them.

+//
-let resolve_filter_cache = qs.get_resolve_filter_cache();

let mut pre_candidates = Vec::with_capacity(dyn_groups.insts.len() + cand.len());
let mut candidates = Vec::with_capacity(dyn_groups.insts.len() + cand.len());

// Apply existing dyn_groups to entries.
trace!(?dyn_groups.insts);

for (dg_uuid, dg_filter) in dyn_groups.insts.iter() {
let dg_filter_valid = dg_filter
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)
-.and_then(|f| f.resolve(&ident_internal, None, Some(resolve_filter_cache)))?;
+.and_then(|f| {
+f.resolve(&ident_internal, None, Some(qs.get_resolve_filter_cache()))
+})?;

// Did any of our modified entries match our dyn group filter?
let matches: Vec<_> = entries
.iter()
.filter_map(|e| {
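Where the struct is under your control, the conflict the DANGER comment describes can also be solved safely by borrowing disjoint fields directly rather than going through `&mut self` accessors; the unsafe cast is only needed because the plugin cannot restructure the server type. A sketch (toy types, not a drop-in for kanidm):

// Editor's sketch of the borrow conflict and the safe field-split
// alternative; Server and its fields are illustrative.
struct Server {
    dyn_groups: Vec<u32>,
    filter_cache: Vec<u32>,
}

impl Server {
    // Both accessors borrow *all* of self, so their results cannot be
    // live at the same time -- this is the conflict the unsafe cast dodges.
    fn get_dyngroup_cache(&mut self) -> &mut Vec<u32> {
        &mut self.dyn_groups
    }
    fn get_resolve_filter_cache(&mut self) -> &mut Vec<u32> {
        &mut self.filter_cache
    }
}

// Safe alternative when the fields are visible: the compiler can split
// direct field borrows without unsafe.
fn use_both(s: &mut Server) {
    let (dg, fc) = (&mut s.dyn_groups, &mut s.filter_cache);
    dg.push(1);
    fc.push(2);
}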
@@ -177,6 +184,8 @@ impl DynGroup {
})
.collect();

+// If any of them did, we retrieve the dyngroup and setup to write the new
+// members to it.
if !matches.is_empty() {
let filt = filter!(f_eq("uuid", PartialValue::Uuid(*dg_uuid)));
let mut work_set = qs.internal_search_writeable(&filt)?;

@@ -238,7 +247,6 @@ impl DynGroup {
let mut affected_uuids = Vec::with_capacity(cand.len());

let ident_internal = Identity::from_internal();
-let resolve_filter_cache = qs.get_resolve_filter_cache();

// Probably should be filter here instead.
let (_, pre_entries): (Vec<&Arc<Entry<_, _>>>, Vec<_>) = pre_cand

@@ -249,7 +257,13 @@ impl DynGroup {
.iter()
.partition(|entry| entry.attribute_equality("class", &PVCLASS_DYNGROUP));

-let dyn_groups = qs.get_dyngroup_cache();
+// DANGER: Why do we have to do this? During the use of qs for internal search
+// and other operations we need qs to be mut. But when we borrow dyn groups here we
+// cause multiple borrows to occur on struct members that freaks rust out. This *IS*
+// safe however because no element of the search or write process calls the dyngroup
+// cache excepting for this plugin within a single thread, meaning that stripping the
+// lifetime here is safe since we are the sole accessor.
+let dyn_groups: &mut DynGroupCache = unsafe { &mut *(qs.get_dyngroup_cache() as *mut _) };

let mut pre_candidates = Vec::with_capacity(dyn_groups.insts.len() + cand.len());
let mut candidates = Vec::with_capacity(dyn_groups.insts.len() + cand.len());
@@ -283,7 +297,9 @@ impl DynGroup {
let dg_filter_valid = dg_filter
.validate(qs.get_schema())
.map_err(OperationError::SchemaViolation)
-.and_then(|f| f.resolve(&ident_internal, None, Some(resolve_filter_cache)))?;
+.and_then(|f| {
+f.resolve(&ident_internal, None, Some(qs.get_resolve_filter_cache()))
+})?;

let matches: Vec<_> = pre_entries
.iter()

@@ -381,7 +397,7 @@ mod tests {
create,
None,
// Need to validate it did things
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -427,7 +443,7 @@ mod tests {
create,
None,
// Need to validate it did things
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -476,7 +492,7 @@ mod tests {
create,
None,
// Need to validate it did things
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -518,7 +534,7 @@ mod tests {
create,
None,
// Need to validate it did things
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -573,7 +589,7 @@ mod tests {
]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -628,7 +644,7 @@ mod tests {
]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -673,7 +689,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -720,7 +736,7 @@ mod tests {
ModifyList::new_list(vec![Modify::Purged(AttrString::from("member"),)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -769,7 +785,7 @@ mod tests {
]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -818,7 +834,7 @@ mod tests {
]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -858,7 +874,7 @@ mod tests {
preload,
filter!(f_eq("name", PartialValue::new_iname("testgroup"))),
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -898,7 +914,7 @@ mod tests {
preload,
filter!(f_eq("name", PartialValue::new_iname("test_dyngroup"))),
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// Note we check memberof is empty here!
let cands = qs
.internal_search(filter!(f_eq("name", PartialValue::new_iname("testgroup"))))
@@ -96,7 +96,7 @@ impl Plugin for GidNumber {
mod tests {
use crate::prelude::*;

-fn check_gid(qs_write: &QueryServerWriteTransaction, uuid: &str, gid: u32) {
+fn check_gid(qs_write: &mut QueryServerWriteTransaction, uuid: &str, gid: u32) {
let u = Uuid::parse_str(uuid).unwrap();
let e = qs_write.internal_search_uuid(u).unwrap();
let gidnumber = e.get_ava_single("gidnumber").unwrap();

@@ -126,7 +126,7 @@ mod tests {
preload,
create,
None,
-|qs_write: &QueryServerWriteTransaction| check_gid(
+|qs_write: &mut QueryServerWriteTransaction| check_gid(
qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244",
0x997ef244

@@ -158,7 +158,7 @@ mod tests {
preload,
create,
None,
-|qs_write: &QueryServerWriteTransaction| check_gid(
+|qs_write: &mut QueryServerWriteTransaction| check_gid(
qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244",
10001

@@ -190,7 +190,7 @@ mod tests {
modlist!([m_pres("class", &Value::new_class("posixgroup"))]),
None,
|_| {},
-|qs_write: &QueryServerWriteTransaction| check_gid(
+|qs_write: &mut QueryServerWriteTransaction| check_gid(
qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244",
0x997ef244

@@ -222,7 +222,7 @@ mod tests {
modlist!([m_purge("gidnumber")]),
None,
|_| {},
-|qs_write: &QueryServerWriteTransaction| check_gid(
+|qs_write: &mut QueryServerWriteTransaction| check_gid(
qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244",
0x997ef244

@@ -257,7 +257,7 @@ mod tests {
]),
None,
|_| {},
-|qs_write: &QueryServerWriteTransaction| check_gid(
+|qs_write: &mut QueryServerWriteTransaction| check_gid(
qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244",
2000
@@ -132,7 +132,7 @@ mod tests {
preload,
create,
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let e = qs
.internal_search_uuid(uuid)
.expect("failed to get oauth2 config");

@@ -178,7 +178,7 @@ mod tests {
]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let e = qs
.internal_search_uuid(uuid)
.expect("failed to get oauth2 config");
@@ -25,7 +25,7 @@ use crate::value::PartialValue;
pub struct MemberOf;

fn do_memberof(
-qs: &QueryServerWriteTransaction,
+qs: &mut QueryServerWriteTransaction,
uuid: Uuid,
tgte: &mut EntryInvalidCommitted,
) -> Result<(), OperationError> {

@@ -533,7 +533,7 @@ mod tests {
preload,
create,
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(qs, UUID_B, UUID_A);

@@ -564,7 +564,7 @@ mod tests {
preload,
create,
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_A, UUID_A);

@@ -616,7 +616,7 @@ mod tests {
preload,
create,
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(qs, UUID_A, UUID_A);

@@ -674,7 +674,7 @@ mod tests {
preload,
create,
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(qs, UUID_A, UUID_A);

@@ -740,7 +740,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(qs, UUID_B, UUID_A);

@@ -776,7 +776,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_A, UUID_A);

@@ -830,7 +830,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_A, UUID_A);

@@ -887,7 +887,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(qs, UUID_A, UUID_A);

@@ -954,7 +954,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(qs, UUID_A, UUID_A);

@@ -1023,7 +1023,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_B, UUID_A);

@@ -1062,7 +1062,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_A, UUID_A);

@@ -1120,7 +1120,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_A, UUID_A);

@@ -1188,7 +1188,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_A, UUID_A);

@@ -1280,7 +1280,7 @@ mod tests {
]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_A, UUID_A);

@@ -1342,7 +1342,7 @@ mod tests {
preload,
filter!(f_eq("uuid", PartialValue::new_uuid_s(&UUID_A).unwrap())),
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_B, UUID_A);

@@ -1376,7 +1376,7 @@ mod tests {
preload,
filter!(f_eq("uuid", PartialValue::new_uuid_s(&UUID_A).unwrap())),
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_B, UUID_A);

@@ -1420,7 +1420,7 @@ mod tests {
preload,
filter!(f_eq("uuid", PartialValue::new_uuid_s(&UUID_B).unwrap())),
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_A, UUID_A);

@@ -1473,7 +1473,7 @@ mod tests {
preload,
filter!(f_eq("uuid", PartialValue::new_uuid_s(&UUID_A).unwrap())),
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_B, UUID_A);

@@ -1539,7 +1539,7 @@ mod tests {
preload,
filter!(f_eq("uuid", PartialValue::new_uuid_s(&UUID_B).unwrap())),
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(qs, UUID_A, UUID_B);
@@ -276,7 +276,7 @@ mod tests {
)]),
None,
|_| {},
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let e = qs
.internal_search_uuid(uuid!("d2b496bd-8493-47b7-8142-f568b5cf47ee"))
.expect("failed to get entry");
@@ -27,7 +27,7 @@ pub struct ReferentialIntegrity;

impl ReferentialIntegrity {
fn check_uuids_exist(
-qs: &QueryServerWriteTransaction,
+qs: &mut QueryServerWriteTransaction,
inner: Vec<PartialValue>,
) -> Result<(), OperationError> {
if inner.is_empty() {

@@ -316,7 +316,7 @@ mod tests {
preload,
create,
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq(
"name",

@@ -352,7 +352,7 @@ mod tests {
preload,
create,
None,
-|qs: &QueryServerWriteTransaction| {
+|qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(filter!(f_eq("name", PartialValue::new_iname("testgroup"))))
.expect("Internal search failure");
@@ -78,7 +78,7 @@ impl Plugin for Spn {
// so we should be able to verify that *those* spns validate to the trusted domain info
// we have been sent also. It's not up to us to generate those though ...

-let domain_name = qs.get_domain_name();
+let domain_name = qs.get_domain_name().to_string();

let filt_in = filter!(f_or!([
f_eq("class", PVCLASS_GROUP.clone()),

@@ -96,7 +96,7 @@ impl Plugin for Spn {
let mut r = Vec::new();

for e in all_cand {
-let g_spn = match e.generate_spn(domain_name) {
+let g_spn = match e.generate_spn(&domain_name) {
Some(s) => s,
None => {
admin_error!(
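The `to_string()` above ends the shared borrow of `qs` so the later mutable calls on it compile; a minimal sketch of why (illustrative types, not the real plugin interface):

// Editor's sketch of the owned-copy trick used in the Spn hunk.
struct Qs {
    domain: String,
}
impl Qs {
    fn get_domain_name(&self) -> &str {
        &self.domain
    }
    fn search(&mut self) {}
}

fn verify(qs: &mut Qs) {
    // Owned copy: the &str borrow of qs ends on this line.
    let domain_name = qs.get_domain_name().to_string();
    qs.search(); // &mut borrow is now fine
    let _ = domain_name.len();
}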
@@ -2201,41 +2201,31 @@ mod tests {
// We do
let schema_outer = Schema::new().expect("failed to create schema");
let schema = schema_outer.read();
-let e_no_uuid: Entry<EntryInvalid, EntryNew> = unsafe {
-Entry::unsafe_from_entry_str(
-r#"{
-"attrs": {}
-}"#,
-)
-.into_invalid_new()
-};

+let e_no_uuid = unsafe { entry_init!().into_invalid_new() };

assert_eq!(
e_no_uuid.validate(&schema),
Err(SchemaError::MissingMustAttribute(vec!["uuid".to_string()]))
);

-let e_no_class: Entry<EntryInvalid, EntryNew> = unsafe {
-Entry::unsafe_from_entry_str(
-r#"{
-"attrs": {
-"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"]
-}
-}"#,
-)
+let e_no_class = unsafe {
+entry_init!((
+"uuid",
+Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+))
.into_invalid_new()
};

assert_eq!(e_no_class.validate(&schema), Err(SchemaError::NoClassFound));

-let e_bad_class: Entry<EntryInvalid, EntryNew> = unsafe {
-Entry::unsafe_from_entry_str(
-r#"{
-"attrs": {
-"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-"class": ["zzzzzz"]
-}
-}"#,
+let e_bad_class = unsafe {
+entry_init!(
+(
+"uuid",
+Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+),
+("class", Value::new_class("zzzzzz"))
+)
.into_invalid_new()
};

@@ -2244,38 +2234,37 @@ mod tests {
Err(SchemaError::InvalidClass(vec!["zzzzzz".to_string()]))
);

-let e_attr_invalid: Entry<EntryInvalid, EntryNew> = unsafe {
-Entry::unsafe_from_entry_str(
-r#"{
-"attrs": {
-"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-"class": ["object", "attributetype"]
-}
-}"#,
+let e_attr_invalid = unsafe {
+entry_init!(
+(
+"uuid",
+Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+),
+("class", CLASS_OBJECT.clone()),
+("class", CLASS_ATTRIBUTETYPE.clone())
+)
.into_invalid_new()
};

let res = e_attr_invalid.validate(&schema);
assert!(match res {
Err(SchemaError::MissingMustAttribute(_)) => true,
_ => false,
});

-let e_attr_invalid_may: Entry<EntryInvalid, EntryNew> = unsafe {
-Entry::unsafe_from_entry_str(
-r#"{
-"attrs": {
-"class": ["object", "attributetype"],
-"attributename": ["testattr"],
-"description": ["testattr"],
-"multivalue": ["false"],
-"unique": ["false"],
-"syntax": ["UTF8STRING"],
-"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-"zzzzz": ["zzzz"]
-}
-}"#,
+let e_attr_invalid_may = unsafe {
+entry_init!(
+("class", CLASS_OBJECT.clone()),
+("class", CLASS_ATTRIBUTETYPE.clone()),
+("attributename", Value::new_iutf8("testattr")),
+("description", Value::Utf8("testattr".to_string())),
+("multivalue", Value::Bool(false)),
+("unique", Value::Bool(false)),
+("syntax", Value::Syntax(SyntaxType::Utf8String)),
+(
+"uuid",
+Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+),
+("zzzzz", Value::Utf8("zzzz".to_string()))
+)
.into_invalid_new()
};

@@ -2285,19 +2274,19 @@ mod tests {
Err(SchemaError::AttributeNotValidForClass("zzzzz".to_string()))
);

-let e_attr_invalid_syn: Entry<EntryInvalid, EntryNew> = unsafe {
-Entry::unsafe_from_entry_str(
-r#"{
-"attrs": {
-"class": ["object", "attributetype"],
-"attributename": ["testattr"],
-"description": ["testattr"],
-"multivalue": ["zzzzz"],
-"unique": ["false"],
-"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-"syntax": ["UTF8STRING"]
-}
-}"#,
+let e_attr_invalid_syn = unsafe {
+entry_init!(
+("class", CLASS_OBJECT.clone()),
+("class", CLASS_ATTRIBUTETYPE.clone()),
+("attributename", Value::new_iutf8("testattr")),
+("description", Value::Utf8("testattr".to_string())),
+("multivalue", Value::Utf8("false".to_string())),
+("unique", Value::Bool(false)),
+("syntax", Value::Syntax(SyntaxType::Utf8String)),
+(
+"uuid",
+Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+)
+)
.into_invalid_new()
};

@@ -2310,38 +2299,38 @@ mod tests {
);

// You may not have the phantom.
-let e_phantom: Entry<EntryInvalid, EntryNew> = unsafe {
-Entry::unsafe_from_entry_str(
-r#"{
-"attrs": {
-"class": ["object", "attributetype"],
-"attributename": ["testattr"],
-"description": ["testattr"],
-"multivalue": ["true"],
-"unique": ["false"],
-"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-"syntax": ["UTF8STRING"],
-"password_import": ["password"]
-}
-}"#,
+let e_phantom = unsafe {
+entry_init!(
+("class", CLASS_OBJECT.clone()),
+("class", CLASS_ATTRIBUTETYPE.clone()),
+("attributename", Value::new_iutf8("testattr")),
+("description", Value::Utf8("testattr".to_string())),
+("multivalue", Value::Bool(false)),
+("unique", Value::Bool(false)),
+("syntax", Value::Syntax(SyntaxType::Utf8String)),
+(
+"uuid",
+Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+),
+("password_import", Value::Utf8("password".to_string()))
+)
.into_invalid_new()
};
assert!(e_phantom.validate(&schema).is_err());

-let e_ok: Entry<EntryInvalid, EntryNew> = unsafe {
-Entry::unsafe_from_entry_str(
-r#"{
-"attrs": {
-"class": ["object", "attributetype"],
-"attributename": ["testattr"],
-"description": ["testattr"],
-"multivalue": ["true"],
-"unique": ["false"],
-"uuid": ["db237e8a-0079-4b8c-8a56-593b22aa44d1"],
-"syntax": ["UTF8STRING"]
-}
-}"#,
+let e_ok = unsafe {
+entry_init!(
+("class", CLASS_OBJECT.clone()),
+("class", CLASS_ATTRIBUTETYPE.clone()),
+("attributename", Value::new_iutf8("testattr")),
+("description", Value::Utf8("testattr".to_string())),
+("multivalue", Value::Bool(true)),
+("unique", Value::Bool(false)),
+("syntax", Value::Syntax(SyntaxType::Utf8String)),
+(
+"uuid",
+Value::Uuid(uuid::uuid!("db237e8a-0079-4b8c-8a56-593b22aa44d1"))
+)
+)
.into_invalid_new()
};
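These test rewrites trade raw JSON strings for the `entry_init!` builder macro. A rough, hypothetical sketch of how such a macro can be shaped -- this is an editor's illustration, not kanidm's actual macro definition:

// Editor's sketch of an entry_init!-style builder macro; Entry and
// add_ava here are toy stand-ins.
macro_rules! entry_init {
    ($(($attr:expr, $value:expr)),* $(,)?) => {{
        let mut e = Entry::default();
        $( e.add_ava($attr, $value); )*
        e
    }};
}

#[derive(Default)]
struct Entry {
    avas: Vec<(String, String)>,
}
impl Entry {
    fn add_ava(&mut self, a: &str, v: String) {
        self.avas.push((a.to_string(), v));
    }
}

fn demo() {
    // each ("attr", value) pair becomes one add call
    let e = entry_init!(("uuid", "db237e8a-...".to_string()));
    assert_eq!(e.avas.len(), 1);
}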
@@ -29,7 +29,6 @@ use uuid::Uuid;
use crate::entry::{Entry, EntryCommitted, EntryInit, EntryNew, EntryReduced, EntrySealed};
use crate::event::{CreateEvent, DeleteEvent, ModifyEvent, SearchEvent};
use crate::filter::{Filter, FilterValid, FilterValidResolved};
-use crate::identity::{AccessScope, IdentType, IdentityId};
use crate::modify::Modify;
use crate::prelude::*;

@@ -1547,9 +1546,9 @@ impl<'a> AccessControlsTransaction<'a> for AccessControlsReadTransaction<'a> {
// ACP transaction operations
// =========================================================================

-impl AccessControls {
+impl Default for AccessControls {
#![allow(clippy::expect_used)]
-pub fn new() -> Self {
+fn default() -> Self {
AccessControls {
inner: CowCell::new(AccessControlsInner {
acps_search: Vec::new(),

@@ -1566,7 +1565,9 @@ impl AccessControls {
.expect("Failed to construct acp_resolve_filter_cache"),
}
}
}

+impl AccessControls {
pub fn try_quiesce(&self) {
self.acp_resolve_filter_cache.try_quiesce();
}
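Replacing an argument-less `new()` with a `Default` impl, as the hunk above does, satisfies clippy's new-without-default lint and lets the type slot into `#[derive(Default)]` containers and generic `T: Default` bounds. A minimal sketch:

// Editor's sketch; Access/AccessInner are illustrative stand-ins for
// AccessControls and its inner state.
struct AccessInner {
    acps: Vec<u32>,
}

struct Access {
    inner: AccessInner,
}

impl Default for Access {
    fn default() -> Self {
        Access {
            inner: AccessInner { acps: Vec::new() },
        }
    }
}

fn demo() -> Access {
    // call sites change from Access::new() to:
    Access::default()
}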
@@ -1596,11 +1597,10 @@ mod tests {

use uuid::uuid;

-use crate::access::{
+use super::{
AccessControlCreate, AccessControlDelete, AccessControlModify, AccessControlProfile,
AccessControlSearch, AccessControls, AccessControlsTransaction, AccessEffectivePermission,
};
use crate::event::{CreateEvent, DeleteEvent, ModifyEvent, SearchEvent};
use crate::prelude::*;

const UUID_TEST_ACCOUNT_1: Uuid = uuid::uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930");

@@ -2055,7 +2055,7 @@ mod tests {
$entries:expr,
$expect:expr
) => {{
-let ac = AccessControls::new();
+let ac = AccessControls::default();
let mut acw = ac.write();
acw.update_search($controls).expect("Failed to update");
let acw = acw;

@@ -2077,7 +2077,7 @@ mod tests {
$entries:expr,
$expect:expr
) => {{
-let ac = AccessControls::new();
+let ac = AccessControls::default();
let mut acw = ac.write();
acw.update_search($controls).expect("Failed to update");
let acw = acw;

@@ -2362,7 +2362,7 @@ mod tests {
$entries:expr,
$expect:expr
) => {{
-let ac = AccessControls::new();
+let ac = AccessControls::default();
let mut acw = ac.write();
acw.update_modify($controls).expect("Failed to update");
let acw = acw;

@@ -2579,7 +2579,7 @@ mod tests {
$entries:expr,
$expect:expr
) => {{
-let ac = AccessControls::new();
+let ac = AccessControls::default();
let mut acw = ac.write();
acw.update_create($controls).expect("Failed to update");
let acw = acw;

@@ -2737,7 +2737,7 @@ mod tests {
$entries:expr,
$expect:expr
) => {{
-let ac = AccessControls::new();
+let ac = AccessControls::default();
let mut acw = ac.write();
acw.update_delete($controls).expect("Failed to update");
let acw = acw;

@@ -2838,7 +2838,7 @@ mod tests {
$entries:expr,
$expect:expr
) => {{
-let ac = AccessControls::new();
+let ac = AccessControls::default();
let mut acw = ac.write();
acw.update_search($search_controls)
.expect("Failed to update");
@@ -1,7 +1,5 @@
use super::QueryServerWriteTransaction;
use crate::prelude::*;
-// use std::collections::BTreeMap;
-use crate::access::AccessControlsTransaction;
use crate::server::Plugins;
use hashbrown::HashMap;

@@ -183,51 +181,40 @@ impl<'a> QueryServerWriteTransaction<'a> {
// We have finished all plugs and now have a successful operation - flag if
// schema or acp requires reload. Remember, this is a modify, so we need to check
// pre and post cands.
-if !self.changed_schema.get() {
-self.changed_schema.set(
-norm_cand
-.iter()
-.chain(pre_candidates.iter().map(|e| e.as_ref()))
-.any(|e| {
-e.attribute_equality("class", &PVCLASS_CLASSTYPE)
-|| e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
-}),
-)
+if !self.changed_schema {
+self.changed_schema = norm_cand
+.iter()
+.chain(pre_candidates.iter().map(|e| e.as_ref()))
+.any(|e| {
+e.attribute_equality("class", &PVCLASS_CLASSTYPE)
+|| e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
+});
}
-if !self.changed_acp.get() {
-self.changed_acp.set(
-norm_cand
-.iter()
-.chain(pre_candidates.iter().map(|e| e.as_ref()))
-.any(|e| e.attribute_equality("class", &PVCLASS_ACP)),
-)
+if !self.changed_acp {
+self.changed_acp = norm_cand
+.iter()
+.chain(pre_candidates.iter().map(|e| e.as_ref()))
+.any(|e| e.attribute_equality("class", &PVCLASS_ACP));
}
-if !self.changed_oauth2.get() {
-self.changed_oauth2.set(
-norm_cand
-.iter()
-.chain(pre_candidates.iter().map(|e| e.as_ref()))
-.any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)),
-)
+if !self.changed_oauth2 {
+self.changed_oauth2 = norm_cand
+.iter()
+.chain(pre_candidates.iter().map(|e| e.as_ref()))
+.any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS));
}
-if !self.changed_domain.get() {
-self.changed_domain.set(
-norm_cand
-.iter()
-.chain(pre_candidates.iter().map(|e| e.as_ref()))
-.any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)),
-)
+if !self.changed_domain {
+self.changed_domain = norm_cand
+.iter()
+.chain(pre_candidates.iter().map(|e| e.as_ref()))
+.any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO));
}

-let cu = self.changed_uuid.as_ptr();
-unsafe {
-(*cu).extend(
-norm_cand
-.iter()
-.map(|e| e.get_uuid())
-.chain(pre_candidates.iter().map(|e| e.get_uuid())),
-);
-}
+self.changed_uuid.extend(
+norm_cand
+.iter()
+.map(|e| e.get_uuid())
+.chain(pre_candidates.iter().map(|e| e.get_uuid())),
+);

trace!(
schema_reload = ?self.changed_schema,
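With every mutation path now taking `&mut self`, the `Cell` wrappers and the `as_ptr()` escape hatch above become unnecessary. A side-by-side sketch of the before/after shape (toy struct, not the real transaction):

// Editor's sketch of the Cell<bool> -> bool migration.
use std::cell::Cell;

// Before: interior mutability so &self methods could still flip flags.
struct TxnOld {
    changed_schema: Cell<bool>,
}
impl TxnOld {
    fn flag(&self) {
        self.changed_schema.set(true);
    }
}

// After: methods take &mut self, so a plain bool suffices and the
// unsafe pointer dance around changed_uuid disappears with it.
struct TxnNew {
    changed_schema: bool,
}
impl TxnNew {
    fn flag(&mut self) {
        self.changed_schema = true;
    }
}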
@@ -1 +1,213 @@
use crate::prelude::*;
use crate::server::CreateEvent;
use crate::server::Plugins;

impl<'a> QueryServerWriteTransaction<'a> {
#[instrument(level = "debug", skip_all)]
pub fn create(&mut self, ce: &CreateEvent) -> Result<(), OperationError> {
// The create event is a raw, read only representation of the request
// that was made to us, including information about the identity
// performing the request.
if !ce.ident.is_internal() {
security_info!(name = %ce.ident, "create initiator");
}

if ce.entries.is_empty() {
request_error!("create: empty create request");
return Err(OperationError::EmptyRequest);
}

// TODO #67: Do we need limits on number of creates, or do we constraint
// based on request size in the frontend?

// Copy the entries to a writeable form, this involves assigning a
// change id so we can track what's happening.
let candidates: Vec<Entry<EntryInit, EntryNew>> = ce.entries.clone();

// Do we have rights to perform these creates?
// create_allow_operation
let access = self.get_accesscontrols();
let op_allow = access
.create_allow_operation(ce, &candidates)
.map_err(|e| {
admin_error!("Failed to check create access {:?}", e);
e
})?;
if !op_allow {
return Err(OperationError::AccessDenied);
}

// Before we assign replication metadata, we need to assert these entries
// are valid to create within the set of replication transitions. This
// means they *can not* be recycled or tombstones!
if candidates.iter().any(|e| e.mask_recycled_ts().is_none()) {
admin_warn!("Refusing to create invalid entries that are attempting to bypass replication state machine.");
return Err(OperationError::AccessDenied);
}

// Assign our replication metadata now, since we can proceed with this operation.
let mut candidates: Vec<Entry<EntryInvalid, EntryNew>> = candidates
.into_iter()
.map(|e| e.assign_cid(self.cid.clone(), &self.schema))
.collect();

// run any pre plugins, giving them the list of mutable candidates.
// pre-plugins are defined here in their correct order of calling!
// I have no intent to make these dynamic or configurable.

Plugins::run_pre_create_transform(self, &mut candidates, ce).map_err(|e| {
admin_error!("Create operation failed (pre_transform plugin), {:?}", e);
e
})?;

// NOTE: This is how you map from Vec<Result<T>> to Result<Vec<T>>
// remember, that you only get the first error and the iter terminates.

// eprintln!("{:?}", candidates);

// Now, normalise AND validate!

let res: Result<Vec<Entry<EntrySealed, EntryNew>>, OperationError> = candidates
.into_iter()
.map(|e| {
e.validate(&self.schema)
.map_err(|e| {
admin_error!("Schema Violation in create validate {:?}", e);
OperationError::SchemaViolation(e)
})
.map(|e| {
// Then seal the changes?
e.seal(&self.schema)
})
})
.collect();

let norm_cand: Vec<Entry<_, _>> = res?;

// Run any pre-create plugins now with schema validated entries.
// This is important for normalisation of certain types IE class
// or attributes for these checks.
Plugins::run_pre_create(self, &norm_cand, ce).map_err(|e| {
admin_error!("Create operation failed (plugin), {:?}", e);
e
})?;

// We may change from ce.entries later to something else?
let commit_cand = self.be_txn.create(&self.cid, norm_cand).map_err(|e| {
admin_error!("betxn create failure {:?}", e);
e
})?;

// Run any post plugins

Plugins::run_post_create(self, &commit_cand, ce).map_err(|e| {
admin_error!("Create operation failed (post plugin), {:?}", e);
e
})?;

// We have finished all plugs and now have a successful operation - flag if
// schema or acp requires reload.
if !self.changed_schema {
self.changed_schema = commit_cand.iter().any(|e| {
e.attribute_equality("class", &PVCLASS_CLASSTYPE)
|| e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
});
}
if !self.changed_acp {
self.changed_acp = commit_cand
.iter()
.any(|e| e.attribute_equality("class", &PVCLASS_ACP));
}
if !self.changed_oauth2 {
self.changed_oauth2 = commit_cand
.iter()
.any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS));
}
if !self.changed_domain {
self.changed_domain = commit_cand
.iter()
.any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO));
}

self.changed_uuid
.extend(commit_cand.iter().map(|e| e.get_uuid()));
trace!(
schema_reload = ?self.changed_schema,
acp_reload = ?self.changed_acp,
oauth2_reload = ?self.changed_oauth2,
domain_reload = ?self.changed_domain,
);

// We are complete, finalise logging and return

if ce.ident.is_internal() {
trace!("Create operation success");
} else {
admin_info!("Create operation success");
}
Ok(())
}

pub fn internal_create(
&mut self,
entries: Vec<Entry<EntryInit, EntryNew>>,
) -> Result<(), OperationError> {
let ce = CreateEvent::new_internal(entries);
self.create(&ce)
}
}

#[cfg(test)]
mod tests {
use crate::prelude::*;
use std::sync::Arc;

#[qs_test]
async fn test_create_user(server: &QueryServer) {
let mut server_txn = server.write(duration_from_epoch_now()).await;
let filt = filter!(f_eq("name", PartialValue::new_iname("testperson")));
let admin = server_txn.internal_search_uuid(UUID_ADMIN).expect("failed");

let se1 = unsafe { SearchEvent::new_impersonate_entry(admin.clone(), filt.clone()) };
let se2 = unsafe { SearchEvent::new_impersonate_entry(admin, filt) };

let mut e = entry_init!(
("class", Value::new_class("object")),
("class", Value::new_class("person")),
("class", Value::new_class("account")),
("name", Value::new_iname("testperson")),
("spn", Value::new_spn_str("testperson", "example.com")),
(
"uuid",
Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"))
),
("description", Value::new_utf8s("testperson")),
("displayname", Value::new_utf8s("testperson"))
);

let ce = CreateEvent::new_internal(vec![e.clone()]);

let r1 = server_txn.search(&se1).expect("search failure");
assert!(r1.is_empty());

let cr = server_txn.create(&ce);
assert!(cr.is_ok());

let r2 = server_txn.search(&se2).expect("search failure");
debug!("--> {:?}", r2);
assert!(r2.len() == 1);

// We apply some member-of in the server now, so we add these before we seal.
e.add_ava("class", Value::new_class("memberof"));
e.add_ava("memberof", Value::Refer(UUID_IDM_ALL_PERSONS));
e.add_ava("directmemberof", Value::Refer(UUID_IDM_ALL_PERSONS));
e.add_ava("memberof", Value::Refer(UUID_IDM_ALL_ACCOUNTS));
e.add_ava("directmemberof", Value::Refer(UUID_IDM_ALL_ACCOUNTS));

let expected = unsafe { vec![Arc::new(e.into_sealed_committed())] };

assert_eq!(r2, expected);

assert!(server_txn.commit().is_ok());
}
}
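The `collect()` into `Result<Vec<_>, _>` used throughout `create` short-circuits on the first error, exactly as the NOTE in the code says. A self-contained sketch of the validate-then-seal pipeline shape (toy types, not kanidm's entry states):

// Editor's sketch of the fallible map-then-collect pattern.
struct Invalid(u32);
struct Sealed(u32);

fn validate(e: Invalid) -> Result<Sealed, String> {
    if e.0 == 0 {
        Err("schema violation".into())
    } else {
        Ok(Sealed(e.0))
    }
}

fn normalise(cands: Vec<Invalid>) -> Result<Vec<Sealed>, String> {
    // collect() stops at the first Err and returns it; otherwise it
    // yields the full Vec of sealed entries.
    cands.into_iter().map(validate).collect()
}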
@ -1 +1,245 @@
|
|||
use crate::plugins::Plugins;
use crate::prelude::*;
use crate::server::DeleteEvent;

impl<'a> QueryServerWriteTransaction<'a> {
    #[allow(clippy::cognitive_complexity)]
    #[instrument(level = "debug", skip_all)]
    pub fn delete(&mut self, de: &DeleteEvent) -> Result<(), OperationError> {
        // Do you have access to view all the set members? Reduce based on your
        // read permissions and attrs.
        // THIS IS PRETTY COMPLEX - SEE THE DESIGN DOC
        // In this case we need a search, but not INTERNAL, to keep the same
        // associated credentials.
        // We only need to retrieve uuid though ...
        if !de.ident.is_internal() {
            security_info!(name = %de.ident, "delete initiator");
        }

        // Now, delete only what you can see
        let pre_candidates = self
            .impersonate_search_valid(de.filter.clone(), de.filter_orig.clone(), &de.ident)
            .map_err(|e| {
                admin_error!("delete: error in pre-candidate selection {:?}", e);
                e
            })?;

        // Apply access controls to reduce the set if required.
        // delete_allow_operation
        let access = self.get_accesscontrols();
        let op_allow = access
            .delete_allow_operation(de, &pre_candidates)
            .map_err(|e| {
                admin_error!("Failed to check delete access {:?}", e);
                e
            })?;
        if !op_allow {
            return Err(OperationError::AccessDenied);
        }

        // Is the candidate set empty?
        if pre_candidates.is_empty() {
            request_error!(filter = ?de.filter, "delete: no candidates match filter");
            return Err(OperationError::NoMatchingEntries);
        };

        if pre_candidates.iter().any(|e| e.mask_tombstone().is_none()) {
            admin_warn!("Refusing to delete entries which may be an attempt to bypass replication state machine.");
            return Err(OperationError::AccessDenied);
        }

        let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
            .iter()
            // Invalidate and assign change id's
            .map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
            .collect();

        trace!(?candidates, "delete: candidates");

        // Pre delete plugins
        Plugins::run_pre_delete(self, &mut candidates, de).map_err(|e| {
            admin_error!("Delete operation failed (plugin), {:?}", e);
            e
        })?;

        trace!(?candidates, "delete: now marking candidates as recycled");

        let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> = candidates
            .into_iter()
            .map(|e| {
                e.to_recycled()
                    .validate(&self.schema)
                    .map_err(|e| {
                        admin_error!(err = ?e, "Schema Violation in delete validate");
                        OperationError::SchemaViolation(e)
                    })
                    // seal if it worked.
                    .map(|e| e.seal(&self.schema))
            })
            .collect();

        let del_cand: Vec<Entry<_, _>> = res?;

        self.be_txn
            .modify(&self.cid, &pre_candidates, &del_cand)
            .map_err(|e| {
                // be_txn is dropped, ie aborted here.
                admin_error!("Delete operation failed (backend), {:?}", e);
                e
            })?;

        // Post delete plugins
        Plugins::run_post_delete(self, &del_cand, de).map_err(|e| {
            admin_error!("Delete operation failed (plugin), {:?}", e);
            e
        })?;

        // We have finished all plugins and now have a successful operation - flag if
        // schema or acp requires reload.
        if !self.changed_schema {
            self.changed_schema = del_cand.iter().any(|e| {
                e.attribute_equality("class", &PVCLASS_CLASSTYPE)
                    || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
            });
        }
        if !self.changed_acp {
            self.changed_acp = del_cand
                .iter()
                .any(|e| e.attribute_equality("class", &PVCLASS_ACP));
        }
        if !self.changed_oauth2 {
            self.changed_oauth2 = del_cand
                .iter()
                .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS));
        }
        if !self.changed_domain {
            self.changed_domain = del_cand
                .iter()
                .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO));
        }

        self.changed_uuid
            .extend(del_cand.iter().map(|e| e.get_uuid()));

        trace!(
            schema_reload = ?self.changed_schema,
            acp_reload = ?self.changed_acp,
            oauth2_reload = ?self.changed_oauth2,
            domain_reload = ?self.changed_domain,
        );

        // Send result
        if de.ident.is_internal() {
            trace!("Delete operation success");
        } else {
            admin_info!("Delete operation success");
        }
        Ok(())
    }

    pub fn internal_delete(
        &mut self,
        filter: &Filter<FilterInvalid>,
    ) -> Result<(), OperationError> {
        let f_valid = filter
            .validate(self.get_schema())
            .map_err(OperationError::SchemaViolation)?;
        let de = DeleteEvent::new_internal(f_valid);
        self.delete(&de)
    }

    pub fn internal_delete_uuid(&mut self, target_uuid: Uuid) -> Result<(), OperationError> {
        let filter = filter!(f_eq("uuid", PartialValue::Uuid(target_uuid)));
        let f_valid = filter
            .validate(self.get_schema())
            .map_err(OperationError::SchemaViolation)?;
        let de = DeleteEvent::new_internal(f_valid);
        self.delete(&de)
    }
}
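
// Usage sketch (hypothetical caller; uuid value taken from the tests below):
// internal deletes validate the filter against schema first, then run the same
// plugin and recycle pipeline as an external delete, under the internal identity.
//
//     let target = uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930");
//     server_txn.internal_delete_uuid(target)?;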

#[cfg(test)]
mod tests {
    use crate::prelude::*;

    #[qs_test]
    async fn test_delete(server: &QueryServer) {
        // Create
        let mut server_txn = server.write(duration_from_epoch_now()).await;

        let e1 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname("testperson1")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"))
            ),
            ("description", Value::new_utf8s("testperson")),
            ("displayname", Value::new_utf8s("testperson1"))
        );

        let e2 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname("testperson2")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63932"))
            ),
            ("description", Value::new_utf8s("testperson")),
            ("displayname", Value::new_utf8s("testperson2"))
        );

        let e3 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname("testperson3")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63933"))
            ),
            ("description", Value::new_utf8s("testperson")),
            ("displayname", Value::new_utf8s("testperson3"))
        );

        let ce = CreateEvent::new_internal(vec![e1.clone(), e2.clone(), e3.clone()]);

        let cr = server_txn.create(&ce);
        assert!(cr.is_ok());

        // Delete filter is syntax invalid
        let de_inv =
            unsafe { DeleteEvent::new_internal_invalid(filter!(f_pres("nhtoaunaoehtnu"))) };
        assert!(server_txn.delete(&de_inv).is_err());

        // Delete deletes nothing
        let de_empty = unsafe {
            DeleteEvent::new_internal_invalid(filter!(f_eq(
                "uuid",
                PartialValue::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-000000000000"))
            )))
        };
        assert!(server_txn.delete(&de_empty).is_err());

        // Delete matches one
        let de_sin = unsafe {
            DeleteEvent::new_internal_invalid(filter!(f_eq(
                "name",
                PartialValue::new_iname("testperson3")
            )))
        };
        assert!(server_txn.delete(&de_sin).is_ok());

        // Delete matches many
        let de_mult = unsafe {
            DeleteEvent::new_internal_invalid(filter!(f_eq(
                "description",
                PartialValue::new_utf8s("testperson")
            )))
        };
        assert!(server_txn.delete(&de_mult).is_ok());

        assert!(server_txn.commit().is_ok());
    }
}

@ -3,6 +3,7 @@
//! and this provides the set of `Limits` to confine how many resources the
//! identity may consume during operations to prevent denial-of-service.

use crate::be::Limits;
use std::collections::BTreeSet;
use std::hash::Hash;
use std::sync::Arc;

@ -14,38 +15,6 @@ use serde::{Deserialize, Serialize};

use crate::prelude::*;

#[derive(Debug, Clone)]
/// Limits on the resources a single event can consume. These are defined per-event,
/// as they are derived from the userAuthToken based on that individual session.
pub struct Limits {
    pub unindexed_allow: bool,
    pub search_max_results: usize,
    pub search_max_filter_test: usize,
    pub filter_max_elements: usize,
}

impl Default for Limits {
    fn default() -> Self {
        Limits {
            unindexed_allow: false,
            search_max_results: 128,
            search_max_filter_test: 256,
            filter_max_elements: 32,
        }
    }
}

impl Limits {
    pub fn unlimited() -> Self {
        Limits {
            unindexed_allow: true,
            search_max_results: usize::MAX,
            search_max_filter_test: usize::MAX,
            filter_max_elements: usize::MAX,
        }
    }
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AccessScope {
    IdentityOnly,

816
kanidmd/lib/src/server/migrations.rs
Normal file

@ -0,0 +1,816 @@
use kanidm_proto::v1::SchemaError;
use std::time::Duration;

use crate::prelude::*;

use super::ServerPhase;

impl QueryServer {
    #[instrument(level = "info", name = "system_initialisation", skip_all)]
    pub async fn initialise_helper(&self, ts: Duration) -> Result<(), OperationError> {
        // Check our database version - attempt to do an initial indexing
        // based on the in memory configuration.
        //
        // If we ever change the core in memory schema, or the schema that we ship
        // in fixtures, we have to bump these values. This is how we manage the
        // first-run and upgrade reindexings.
        //
        // A major reason here to split to multiple transactions is to allow schema
        // reloading to occur, which causes the idxmeta to update, and allows validation
        // of the schema in the subsequent steps as we proceed.
        let mut reindex_write_1 = self.write(ts).await;
        reindex_write_1
            .upgrade_reindex(SYSTEM_INDEX_VERSION)
            .and_then(|_| reindex_write_1.commit())?;

        // Because we init the schema here, and commit, this reloads meaning
        // that the on-disk index meta has been loaded, so our subsequent
        // migrations will be correctly indexed.
        //
        // Remember, that this would normally mean that it's possible for schema
        // to be mis-indexed (IE we index the new schemas here before we read
        // the schema to tell us what's indexed), but because we have the in
        // mem schema that defines how schema is structured, and this is all
        // marked "system", then we won't have an issue here.
        let mut ts_write_1 = self.write(ts).await;
        ts_write_1
            .initialise_schema_core()
            .and_then(|_| ts_write_1.commit())?;

        let mut ts_write_2 = self.write(ts).await;
        ts_write_2
            .initialise_schema_idm()
            .and_then(|_| ts_write_2.commit())?;

        // Reindex and set to version + 1. This way when we bump the version
        // we are essentially pushing this version id back up to step write_1.
        let mut reindex_write_2 = self.write(ts).await;
        reindex_write_2
            .upgrade_reindex(SYSTEM_INDEX_VERSION + 1)
            .and_then(|_| reindex_write_2.commit())?;

        // Force the schema to reload - this is so that any changes to index slope
        // analysis are now reflected correctly.
        //
        // A side effect of these reloads is that other plugins or elements that reload
        // on schema change are now setup.
        let mut slope_reload = self.write(ts).await;
        slope_reload.set_phase(ServerPhase::SchemaReady);
        slope_reload.force_schema_reload();
        slope_reload.commit()?;

        // Now, based on the system version, apply migrations. You may ask "should you not
        // be doing migrations before indexes?". And this is a very good question! The issue
        // is that within a migration we must be able to search for content by pres index, and
        // those rely on us being indexed! It *is* safe to index content even if the
        // migration would cause a value type change (ie name changing from iutf8s to iname) because
        // the indexing subsystem is schema/value agnostic - the fact the values still let their keys
        // be extracted, means that the pres indexes will be valid even though the entries are pending
        // migration. We must be sure to NOT use EQ/SUB indexes in the migration code however!
        let mut migrate_txn = self.write(ts).await;
        // If we are "in the process of being setup" this is 0, and the migrations will have no
        // effect as ... there is nothing to migrate! It allows reset of the version to 0 to force
        // db migrations to take place.
        let system_info_version = match migrate_txn.internal_search_uuid(UUID_SYSTEM_INFO) {
            Ok(e) => Ok(e.get_ava_single_uint32("version").unwrap_or(0)),
            Err(OperationError::NoMatchingEntries) => Ok(0),
            Err(r) => Err(r),
        }?;
        admin_debug!(?system_info_version);

        if system_info_version < 3 {
            migrate_txn.migrate_2_to_3()?;
        }

        if system_info_version < 4 {
            migrate_txn.migrate_3_to_4()?;
        }

        if system_info_version < 5 {
            migrate_txn.migrate_4_to_5()?;
        }

        if system_info_version < 6 {
            migrate_txn.migrate_5_to_6()?;
        }

        if system_info_version < 7 {
            migrate_txn.migrate_6_to_7()?;
        }

        if system_info_version < 8 {
            migrate_txn.migrate_7_to_8()?;
        }

        if system_info_version < 9 {
            migrate_txn.migrate_8_to_9()?;
        }

        migrate_txn.commit()?;
        // Migrations complete. Init idm will now set the version as needed.

        let mut ts_write_3 = self.write(ts).await;
        ts_write_3.initialise_idm().and_then(|_| {
            ts_write_3.set_phase(ServerPhase::Running);
            ts_write_3.commit()
        })?;
        // TODO: work out if we've actually done any migrations before printing this
        admin_debug!("Database version check and migrations success! ☀️ ");
        Ok(())
    }
}
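
// Extension sketch (hypothetical, not part of this commit): each new database
// version adds one more gate to the chain above, keeping the migrations
// idempotent and strictly ordered.
//
//     if system_info_version < 10 {
//         migrate_txn.migrate_9_to_10()?;
//     }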

impl<'a> QueryServerWriteTransaction<'a> {
    #[instrument(level = "debug", skip_all)]
    pub fn internal_migrate_or_create_str(&mut self, e_str: &str) -> Result<(), OperationError> {
        let res = Entry::from_proto_entry_str(e_str, self)
            /*
            .and_then(|e: Entry<EntryInvalid, EntryNew>| {
                let schema = self.get_schema();
                e.validate(schema).map_err(OperationError::SchemaViolation)
            })
            */
            .and_then(|e: Entry<EntryInit, EntryNew>| self.internal_migrate_or_create(e));
        trace!(?res);
        debug_assert!(res.is_ok());
        res
    }

    pub fn internal_migrate_or_create(
        &mut self,
        e: Entry<EntryInit, EntryNew>,
    ) -> Result<(), OperationError> {
        // If the thing exists, ensure the set of attributes on
        // Entry A match and are present (but don't delete multivalue or extended
        // attributes in the situation).
        // If not exist, create from Entry B.
        //
        // This will leave extra classes and attributes alone!
        //
        // NOTE: gen modlist IS schema aware and will handle multivalue
        // correctly!
        trace!("internal_migrate_or_create operating on {:?}", e.get_uuid());

        let filt = match e.filter_from_attrs(&[AttrString::from("uuid")]) {
            Some(f) => f,
            None => return Err(OperationError::FilterGeneration),
        };

        trace!("internal_migrate_or_create search {:?}", filt);

        let results = self.internal_search(filt.clone())?;

        if results.is_empty() {
            // It does not exist. Create it.
            self.internal_create(vec![e])
        } else if results.len() == 1 {
            // If the thing is subset, pass
            match e.gen_modlist_assert(&self.schema) {
                Ok(modlist) => {
                    // Apply to &results[0]
                    trace!("Generated modlist -> {:?}", modlist);
                    self.internal_modify(&filt, &modlist)
                }
                Err(e) => Err(OperationError::SchemaViolation(e)),
            }
        } else {
            admin_error!(
                "Invalid Result Set - Expected One Entry for {:?} - {:?}",
                filt,
                results
            );
            Err(OperationError::InvalidDbState)
        }
    }
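
    // Upsert sketch (hypothetical entry; assumes this runs inside a write
    // transaction): the same call either creates the entry, or asserts its
    // attributes match, leaving extra classes and attributes alone.
    //
    //     let e = entry_init!(
    //         ("class", Value::new_class("object")),
    //         ("uuid", Value::Uuid(uuid!("00000000-0000-0000-0000-0000000000ff")))
    //     );
    //     self.internal_migrate_or_create(e)?;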

    /// Migrate 2 to 3 changes the name, domain_name types from iutf8 to iname.
    #[instrument(level = "debug", skip_all)]
    pub fn migrate_2_to_3(&mut self) -> Result<(), OperationError> {
        admin_warn!("starting 2 to 3 migration. THIS MAY TAKE A LONG TIME!");
        // Get all entries where pres name or domain_name. INCLUDE TS + RECYCLE.

        let filt = filter_all!(f_or!([f_pres("name"), f_pres("domain_name"),]));

        let pre_candidates = self.internal_search(filt).map_err(|e| {
            admin_error!(err = ?e, "migrate_2_to_3 internal search failure");
            e
        })?;

        // If there is nothing, we don't need to do anything.
        if pre_candidates.is_empty() {
            admin_info!("migrate_2_to_3 no entries to migrate, complete");
            return Ok(());
        }

        // Change the value type.
        let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
            .iter()
            .map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
            .collect();

        candidates.iter_mut().try_for_each(|er| {
            let nvs = if let Some(vs) = er.get_ava_set("name") {
                vs.migrate_iutf8_iname()?
            } else {
                None
            };
            if let Some(nvs) = nvs {
                er.set_ava_set("name", nvs)
            }

            let nvs = if let Some(vs) = er.get_ava_set("domain_name") {
                vs.migrate_iutf8_iname()?
            } else {
                None
            };
            if let Some(nvs) = nvs {
                er.set_ava_set("domain_name", nvs)
            }

            Ok(())
        })?;

        // Schema check all.
        let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, SchemaError> = candidates
            .into_iter()
            .map(|e| e.validate(&self.schema).map(|e| e.seal(&self.schema)))
            .collect();

        let norm_cand: Vec<Entry<_, _>> = match res {
            Ok(v) => v,
            Err(e) => {
                admin_error!("migrate_2_to_3 schema error -> {:?}", e);
                return Err(OperationError::SchemaViolation(e));
            }
        };

        // Write them back.
        self.be_txn
            .modify(&self.cid, &pre_candidates, &norm_cand)
            .map_err(|e| {
                admin_error!("migrate_2_to_3 modification failure -> {:?}", e);
                e
            })
        // Complete
    }
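
    // The bulk-rewrite pattern above, sketched for orientation (the names are
    // the ones used in this file): search the affected entries, invalidate
    // them with the current change id, mutate the value sets in place, then
    // schema-validate, seal, and hand the pre/post pairs to the backend.
    //
    //     let pre = self.internal_search(filt)?;
    //     let mut cand: Vec<_> = pre
    //         .iter()
    //         .map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
    //         .collect();
    //     // ... mutate cand, then validate + seal each entry ...
    //     // self.be_txn.modify(&self.cid, &pre, &sealed)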

    /// Migrate 3 to 4 - this triggers a regen of the domain's security token,
    /// as we previously did not have it in the entry.
    #[instrument(level = "debug", skip_all)]
    pub fn migrate_3_to_4(&mut self) -> Result<(), OperationError> {
        admin_warn!("starting 3 to 4 migration.");
        let filter = filter!(f_eq("uuid", (*PVUUID_DOMAIN_INFO).clone()));
        let modlist = ModifyList::new_purge("domain_token_key");
        self.internal_modify(&filter, &modlist)
        // Complete
    }

    /// Migrate 4 to 5 - this triggers a regen of all oauth2 RS es256 der keys,
    /// as we previously did not generate them on entry creation.
    #[instrument(level = "debug", skip_all)]
    pub fn migrate_4_to_5(&mut self) -> Result<(), OperationError> {
        admin_warn!("starting 4 to 5 migration.");
        let filter = filter!(f_and!([
            f_eq("class", (*PVCLASS_OAUTH2_RS).clone()),
            f_andnot(f_pres("es256_private_key_der")),
        ]));
        let modlist = ModifyList::new_purge("es256_private_key_der");
        self.internal_modify(&filter, &modlist)
        // Complete
    }

    /// Migrate 5 to 6 - This updates the domain info item to reset the token
    /// keys based on the new encryption types.
    #[instrument(level = "debug", skip_all)]
    pub fn migrate_5_to_6(&mut self) -> Result<(), OperationError> {
        admin_warn!("starting 5 to 6 migration.");
        let filter = filter!(f_eq("uuid", (*PVUUID_DOMAIN_INFO).clone()));
        let mut modlist = ModifyList::new_purge("domain_token_key");
        // We need to also push the version here so that we pass schema.
        modlist.push_mod(Modify::Present(
            AttrString::from("version"),
            Value::Uint32(0),
        ));
        self.internal_modify(&filter, &modlist)
        // Complete
    }

    /// Migrate 6 to 7
    ///
    /// Modify accounts that are not persons to be service accounts, so that the extension
    /// rules remain valid.
    #[instrument(level = "debug", skip_all)]
    pub fn migrate_6_to_7(&mut self) -> Result<(), OperationError> {
        admin_warn!("starting 6 to 7 migration.");
        let filter = filter!(f_and!([
            f_eq("class", (*PVCLASS_ACCOUNT).clone()),
            f_andnot(f_eq("class", (*PVCLASS_PERSON).clone())),
        ]));
        let modlist = ModifyList::new_append("class", Value::new_class("service_account"));
        self.internal_modify(&filter, &modlist)
        // Complete
    }

    /// Migrate 7 to 8
    ///
    /// Touch all service accounts to trigger a regen of their es256 jws keys for api tokens.
    #[instrument(level = "debug", skip_all)]
    pub fn migrate_7_to_8(&mut self) -> Result<(), OperationError> {
        admin_warn!("starting 7 to 8 migration.");
        let filter = filter!(f_eq("class", (*PVCLASS_SERVICE_ACCOUNT).clone()));
        let modlist = ModifyList::new_append("class", Value::new_class("service_account"));
        self.internal_modify(&filter, &modlist)
        // Complete
    }

    /// Migrate 8 to 9
    ///
    /// This migration updates properties of oauth2 relying server entries. First, it changes
    /// the former basic value to a secret utf8string.
    ///
    /// The second change improves the current scope system to remove the implicit scope type.
    #[instrument(level = "debug", skip_all)]
    pub fn migrate_8_to_9(&mut self) -> Result<(), OperationError> {
        admin_warn!("starting 8 to 9 migration.");
        let filt = filter_all!(f_or!([
            f_eq("class", PVCLASS_OAUTH2_RS.clone()),
            f_eq("class", PVCLASS_OAUTH2_BASIC.clone()),
        ]));

        let pre_candidates = self.internal_search(filt).map_err(|e| {
            admin_error!(err = ?e, "migrate_8_to_9 internal search failure");
            e
        })?;

        // If there is nothing, we don't need to do anything.
        if pre_candidates.is_empty() {
            admin_info!("migrate_8_to_9 no entries to migrate, complete");
            return Ok(());
        }

        // Change the value type.
        let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
            .iter()
            .map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
            .collect();

        candidates.iter_mut().try_for_each(|er| {
            // Migrate basic secrets if they exist.
            let nvs = er
                .get_ava_set("oauth2_rs_basic_secret")
                .and_then(|vs| vs.as_utf8_iter())
                .and_then(|vs_iter| {
                    ValueSetSecret::from_iter(vs_iter.map(|s: &str| s.to_string()))
                });
            if let Some(nvs) = nvs {
                er.set_ava_set("oauth2_rs_basic_secret", nvs)
            }

            // Migrate implicit scopes if they exist.
            let nv = if let Some(vs) = er.get_ava_set("oauth2_rs_implicit_scopes") {
                vs.as_oauthscope_set()
                    .map(|v| Value::OauthScopeMap(UUID_IDM_ALL_PERSONS, v.clone()))
            } else {
                None
            };

            if let Some(nv) = nv {
                er.add_ava("oauth2_rs_scope_map", nv)
            }
            er.purge_ava("oauth2_rs_implicit_scopes");

            Ok(())
        })?;

        // Schema check all.
        let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, SchemaError> = candidates
            .into_iter()
            .map(|e| e.validate(&self.schema).map(|e| e.seal(&self.schema)))
            .collect();

        let norm_cand: Vec<Entry<_, _>> = match res {
            Ok(v) => v,
            Err(e) => {
                admin_error!("migrate_8_to_9 schema error -> {:?}", e);
                return Err(OperationError::SchemaViolation(e));
            }
        };

        // Write them back.
        self.be_txn
            .modify(&self.cid, &pre_candidates, &norm_cand)
            .map_err(|e| {
                admin_error!("migrate_8_to_9 modification failure -> {:?}", e);
                e
            })
        // Complete
    }

    #[instrument(level = "info", skip_all)]
    pub fn initialise_schema_core(&mut self) -> Result<(), OperationError> {
        admin_debug!("initialise_schema_core -> start ...");
        // Load in all the "core" schema, that we already have in "memory".
        let entries = self.schema.to_entries();

        // admin_debug!("Dumping schemas: {:?}", entries);

        // internal_migrate_or_create.
        let r: Result<_, _> = entries.into_iter().try_for_each(|e| {
            trace!(?e, "init schema entry");
            self.internal_migrate_or_create(e)
        });
        if r.is_ok() {
            admin_debug!("initialise_schema_core -> Ok!");
        } else {
            admin_error!(?r, "initialise_schema_core -> Error");
        }
        // why do we have error handling if it's always supposed to be `Ok`?
        debug_assert!(r.is_ok());
        r
    }

    #[instrument(level = "info", skip_all)]
    pub fn initialise_schema_idm(&mut self) -> Result<(), OperationError> {
        admin_debug!("initialise_schema_idm -> start ...");
        // List of IDM schemas to init.
        let idm_schema: Vec<&str> = vec![
            JSON_SCHEMA_ATTR_DISPLAYNAME,
            JSON_SCHEMA_ATTR_LEGALNAME,
            JSON_SCHEMA_ATTR_MAIL,
            JSON_SCHEMA_ATTR_SSH_PUBLICKEY,
            JSON_SCHEMA_ATTR_PRIMARY_CREDENTIAL,
            JSON_SCHEMA_ATTR_RADIUS_SECRET,
            JSON_SCHEMA_ATTR_DOMAIN_NAME,
            JSON_SCHEMA_ATTR_DOMAIN_DISPLAY_NAME,
            JSON_SCHEMA_ATTR_DOMAIN_UUID,
            JSON_SCHEMA_ATTR_DOMAIN_SSID,
            JSON_SCHEMA_ATTR_DOMAIN_TOKEN_KEY,
            JSON_SCHEMA_ATTR_FERNET_PRIVATE_KEY_STR,
            JSON_SCHEMA_ATTR_GIDNUMBER,
            JSON_SCHEMA_ATTR_BADLIST_PASSWORD,
            JSON_SCHEMA_ATTR_LOGINSHELL,
            JSON_SCHEMA_ATTR_UNIX_PASSWORD,
            JSON_SCHEMA_ATTR_ACCOUNT_EXPIRE,
            JSON_SCHEMA_ATTR_ACCOUNT_VALID_FROM,
            JSON_SCHEMA_ATTR_OAUTH2_RS_NAME,
            JSON_SCHEMA_ATTR_OAUTH2_RS_ORIGIN,
            JSON_SCHEMA_ATTR_OAUTH2_RS_SCOPE_MAP,
            JSON_SCHEMA_ATTR_OAUTH2_RS_IMPLICIT_SCOPES,
            JSON_SCHEMA_ATTR_OAUTH2_RS_BASIC_SECRET,
            JSON_SCHEMA_ATTR_OAUTH2_RS_TOKEN_KEY,
            JSON_SCHEMA_ATTR_ES256_PRIVATE_KEY_DER,
            JSON_SCHEMA_ATTR_OAUTH2_ALLOW_INSECURE_CLIENT_DISABLE_PKCE,
            JSON_SCHEMA_ATTR_OAUTH2_JWT_LEGACY_CRYPTO_ENABLE,
            JSON_SCHEMA_ATTR_RS256_PRIVATE_KEY_DER,
            JSON_SCHEMA_ATTR_CREDENTIAL_UPDATE_INTENT_TOKEN,
            JSON_SCHEMA_ATTR_OAUTH2_CONSENT_SCOPE_MAP,
            JSON_SCHEMA_ATTR_PASSKEYS,
            JSON_SCHEMA_ATTR_DEVICEKEYS,
            JSON_SCHEMA_ATTR_DYNGROUP_FILTER,
            JSON_SCHEMA_ATTR_JWS_ES256_PRIVATE_KEY,
            JSON_SCHEMA_ATTR_API_TOKEN_SESSION,
            JSON_SCHEMA_ATTR_OAUTH2_RS_SUP_SCOPE_MAP,
            JSON_SCHEMA_ATTR_USER_AUTH_TOKEN_SESSION,
            JSON_SCHEMA_ATTR_OAUTH2_SESSION,
            JSON_SCHEMA_ATTR_NSUNIQUEID,
            JSON_SCHEMA_ATTR_OAUTH2_PREFER_SHORT_USERNAME,
            JSON_SCHEMA_ATTR_SYNC_TOKEN_SESSION,
            JSON_SCHEMA_ATTR_SYNC_COOKIE,
            JSON_SCHEMA_ATTR_GRANT_UI_HINT,
            JSON_SCHEMA_ATTR_OAUTH2_RS_ORIGIN_LANDING,
            JSON_SCHEMA_CLASS_PERSON,
            JSON_SCHEMA_CLASS_ORGPERSON,
            JSON_SCHEMA_CLASS_GROUP,
            JSON_SCHEMA_CLASS_DYNGROUP,
            JSON_SCHEMA_CLASS_ACCOUNT,
            JSON_SCHEMA_CLASS_SERVICE_ACCOUNT,
            JSON_SCHEMA_CLASS_DOMAIN_INFO,
            JSON_SCHEMA_CLASS_POSIXACCOUNT,
            JSON_SCHEMA_CLASS_POSIXGROUP,
            JSON_SCHEMA_CLASS_SYSTEM_CONFIG,
            JSON_SCHEMA_CLASS_OAUTH2_RS,
            JSON_SCHEMA_CLASS_OAUTH2_RS_BASIC,
            JSON_SCHEMA_CLASS_SYNC_ACCOUNT,
        ];

        let r = idm_schema
            .iter()
            // Each item individually logs its result
            .try_for_each(|e_str| self.internal_migrate_or_create_str(e_str));

        if r.is_ok() {
            admin_debug!("initialise_schema_idm -> Ok!");
        } else {
            admin_error!(res = ?r, "initialise_schema_idm -> Error");
        }
        debug_assert!(r.is_ok()); // why return a result if we assert it's `Ok`?

        r
    }

    // This function is idempotent
    #[instrument(level = "info", skip_all)]
    pub fn initialise_idm(&mut self) -> Result<(), OperationError> {
        // First, check the system_info object. This stores some server information
        // and details. It's a pretty const thing. Also check anonymous, important to many
        // concepts.
        let res = self
            .internal_migrate_or_create_str(JSON_SYSTEM_INFO_V1)
            .and_then(|_| self.internal_migrate_or_create_str(JSON_DOMAIN_INFO_V1))
            .and_then(|_| self.internal_migrate_or_create_str(JSON_SYSTEM_CONFIG_V1));
        if res.is_err() {
            admin_error!("initialise_idm p1 -> result {:?}", res);
        }
        debug_assert!(res.is_ok());
        res?;

        // The domain info now exists, we should be able to do these migrations as they will
        // cause SPN regenerations to occur.

        // Check the admin object exists (migrations).
        // Create the default idm_admin group.
        let admin_entries = [
            JSON_ANONYMOUS_V1,
            JSON_ADMIN_V1,
            JSON_IDM_ADMIN_V1,
            JSON_IDM_ADMINS_V1,
            JSON_SYSTEM_ADMINS_V1,
        ];
        let res: Result<(), _> = admin_entries
            .iter()
            // Each item individually logs its result
            .try_for_each(|e_str| self.internal_migrate_or_create_str(e_str));
        if res.is_err() {
            admin_error!("initialise_idm p2 -> result {:?}", res);
        }
        debug_assert!(res.is_ok());
        res?;

        // Create any system default schema entries.

        // Create any system default access profile entries.
        let idm_entries = [
            // Builtin dyn groups,
            JSON_IDM_ALL_PERSONS,
            JSON_IDM_ALL_ACCOUNTS,
            // Builtin groups
            JSON_IDM_PEOPLE_MANAGE_PRIV_V1,
            JSON_IDM_PEOPLE_ACCOUNT_PASSWORD_IMPORT_PRIV_V1,
            JSON_IDM_PEOPLE_EXTEND_PRIV_V1,
            JSON_IDM_PEOPLE_SELF_WRITE_MAIL_PRIV_V1,
            JSON_IDM_PEOPLE_WRITE_PRIV_V1,
            JSON_IDM_PEOPLE_READ_PRIV_V1,
            JSON_IDM_HP_PEOPLE_EXTEND_PRIV_V1,
            JSON_IDM_HP_PEOPLE_WRITE_PRIV_V1,
            JSON_IDM_HP_PEOPLE_READ_PRIV_V1,
            JSON_IDM_GROUP_MANAGE_PRIV_V1,
            JSON_IDM_GROUP_WRITE_PRIV_V1,
            JSON_IDM_GROUP_UNIX_EXTEND_PRIV_V1,
            JSON_IDM_ACCOUNT_MANAGE_PRIV_V1,
            JSON_IDM_ACCOUNT_WRITE_PRIV_V1,
            JSON_IDM_ACCOUNT_UNIX_EXTEND_PRIV_V1,
            JSON_IDM_ACCOUNT_READ_PRIV_V1,
            JSON_IDM_RADIUS_SECRET_WRITE_PRIV_V1,
            JSON_IDM_RADIUS_SECRET_READ_PRIV_V1,
            JSON_IDM_RADIUS_SERVERS_V1,
            // Write deps on read, so write must be added first.
            JSON_IDM_HP_ACCOUNT_MANAGE_PRIV_V1,
            JSON_IDM_HP_ACCOUNT_WRITE_PRIV_V1,
            JSON_IDM_HP_ACCOUNT_READ_PRIV_V1,
            JSON_IDM_HP_ACCOUNT_UNIX_EXTEND_PRIV_V1,
            JSON_IDM_SCHEMA_MANAGE_PRIV_V1,
            JSON_IDM_HP_GROUP_MANAGE_PRIV_V1,
            JSON_IDM_HP_GROUP_WRITE_PRIV_V1,
            JSON_IDM_HP_GROUP_UNIX_EXTEND_PRIV_V1,
            JSON_IDM_ACP_MANAGE_PRIV_V1,
            JSON_DOMAIN_ADMINS,
            JSON_IDM_HP_OAUTH2_MANAGE_PRIV_V1,
            JSON_IDM_HP_SERVICE_ACCOUNT_INTO_PERSON_MIGRATE_PRIV,
            JSON_IDM_HP_SYNC_ACCOUNT_MANAGE_PRIV,
            // All members must exist before we write HP
            JSON_IDM_HIGH_PRIVILEGE_V1,
            // Built in access controls.
            JSON_IDM_ADMINS_ACP_RECYCLE_SEARCH_V1,
            JSON_IDM_ADMINS_ACP_REVIVE_V1,
            // JSON_IDM_ADMINS_ACP_MANAGE_V1,
            JSON_IDM_ALL_ACP_READ_V1,
            JSON_IDM_SELF_ACP_READ_V1,
            JSON_IDM_SELF_ACP_WRITE_V1,
            JSON_IDM_PEOPLE_SELF_ACP_WRITE_MAIL_PRIV_V1,
            JSON_IDM_ACP_PEOPLE_READ_PRIV_V1,
            JSON_IDM_ACP_PEOPLE_WRITE_PRIV_V1,
            JSON_IDM_ACP_PEOPLE_MANAGE_PRIV_V1,
            JSON_IDM_ACP_GROUP_WRITE_PRIV_V1,
            JSON_IDM_ACP_GROUP_MANAGE_PRIV_V1,
            JSON_IDM_ACP_ACCOUNT_READ_PRIV_V1,
            JSON_IDM_ACP_ACCOUNT_WRITE_PRIV_V1,
            JSON_IDM_ACP_ACCOUNT_MANAGE_PRIV_V1,
            JSON_IDM_ACP_RADIUS_SERVERS_V1,
            JSON_IDM_ACP_HP_ACCOUNT_READ_PRIV_V1,
            JSON_IDM_ACP_HP_ACCOUNT_WRITE_PRIV_V1,
            JSON_IDM_ACP_HP_ACCOUNT_MANAGE_PRIV_V1,
            JSON_IDM_ACP_HP_GROUP_WRITE_PRIV_V1,
            JSON_IDM_ACP_HP_GROUP_MANAGE_PRIV_V1,
            JSON_IDM_ACP_SCHEMA_WRITE_ATTRS_PRIV_V1,
            JSON_IDM_ACP_SCHEMA_WRITE_CLASSES_PRIV_V1,
            JSON_IDM_ACP_ACP_MANAGE_PRIV_V1,
            JSON_IDM_ACP_DOMAIN_ADMIN_PRIV_V1,
            JSON_IDM_ACP_SYSTEM_CONFIG_PRIV_V1,
            JSON_IDM_ACP_ACCOUNT_UNIX_EXTEND_PRIV_V1,
            JSON_IDM_ACP_GROUP_UNIX_EXTEND_PRIV_V1,
            JSON_IDM_ACP_PEOPLE_ACCOUNT_PASSWORD_IMPORT_PRIV_V1,
            JSON_IDM_ACP_PEOPLE_EXTEND_PRIV_V1,
            JSON_IDM_ACP_HP_PEOPLE_READ_PRIV_V1,
            JSON_IDM_ACP_HP_PEOPLE_WRITE_PRIV_V1,
            JSON_IDM_ACP_HP_PEOPLE_EXTEND_PRIV_V1,
            JSON_IDM_HP_ACP_ACCOUNT_UNIX_EXTEND_PRIV_V1,
            JSON_IDM_HP_ACP_GROUP_UNIX_EXTEND_PRIV_V1,
            JSON_IDM_HP_ACP_OAUTH2_MANAGE_PRIV_V1,
            JSON_IDM_ACP_RADIUS_SECRET_READ_PRIV_V1,
            JSON_IDM_ACP_RADIUS_SECRET_WRITE_PRIV_V1,
            JSON_IDM_HP_ACP_SERVICE_ACCOUNT_INTO_PERSON_MIGRATE_V1,
            JSON_IDM_ACP_OAUTH2_READ_PRIV_V1,
            JSON_IDM_HP_ACP_SYNC_ACCOUNT_MANAGE_PRIV_V1,
        ];

        let res: Result<(), _> = idm_entries
            .iter()
            .try_for_each(|e_str| self.internal_migrate_or_create_str(e_str));
        if res.is_ok() {
            admin_debug!("initialise_idm -> result Ok!");
        } else {
            admin_error!(?res, "initialise_idm p3 -> result");
        }
        debug_assert!(res.is_ok());
        res?;

        let idm_entries = [
            E_IDM_UI_ENABLE_EXPERIMENTAL_FEATURES.clone(),
            E_IDM_ACCOUNT_MAIL_READ_PRIV.clone(),
            E_IDM_ACP_ACCOUNT_MAIL_READ_PRIV_V1.clone(),
        ];

        let res: Result<(), _> = idm_entries
            .into_iter()
            .try_for_each(|entry| self.internal_migrate_or_create(entry));
        if res.is_ok() {
            admin_debug!("initialise_idm -> result Ok!");
        } else {
            admin_error!(?res, "initialise_idm p3 -> result");
        }
        debug_assert!(res.is_ok());
        res?;

        self.changed_schema = true;
        self.changed_acp = true;

        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use crate::prelude::*;

    #[qs_test]
    async fn test_init_idempotent_schema_core(server: &QueryServer) {
        {
            // Setup and abort.
            let mut server_txn = server.write(duration_from_epoch_now()).await;
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            let mut server_txn = server.write(duration_from_epoch_now()).await;
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
        {
            // Now do it again in a new txn, but abort.
            let mut server_txn = server.write(duration_from_epoch_now()).await;
            assert!(server_txn.initialise_schema_core().is_ok());
        }
        {
            // Now do it again in a new txn.
            let mut server_txn = server.write(duration_from_epoch_now()).await;
            assert!(server_txn.initialise_schema_core().is_ok());
            assert!(server_txn.commit().is_ok());
        }
    }

    #[qs_test_no_init]
    async fn test_qs_upgrade_entry_attrs(server: &QueryServer) {
        let mut server_txn = server.write(duration_from_epoch_now()).await;
        assert!(server_txn.upgrade_reindex(SYSTEM_INDEX_VERSION).is_ok());
        assert!(server_txn.commit().is_ok());

        let mut server_txn = server.write(duration_from_epoch_now()).await;
        server_txn.initialise_schema_core().unwrap();
        server_txn.initialise_schema_idm().unwrap();
        assert!(server_txn.commit().is_ok());

        let mut server_txn = server.write(duration_from_epoch_now()).await;
        assert!(server_txn.upgrade_reindex(SYSTEM_INDEX_VERSION + 1).is_ok());
        assert!(server_txn.commit().is_ok());

        let mut server_txn = server.write(duration_from_epoch_now()).await;
        assert!(server_txn
            .internal_migrate_or_create_str(JSON_SYSTEM_INFO_V1)
            .is_ok());
        assert!(server_txn
            .internal_migrate_or_create_str(JSON_DOMAIN_INFO_V1)
            .is_ok());
        assert!(server_txn
            .internal_migrate_or_create_str(JSON_SYSTEM_CONFIG_V1)
            .is_ok());
        assert!(server_txn.commit().is_ok());

        let mut server_txn = server.write(duration_from_epoch_now()).await;
        // ++ Mod the schema to set name to the old string type
        let me_syn = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_or!([
                    f_eq("attributename", PartialValue::new_iutf8("name")),
                    f_eq("attributename", PartialValue::new_iutf8("domain_name")),
                ])),
                ModifyList::new_purge_and_set(
                    "syntax",
                    Value::new_syntaxs("UTF8STRING_INSENSITIVE").unwrap(),
                ),
            )
        };
        assert!(server_txn.modify(&me_syn).is_ok());
        assert!(server_txn.commit().is_ok());

        let mut server_txn = server.write(duration_from_epoch_now()).await;
        // ++ Mod domain name and name to be the old type.
        let me_dn = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_eq("uuid", PartialValue::Uuid(UUID_DOMAIN_INFO))),
                ModifyList::new_list(vec![
                    Modify::Purged(AttrString::from("name")),
                    Modify::Purged(AttrString::from("domain_name")),
                    Modify::Present(AttrString::from("name"), Value::new_iutf8("domain_local")),
                    Modify::Present(
                        AttrString::from("domain_name"),
                        Value::new_iutf8("example.com"),
                    ),
                ]),
            )
        };
        assert!(server_txn.modify(&me_dn).is_ok());

        // Now, both the types are invalid.

        // WARNING! We can't commit here because this triggers domain_reload, which will fail
        // due to incorrect syntax of the domain name! Run the migration in the same txn!
        // Trigger a schema reload.
        assert!(server_txn.reload_schema().is_ok());

        // We can't just re-run the migrate here because name takes its definition from
        // in memory, and we can't re-run the initial memory gen. So we just fix it to match
        // what the migrate "would do".
        let me_syn = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_or!([
                    f_eq("attributename", PartialValue::new_iutf8("name")),
                    f_eq("attributename", PartialValue::new_iutf8("domain_name")),
                ])),
                ModifyList::new_purge_and_set(
                    "syntax",
                    Value::new_syntaxs("UTF8STRING_INAME").unwrap(),
                ),
            )
        };
        assert!(server_txn.modify(&me_syn).is_ok());

        // WARNING! We can't commit here because this triggers domain_reload, which will fail
        // due to incorrect syntax of the domain name! Run the migration in the same txn!
        // Trigger a schema reload.
        assert!(server_txn.reload_schema().is_ok());

        // ++ Run the upgrade for X to Y
        assert!(server_txn.migrate_2_to_3().is_ok());

        assert!(server_txn.commit().is_ok());

        // Assert that it migrated and worked as expected.
        let mut server_txn = server.write(duration_from_epoch_now()).await;
        let domain = server_txn
            .internal_search_uuid(UUID_DOMAIN_INFO)
            .expect("failed");
        // ++ assert all names are iname
        assert!(
            domain.get_ava_set("name").expect("no name?").syntax() == SyntaxType::Utf8StringIname
        );
        // ++ assert all domain/domain_name are iname
        assert!(
            domain
                .get_ava_set("domain_name")
                .expect("no domain_name?")
                .syntax()
                == SyntaxType::Utf8StringIname
        );
        assert!(server_txn.commit().is_ok());
    }
}
File diff suppressed because it is too large

@ -1 +1,747 @@
use std::sync::Arc;

use crate::plugins::Plugins;
use crate::prelude::*;

pub(crate) struct ModifyPartial<'a> {
    pub norm_cand: Vec<Entry<EntrySealed, EntryCommitted>>,
    pub pre_candidates: Vec<Arc<Entry<EntrySealed, EntryCommitted>>>,
    pub me: &'a ModifyEvent,
}

impl<'a> QueryServerWriteTransaction<'a> {
    #[instrument(level = "debug", skip_all)]
    pub fn modify(&mut self, me: &ModifyEvent) -> Result<(), OperationError> {
        let mp = unsafe { self.modify_pre_apply(me)? };
        if let Some(mp) = mp {
            self.modify_apply(mp)
        } else {
            // No action to apply, the pre-apply said nothing to be done.
            Ok(())
        }
    }

    /// Unsafety: This is unsafe because you need to be careful about how you handle and check
    /// the Ok(None) case which occurs during internal operations, and that you DO NOT re-order
    /// and call multiple pre-applies at the same time, else you can cause DB corruption.
    #[instrument(level = "debug", skip_all)]
    pub(crate) unsafe fn modify_pre_apply<'x>(
        &mut self,
        me: &'x ModifyEvent,
    ) -> Result<Option<ModifyPartial<'x>>, OperationError> {
        // Get the candidates.
        // Modify applies a modlist to a filter, so we need to internal search
        // then apply.
        if !me.ident.is_internal() {
            security_info!(name = %me.ident, "modify initiator");
        }

        // Validate input.

        // Is the modlist non-empty?
        if me.modlist.is_empty() {
            request_error!("modify: empty modify request");
            return Err(OperationError::EmptyRequest);
        }

        // Is the modlist valid?
        // This is now done in the event transform.

        // Is the filter invalid to schema?
        // This is now done in the event transform.

        // This also checks access controls due to use of the impersonation.
        let pre_candidates = self
            .impersonate_search_valid(me.filter.clone(), me.filter_orig.clone(), &me.ident)
            .map_err(|e| {
                admin_error!("modify: error in pre-candidate selection {:?}", e);
                e
            })?;

        if pre_candidates.is_empty() {
            if me.ident.is_internal() {
                trace!(
                    "modify: no candidates match filter ... continuing {:?}",
                    me.filter
                );
                return Ok(None);
            } else {
                request_error!(
                    "modify: no candidates match filter, failure {:?}",
                    me.filter
                );
                return Err(OperationError::NoMatchingEntries);
            }
        };

        trace!("modify: pre_candidates -> {:?}", pre_candidates);
        trace!("modify: modlist -> {:?}", me.modlist);

        // Are we allowed to make the changes we want to?
        // modify_allow_operation
        let access = self.get_accesscontrols();
        let op_allow = access
            .modify_allow_operation(me, &pre_candidates)
            .map_err(|e| {
                admin_error!("Unable to check modify access {:?}", e);
                e
            })?;
        if !op_allow {
            return Err(OperationError::AccessDenied);
        }

        // Clone a set of writeables.
        // Apply the modlist -> Remember, we have a set of origs
        // and the new modified ents.
        let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
            .iter()
            .map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
            .collect();

        candidates.iter_mut().try_for_each(|er| {
            er.apply_modlist(&me.modlist).map_err(|e| {
                error!("Modification failed for {:?}", er.get_uuid());
                e
            })
        })?;

        trace!("modify: candidates -> {:?}", candidates);

        // Did any of the candidates now become masked?
        if std::iter::zip(
            pre_candidates
                .iter()
                .map(|e| e.mask_recycled_ts().is_none()),
            candidates.iter().map(|e| e.mask_recycled_ts().is_none()),
        )
        .any(|(a, b)| a != b)
        {
            admin_warn!("Refusing to apply modifications that are attempting to bypass replication state machine.");
            return Err(OperationError::AccessDenied);
        }

        // Pre mod plugins
        // We should probably supply the pre-post cands here.
        Plugins::run_pre_modify(self, &mut candidates, me).map_err(|e| {
            admin_error!("Pre-Modify operation failed (plugin), {:?}", e);
            e
        })?;

        // NOTE: There is a potential optimisation here, where if
        // candidates == pre-candidates, then we don't need to store anything
        // because we effectively just did an assert. However, like all
        // optimisations, this could be premature - so for now, we just
        // do the CORRECT thing and recommit, as we may find later we always
        // want to add CSN's or other.

        let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> = candidates
            .into_iter()
            .map(|entry| {
                entry
                    .validate(&self.schema)
                    .map_err(|e| {
                        admin_error!("Schema Violation in validation of modify_pre_apply {:?}", e);
                        OperationError::SchemaViolation(e)
                    })
                    .map(|entry| entry.seal(&self.schema))
            })
            .collect();

        let norm_cand: Vec<Entry<_, _>> = res?;

        Ok(Some(ModifyPartial {
            norm_cand,
            pre_candidates,
            me,
        }))
    }

    #[instrument(level = "debug", skip_all)]
    pub(crate) fn modify_apply(&mut self, mp: ModifyPartial<'_>) -> Result<(), OperationError> {
        let ModifyPartial {
            norm_cand,
            pre_candidates,
            me,
        } = mp;

        // Backend Modify
        self.be_txn
            .modify(&self.cid, &pre_candidates, &norm_cand)
            .map_err(|e| {
                admin_error!("Modify operation failed (backend), {:?}", e);
                e
            })?;

        // Post Plugins
        //
        // memberOf actually wants the pre cand list and the norm_cand list to see what
        // changed. Could be optimised, but this is correct still ...
        Plugins::run_post_modify(self, &pre_candidates, &norm_cand, me).map_err(|e| {
            admin_error!("Post-Modify operation failed (plugin), {:?}", e);
            e
        })?;

        // We have finished all plugins and now have a successful operation - flag if
        // schema or acp requires reload. Remember, this is a modify, so we need to check
        // pre and post cands.
        if !self.changed_schema {
            self.changed_schema = norm_cand
                .iter()
                .chain(pre_candidates.iter().map(|e| e.as_ref()))
                .any(|e| {
                    e.attribute_equality("class", &PVCLASS_CLASSTYPE)
                        || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
                });
        }
        if !self.changed_acp {
            self.changed_acp = norm_cand
                .iter()
                .chain(pre_candidates.iter().map(|e| e.as_ref()))
                .any(|e| e.attribute_equality("class", &PVCLASS_ACP))
        }
        if !self.changed_oauth2 {
            self.changed_oauth2 = norm_cand
                .iter()
                .chain(pre_candidates.iter().map(|e| e.as_ref()))
                .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS));
        }
        if !self.changed_domain {
            self.changed_domain = norm_cand
                .iter()
                .chain(pre_candidates.iter().map(|e| e.as_ref()))
                .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO));
        }

        self.changed_uuid.extend(
            norm_cand
                .iter()
                .map(|e| e.get_uuid())
                .chain(pre_candidates.iter().map(|e| e.get_uuid())),
        );

        trace!(
            schema_reload = ?self.changed_schema,
            acp_reload = ?self.changed_acp,
            oauth2_reload = ?self.changed_oauth2,
            domain_reload = ?self.changed_domain,
        );

        // return
        if me.ident.is_internal() {
            trace!("Modify operation success");
        } else {
            admin_info!("Modify operation success");
        }
        Ok(())
    }
}
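
// Flow sketch (illustrative only; these are the functions defined above): a
// normal modify is the two phases run back to back, which is exactly what
// modify() does. Splitting them lets a caller do work between candidate
// preparation and the backend write.
//
//     let mp = unsafe { self.modify_pre_apply(&me)? };
//     if let Some(mp) = mp {
//         self.modify_apply(mp)?;
//     }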

impl<'a> QueryServerWriteTransaction<'a> {
    /// Used in conjunction with internal_apply_writable, to get a pre/post
    /// pair, where post is pre-configured with metadata to allow
    /// modification before submission back to internal_apply_writable.
    #[instrument(level = "debug", skip_all)]
    pub(crate) fn internal_search_writeable(
        &mut self,
        filter: &Filter<FilterInvalid>,
    ) -> Result<Vec<EntryTuple>, OperationError> {
        let f_valid = filter
            .validate(self.get_schema())
            .map_err(OperationError::SchemaViolation)?;
        let se = SearchEvent::new_internal(f_valid);
        self.search(&se).map(|vs| {
            vs.into_iter()
                .map(|e| {
                    let writeable = e.as_ref().clone().invalidate(self.cid.clone());
                    (e, writeable)
                })
                .collect()
        })
    }

    /// Allows writing batches of modified entries without going through
    /// the modlist path. This allows more efficient batch transformations
    /// such as memberof, but at the expense that YOU must guarantee you
    /// uphold all other plugin and state rules that are important. You
    /// probably want modify instead.
    #[allow(clippy::needless_pass_by_value)]
    #[instrument(level = "debug", skip_all)]
    pub(crate) fn internal_apply_writable(
        &mut self,
        pre_candidates: Vec<Arc<EntrySealedCommitted>>,
        candidates: Vec<Entry<EntryInvalid, EntryCommitted>>,
    ) -> Result<(), OperationError> {
        if pre_candidates.is_empty() && candidates.is_empty() {
            // No action needed.
            return Ok(());
        }

        if pre_candidates.len() != candidates.len() {
            admin_error!("internal_apply_writable - cand lengths differ");
            return Err(OperationError::InvalidRequestState);
        }

        let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> = candidates
            .into_iter()
            .map(|e| {
                e.validate(&self.schema)
                    .map_err(|e| {
                        admin_error!(
                            "Schema Violation in internal_apply_writable validate: {:?}",
                            e
                        );
                        OperationError::SchemaViolation(e)
                    })
                    .map(|e| e.seal(&self.schema))
            })
            .collect();

        let norm_cand: Vec<Entry<_, _>> = res?;

        if cfg!(debug_assertions) {
            pre_candidates
                .iter()
                .zip(norm_cand.iter())
                .try_for_each(|(pre, post)| {
                    if pre.get_uuid() == post.get_uuid() {
                        Ok(())
                    } else {
                        admin_error!("modify - cand sets not correctly aligned");
                        Err(OperationError::InvalidRequestState)
                    }
                })?;
        }

        // Backend Modify
        self.be_txn
            .modify(&self.cid, &pre_candidates, &norm_cand)
            .map_err(|e| {
                admin_error!("Modify operation failed (backend), {:?}", e);
                e
            })?;

        if !self.changed_schema {
            self.changed_schema = norm_cand
                .iter()
                .chain(pre_candidates.iter().map(|e| e.as_ref()))
                .any(|e| {
                    e.attribute_equality("class", &PVCLASS_CLASSTYPE)
                        || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE)
                });
        }
        if !self.changed_acp {
            self.changed_acp = norm_cand
                .iter()
                .chain(pre_candidates.iter().map(|e| e.as_ref()))
                .any(|e| e.attribute_equality("class", &PVCLASS_ACP));
        }
        if !self.changed_oauth2 {
            self.changed_oauth2 = norm_cand
                .iter()
                .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS));
        }
        if !self.changed_domain {
            self.changed_domain = norm_cand
                .iter()
                .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO));
        }
        self.changed_uuid.extend(
            norm_cand
                .iter()
                .map(|e| e.get_uuid())
                .chain(pre_candidates.iter().map(|e| e.get_uuid())),
        );
        trace!(
            schema_reload = ?self.changed_schema,
            acp_reload = ?self.changed_acp,
            oauth2_reload = ?self.changed_oauth2,
            domain_reload = ?self.changed_domain,
        );

        trace!("Modify operation success");
        Ok(())
    }

    #[instrument(level = "debug", skip_all)]
    pub fn internal_modify(
        &mut self,
        filter: &Filter<FilterInvalid>,
        modlist: &ModifyList<ModifyInvalid>,
    ) -> Result<(), OperationError> {
        let f_valid = filter
            .validate(self.get_schema())
            .map_err(OperationError::SchemaViolation)?;
        let m_valid = modlist
            .validate(self.get_schema())
            .map_err(OperationError::SchemaViolation)?;
        let me = ModifyEvent::new_internal(f_valid, m_valid);
        self.modify(&me)
    }

    pub fn internal_modify_uuid(
        &mut self,
        target_uuid: Uuid,
        modlist: &ModifyList<ModifyInvalid>,
    ) -> Result<(), OperationError> {
        let filter = filter!(f_eq("uuid", PartialValue::Uuid(target_uuid)));
        let f_valid = filter
            .validate(self.get_schema())
            .map_err(OperationError::SchemaViolation)?;
        let m_valid = modlist
            .validate(self.get_schema())
            .map_err(OperationError::SchemaViolation)?;
        let me = ModifyEvent::new_internal(f_valid, m_valid);
        self.modify(&me)
    }

    pub fn impersonate_modify_valid(
        &mut self,
        f_valid: Filter<FilterValid>,
        f_intent_valid: Filter<FilterValid>,
        m_valid: ModifyList<ModifyValid>,
        event: &Identity,
    ) -> Result<(), OperationError> {
        let me = ModifyEvent::new_impersonate(event, f_valid, f_intent_valid, m_valid);
        self.modify(&me)
    }

    pub fn impersonate_modify(
        &mut self,
        filter: &Filter<FilterInvalid>,
        filter_intent: &Filter<FilterInvalid>,
        modlist: &ModifyList<ModifyInvalid>,
        event: &Identity,
    ) -> Result<(), OperationError> {
        let f_valid = filter.validate(self.get_schema()).map_err(|e| {
            admin_error!("filter Schema Invalid {:?}", e);
            OperationError::SchemaViolation(e)
        })?;
        let f_intent_valid = filter_intent.validate(self.get_schema()).map_err(|e| {
            admin_error!("f_intent Schema Invalid {:?}", e);
            OperationError::SchemaViolation(e)
        })?;
        let m_valid = modlist.validate(self.get_schema()).map_err(|e| {
            admin_error!("modlist Schema Invalid {:?}", e);
            OperationError::SchemaViolation(e)
        })?;
        self.impersonate_modify_valid(f_valid, f_intent_valid, m_valid, event)
    }

    pub fn impersonate_modify_gen_event(
        &mut self,
        filter: &Filter<FilterInvalid>,
        filter_intent: &Filter<FilterInvalid>,
        modlist: &ModifyList<ModifyInvalid>,
        event: &Identity,
    ) -> Result<ModifyEvent, OperationError> {
        let f_valid = filter.validate(self.get_schema()).map_err(|e| {
            admin_error!("filter Schema Invalid {:?}", e);
            OperationError::SchemaViolation(e)
        })?;
        let f_intent_valid = filter_intent.validate(self.get_schema()).map_err(|e| {
            admin_error!("f_intent Schema Invalid {:?}", e);
            OperationError::SchemaViolation(e)
        })?;
        let m_valid = modlist.validate(self.get_schema()).map_err(|e| {
            admin_error!("modlist Schema Invalid {:?}", e);
            OperationError::SchemaViolation(e)
        })?;
        Ok(ModifyEvent::new_impersonate(
            event,
            f_valid,
            f_intent_valid,
            m_valid,
        ))
    }
}
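
// Usage sketch (hypothetical attribute and value; assumes a write transaction):
// internal_modify validates the filter and modlist against schema, then runs
// the full modify pipeline under the internal identity.
//
//     let target = uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930");
//     let modlist = ModifyList::new_purge_and_set(
//         "description",
//         Value::new_utf8s("updated by a migration"),
//     );
//     server_txn.internal_modify_uuid(target, &modlist)?;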

#[cfg(test)]
mod tests {
    use crate::credential::policy::CryptoPolicy;
    use crate::credential::Credential;
    use crate::prelude::*;

    #[qs_test]
    async fn test_modify(server: &QueryServer) {
        // Create an object
        let mut server_txn = server.write(duration_from_epoch_now()).await;

        let e1 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname("testperson1")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"))
            ),
            ("description", Value::new_utf8s("testperson1")),
            ("displayname", Value::new_utf8s("testperson1"))
        );

        let e2 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname("testperson2")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63932"))
            ),
            ("description", Value::new_utf8s("testperson2")),
            ("displayname", Value::new_utf8s("testperson2"))
        );

        let ce = CreateEvent::new_internal(vec![e1.clone(), e2.clone()]);

        let cr = server_txn.create(&ce);
        assert!(cr.is_ok());

        // Empty Modlist (filter is valid)
        let me_emp = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_pres("class")),
                ModifyList::new_list(vec![]),
            )
        };
        assert!(server_txn.modify(&me_emp) == Err(OperationError::EmptyRequest));

        // Mod changes no objects
        let me_nochg = unsafe {
            ModifyEvent::new_impersonate_entry_ser(
                JSON_ADMIN_V1,
                filter!(f_eq("name", PartialValue::new_iname("flarbalgarble"))),
                ModifyList::new_list(vec![Modify::Present(
                    AttrString::from("description"),
                    Value::from("anusaosu"),
                )]),
            )
        };
        assert!(server_txn.modify(&me_nochg) == Err(OperationError::NoMatchingEntries));

        // Filter is invalid to schema - to check this, due to changes in the way events
        // are handled, we put this via the internal modify function to get the modlist
        // checked for us. Normal server operation doesn't allow weird bypasses like
        // this.
        let r_inv_1 = server_txn.internal_modify(
            &filter!(f_eq("tnanuanou", PartialValue::new_iname("Flarbalgarble"))),
            &ModifyList::new_list(vec![Modify::Present(
                AttrString::from("description"),
                Value::from("anusaosu"),
            )]),
        );
        assert!(
            r_inv_1
                == Err(OperationError::SchemaViolation(
                    SchemaError::InvalidAttribute("tnanuanou".to_string())
                ))
        );

        // Mod is invalid to schema
        let me_inv_m = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_pres("class")),
                ModifyList::new_list(vec![Modify::Present(
                    AttrString::from("htnaonu"),
                    Value::from("anusaosu"),
                )]),
            )
        };
        assert!(
            server_txn.modify(&me_inv_m)
                == Err(OperationError::SchemaViolation(
                    SchemaError::InvalidAttribute("htnaonu".to_string())
                ))
        );

        // Mod single object
        let me_sin = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_eq("name", PartialValue::new_iname("testperson2"))),
                ModifyList::new_list(vec![
                    Modify::Purged(AttrString::from("description")),
                    Modify::Present(AttrString::from("description"), Value::from("anusaosu")),
                ]),
            )
        };
        assert!(server_txn.modify(&me_sin).is_ok());

        // Mod multiple objects
        let me_mult = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_or!([
                    f_eq("name", PartialValue::new_iname("testperson1")),
                    f_eq("name", PartialValue::new_iname("testperson2")),
                ])),
                ModifyList::new_list(vec![
                    Modify::Purged(AttrString::from("description")),
                    Modify::Present(AttrString::from("description"), Value::from("anusaosu")),
                ]),
            )
        };
        assert!(server_txn.modify(&me_mult).is_ok());

        assert!(server_txn.commit().is_ok());
    }

    #[qs_test]
    async fn test_modify_assert(server: &QueryServer) {
        let mut server_txn = server.write(duration_from_epoch_now()).await;

        let t_uuid = Uuid::new_v4();
        let r_uuid = Uuid::new_v4();

        assert!(server_txn
            .internal_create(vec![entry_init!(
                ("class", Value::new_class("object")),
                ("uuid", Value::Uuid(t_uuid))
            ),])
            .is_ok());

        // This assertion will FAIL
        assert!(matches!(
            server_txn.internal_modify_uuid(
                t_uuid,
                &ModifyList::new_list(vec![
                    m_assert("uuid", &PartialValue::Uuid(r_uuid)),
                    m_pres("description", &Value::Utf8("test".into()))
                ])
            ),
            Err(OperationError::ModifyAssertionFailed)
        ));

        // This assertion will PASS
        assert!(server_txn
            .internal_modify_uuid(
                t_uuid,
                &ModifyList::new_list(vec![
                    m_assert("uuid", &PartialValue::Uuid(t_uuid)),
                    m_pres("description", &Value::Utf8("test".into()))
                ])
            )
            .is_ok());
    }
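
    // test_modify_assert above exercises compare-and-modify semantics: the
    // whole ModifyList is rejected unless the asserted attribute currently
    // holds the expected value. The same idea over a plain map, as a minimal
    // illustrative sketch (not the server's implementation):
    #[allow(dead_code)]
    fn assert_then_set(
        entry: &mut std::collections::HashMap<String, String>,
        assert_k: &str,
        assert_v: &str,
        set_k: &str,
        set_v: &str,
    ) -> Result<(), &'static str> {
        if entry.get(assert_k).map(String::as_str) != Some(assert_v) {
            // mirrors OperationError::ModifyAssertionFailed
            return Err("ModifyAssertionFailed");
        }
        entry.insert(set_k.to_string(), set_v.to_string());
        Ok(())
    }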

    #[qs_test]
    async fn test_modify_invalid_class(server: &QueryServer) {
        // Test modifying an entry and adding an extra class, that would cause the entry
        // to no longer conform to schema.
        let mut server_txn = server.write(duration_from_epoch_now()).await;

        let e1 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname("testperson1")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"))
            ),
            ("description", Value::new_utf8s("testperson1")),
            ("displayname", Value::new_utf8s("testperson1"))
        );

        let ce = CreateEvent::new_internal(vec![e1.clone()]);

        let cr = server_txn.create(&ce);
        assert!(cr.is_ok());

        // Add class but no values
        let me_sin = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_eq("name", PartialValue::new_iname("testperson1"))),
                ModifyList::new_list(vec![Modify::Present(
                    AttrString::from("class"),
                    Value::new_class("system_info"),
                )]),
            )
        };
        assert!(server_txn.modify(&me_sin).is_err());

        // Add multivalue where not valid
        let me_sin = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_eq("name", PartialValue::new_iname("testperson1"))),
                ModifyList::new_list(vec![Modify::Present(
                    AttrString::from("name"),
                    Value::new_iname("testpersonx"),
                )]),
            )
        };
        assert!(server_txn.modify(&me_sin).is_err());

        // add class and valid values?
        let me_sin = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_eq("name", PartialValue::new_iname("testperson1"))),
                ModifyList::new_list(vec![
                    Modify::Present(AttrString::from("class"), Value::new_class("system_info")),
                    // Modify::Present("domain".to_string(), Value::new_iutf8("domain.name")),
                    Modify::Present(AttrString::from("version"), Value::new_uint32(1)),
                ]),
            )
        };
        assert!(server_txn.modify(&me_sin).is_ok());

        // Replace a value
        let me_sin = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_eq("name", PartialValue::new_iname("testperson1"))),
                ModifyList::new_list(vec![
                    Modify::Purged(AttrString::from("name")),
                    Modify::Present(AttrString::from("name"), Value::new_iname("testpersonx")),
                ]),
            )
        };
        assert!(server_txn.modify(&me_sin).is_ok());
    }

    #[qs_test]
    async fn test_modify_password_only(server: &QueryServer) {
        let e1 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("class", Value::new_class("account")),
            ("name", Value::new_iname("testperson1")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"))
            ),
            ("description", Value::new_utf8s("testperson1")),
            ("displayname", Value::new_utf8s("testperson1"))
        );
        let mut server_txn = server.write(duration_from_epoch_now()).await;
        // Add the entry. Today we have no syntax to take a simple str to a credential
        // but honestly, that's probably okay :)
        let ce = CreateEvent::new_internal(vec![e1]);
        let cr = server_txn.create(&ce);
        assert!(cr.is_ok());

        // Build the credential.
        let p = CryptoPolicy::minimum();
        let cred = Credential::new_password_only(&p, "test_password").unwrap();
        let v_cred = Value::new_credential("primary", cred);
        assert!(v_cred.validate());

        // now modify and provide a primary credential.
        let me_inv_m = unsafe {
            ModifyEvent::new_internal_invalid(
                filter!(f_eq("name", PartialValue::new_iname("testperson1"))),
                ModifyList::new_list(vec![Modify::Present(
                    AttrString::from("primary_credential"),
                    v_cred,
                )]),
            )
        };
        // go!
        assert!(server_txn.modify(&me_inv_m).is_ok());

        // assert it exists and the password checks out
        let test_ent = server_txn
            .internal_search_uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"))
            .expect("failed");
        // get the primary ava
        let cred_ref = test_ent
            .get_ava_single_credential("primary_credential")
            .expect("Failed");
        // do a pw check.
        assert!(cred_ref.verify_password("test_password").unwrap());
    }
}

856
kanidmd/lib/src/server/recycle.rs
Normal file

@@ -0,0 +1,856 @@
use super::modify::ModifyPartial;
use crate::event::ReviveRecycledEvent;
use crate::prelude::*;
use crate::server::Plugins;
use hashbrown::HashMap;

impl<'a> QueryServerWriteTransaction<'a> {
    #[instrument(level = "debug", skip_all)]
    pub fn purge_tombstones(&mut self) -> Result<(), OperationError> {
        // purge everything that is a tombstone.
        let cid = self.cid.sub_secs(CHANGELOG_MAX_AGE).map_err(|e| {
            admin_error!("Unable to generate search cid {:?}", e);
            e
        })?;

        // Delete them - this is a TRUE delete, no going back now!
        self.be_txn
            .reap_tombstones(&cid)
            .map_err(|e| {
                admin_error!(err = ?e, "Tombstone purge operation failed (backend)");
                e
            })
            .map(|_| {
                admin_info!("Tombstone purge operation success");
            })
    }
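
    // purge_tombstones derives a cut-off CID by stepping the transaction's
    // CID back CHANGELOG_MAX_AGE seconds; anything last changed before that
    // cut-off is reaped. The same window logic over plain durations, as an
    // illustrative sketch (the real sub_secs returns a Result because the
    // subtraction can underflow, which the code above maps to an error):
    #[allow(dead_code)]
    fn past_retention_window(
        last_changed: std::time::Duration,
        now: std::time::Duration,
        max_age_secs: u64,
    ) -> bool {
        match now.checked_sub(std::time::Duration::from_secs(max_age_secs)) {
            Some(cutoff) => last_changed < cutoff,
            // the server hasn't existed for a full window yet: nothing to reap
            None => false,
        }
    }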

    #[instrument(level = "debug", skip_all)]
    pub fn purge_recycled(&mut self) -> Result<(), OperationError> {
        // Send everything that is recycled to tombstone
        // Search all recycled
        let cid = self.cid.sub_secs(RECYCLEBIN_MAX_AGE).map_err(|e| {
            admin_error!(err = ?e, "Unable to generate search cid");
            e
        })?;
        let rc = self.internal_search(filter_all!(f_and!([
            f_eq("class", PVCLASS_RECYCLED.clone()),
            f_lt("last_modified_cid", PartialValue::new_cid(cid)),
        ])))?;

        if rc.is_empty() {
            admin_info!("No recycled present - purge operation success");
            return Ok(());
        }

        // Modify them to strip all avas except uuid
        let tombstone_cand: Result<Vec<_>, _> = rc
            .iter()
            .map(|e| {
                e.to_tombstone(self.cid.clone())
                    .validate(&self.schema)
                    .map_err(|e| {
                        admin_error!("Schema Violation in purge_recycled validate: {:?}", e);
                        OperationError::SchemaViolation(e)
                    })
                    // seal if it worked.
                    .map(|e| e.seal(&self.schema))
            })
            .collect();

        let tombstone_cand = tombstone_cand?;

        // Backend Modify
        self.be_txn
            .modify(&self.cid, &rc, &tombstone_cand)
            .map_err(|e| {
                admin_error!("Purge recycled operation failed (backend), {:?}", e);
                e
            })
            .map(|_| {
                admin_info!("Purge recycled operation success");
            })
    }
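
    // Conceptually, to_tombstone keeps only identity and lifecycle data
    // (taking the current cid) and discards every other attribute. A minimal
    // sketch of that stripping over a plain attribute map (illustrative only;
    // the real conversion operates on Entry state types):
    #[allow(dead_code)]
    fn strip_to_tombstone(
        avas: &std::collections::BTreeMap<String, Vec<String>>,
    ) -> std::collections::BTreeMap<String, Vec<String>> {
        let mut ts = std::collections::BTreeMap::new();
        // keep the uuid so the tombstone still identifies what existed
        if let Some(u) = avas.get("uuid") {
            ts.insert("uuid".to_string(), u.clone());
        }
        // mark the entry as a tombstone; all other avas are dropped
        ts.insert("class".to_string(), vec!["tombstone".to_string()]);
        ts
    }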

    #[instrument(level = "debug", skip_all)]
    pub fn revive_recycled(&mut self, re: &ReviveRecycledEvent) -> Result<(), OperationError> {
        // Revive an entry to live. This is a specialised function, and draws a lot of
        // inspiration from modify.
        //
        // Access is granted by the ability to search the class=recycled
        // and the ability to modify + remove that class from the object.
        if !re.ident.is_internal() {
            security_info!(name = %re.ident, "revive initiator");
        }

        // Get the list of pre_candidates, using impersonate search.
        let pre_candidates =
            self.impersonate_search_valid(re.filter.clone(), re.filter.clone(), &re.ident)?;

        // Is the list empty?
        if pre_candidates.is_empty() {
            if re.ident.is_internal() {
                trace!(
                    "revive: no candidates match filter ... continuing {:?}",
                    re.filter
                );
                return Ok(());
            } else {
                request_error!(
                    "revive: no candidates match filter, failure {:?}",
                    re.filter
                );
                return Err(OperationError::NoMatchingEntries);
            }
        };

        trace!("revive: pre_candidates -> {:?}", pre_candidates);

        // Check access against a "fake" modify.
        let modlist = ModifyList::new_list(vec![Modify::Removed(
            AttrString::from("class"),
            PVCLASS_RECYCLED.clone(),
        )]);

        let m_valid = modlist.validate(self.get_schema()).map_err(|e| {
            admin_error!("revive recycled modlist Schema Violation {:?}", e);
            OperationError::SchemaViolation(e)
        })?;

        let me =
            ModifyEvent::new_impersonate(&re.ident, re.filter.clone(), re.filter.clone(), m_valid);

        let access = self.get_accesscontrols();
        let op_allow = access
            .modify_allow_operation(&me, &pre_candidates)
            .map_err(|e| {
                admin_error!("Unable to check modify access {:?}", e);
                e
            })?;
        if !op_allow {
            return Err(OperationError::AccessDenied);
        }

        // Are all of the entries actually recycled?
        if pre_candidates.iter().all(|e| e.mask_recycled().is_some()) {
            admin_warn!("Refusing to revive entries that are already live!");
            return Err(OperationError::AccessDenied);
        }

        // Build the list of mods from directmemberof, to revive memberships.
        let mut dm_mods: HashMap<Uuid, ModifyList<ModifyInvalid>> =
            HashMap::with_capacity(pre_candidates.len());

        for e in &pre_candidates {
            // Get this entry's uuid.
            let u: Uuid = e.get_uuid();

            if let Some(riter) = e.get_ava_as_refuuid("directmemberof") {
                for g_uuid in riter {
                    dm_mods
                        .entry(g_uuid)
                        .and_modify(|mlist| {
                            let m = Modify::Present(AttrString::from("member"), Value::Refer(u));
                            mlist.push_mod(m);
                        })
                        .or_insert({
                            let m = Modify::Present(AttrString::from("member"), Value::Refer(u));
                            ModifyList::new_list(vec![m])
                        });
                }
            }
        }

        // clone the writeable entries.
        let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
            .iter()
            .map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
            // Mutate to apply the revive.
            .map(|er| er.to_revived())
            .collect();

        // Are they all revived?
        if candidates.iter().all(|e| e.mask_recycled().is_none()) {
            admin_error!("Not all candidates were correctly revived, unable to proceed");
            return Err(OperationError::InvalidEntryState);
        }

        // Do we need to apply pre-mod?
        // Very likely, in case the domain has renamed etc.
        Plugins::run_pre_modify(self, &mut candidates, &me).map_err(|e| {
            admin_error!("Revive operation failed (plugin), {:?}", e);
            e
        })?;

        // Schema validate
        let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> = candidates
            .into_iter()
            .map(|e| {
                e.validate(&self.schema)
                    .map_err(|e| {
                        admin_error!("Schema Violation {:?}", e);
                        OperationError::SchemaViolation(e)
                    })
                    .map(|e| e.seal(&self.schema))
            })
            .collect();

        let norm_cand: Vec<Entry<_, _>> = res?;

        // build the mod partial
        let mp = ModifyPartial {
            norm_cand,
            pre_candidates,
            me: &me,
        };

        // Call modify_apply
        self.modify_apply(mp)?;

        // If and only if that succeeds, apply the direct membership modifications
        // if possible.
        for (g, mods) in dm_mods {
            // I think the filter/filter_all shouldn't matter here because the only
            // valid direct memberships should be still valid/live references, as refint
            // removes anything that was deleted even from recycled entries.
            let f = filter_all!(f_eq("uuid", PartialValue::Uuid(g)));
            self.internal_modify(&f, &mods)?;
        }

        Ok(())
    }
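
    // The dm_mods construction above is a group-by accumulation: one
    // ModifyList per group uuid, gaining one Modify per revived member. The
    // same entry()/and_modify()/or_insert_with() pattern in isolation, as a
    // sketch with illustrative types only:
    #[allow(dead_code)]
    fn accumulate_memberships(
        memberships: &[(Uuid, Uuid)], // (group, member) pairs
    ) -> HashMap<Uuid, Vec<Uuid>> {
        let mut mods: HashMap<Uuid, Vec<Uuid>> = HashMap::new();
        for (group, member) in memberships {
            mods.entry(*group)
                // group already seen: append this member to its list
                .and_modify(|members| members.push(*member))
                // first time we see the group: start a fresh list
                .or_insert_with(|| vec![*member]);
        }
        mods
    }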

    /*
    #[instrument(level = "debug", skip_all)]
    pub(crate) fn revive_recycled_legacy(
        &mut self,
        re: &ReviveRecycledEvent,
    ) -> Result<(), OperationError> {
        // Revive an entry to live. This is a specialised function, and draws a lot of
        // inspiration from modify.
        //
        // Access is granted by the ability to search the class=recycled
        // and the ability to modify + remove that class from the object.

        // create the modify for access testing.
        // tl;dr, remove the class=recycled
        let modlist = ModifyList::new_list(vec![Modify::Removed(
            AttrString::from("class"),
            PVCLASS_RECYCLED.clone(),
        )]);

        let m_valid = modlist.validate(self.get_schema()).map_err(|e| {
            admin_error!(
                "Schema Violation in revive recycled modlist validate: {:?}",
                e
            );
            OperationError::SchemaViolation(e)
        })?;

        // Get the entries we are about to revive.
        // we make a set of per-entry mod lists. A list of lists even ...
        let revive_cands =
            self.impersonate_search_valid(re.filter.clone(), re.filter.clone(), &re.ident)?;

        let mut dm_mods: HashMap<Uuid, ModifyList<ModifyInvalid>> =
            HashMap::with_capacity(revive_cands.len());

        for e in revive_cands {
            // Get this entry's uuid.
            let u: Uuid = e.get_uuid();

            if let Some(riter) = e.get_ava_as_refuuid("directmemberof") {
                for g_uuid in riter {
                    dm_mods
                        .entry(g_uuid)
                        .and_modify(|mlist| {
                            let m = Modify::Present(AttrString::from("member"), Value::Refer(u));
                            mlist.push_mod(m);
                        })
                        .or_insert({
                            let m = Modify::Present(AttrString::from("member"), Value::Refer(u));
                            ModifyList::new_list(vec![m])
                        });
                }
            }
        }

        // Now impersonate the modify
        self.impersonate_modify_valid(re.filter.clone(), re.filter.clone(), m_valid, &re.ident)?;
        // If and only if that succeeds, apply the direct membership modifications
        // if possible.
        for (g, mods) in dm_mods {
            // I think the filter/filter_all shouldn't matter here because the only
            // valid direct memberships should be still valid/live references.
            let f = filter_all!(f_eq("uuid", PartialValue::Uuid(g)));
            self.internal_modify(&f, &mods)?;
        }
        Ok(())
    }
    */
}

#[cfg(test)]
mod tests {
    use crate::prelude::*;

    use crate::event::{CreateEvent, DeleteEvent};
    use crate::server::ModifyEvent;
    use crate::server::SearchEvent;

    use super::ReviveRecycledEvent;

    #[qs_test]
    async fn test_recycle_simple(server: &QueryServer) {
        // First we set up some timestamps
        let time_p1 = duration_from_epoch_now();
        let time_p2 = time_p1 + Duration::from_secs(RECYCLEBIN_MAX_AGE * 2);

        let mut server_txn = server.write(time_p1).await;
        let admin = server_txn.internal_search_uuid(UUID_ADMIN).expect("failed");

        let filt_i_rc = filter_all!(f_eq("class", PartialValue::new_class("recycled")));

        let filt_i_ts = filter_all!(f_eq("class", PartialValue::new_class("tombstone")));

        let filt_i_per = filter_all!(f_eq("class", PartialValue::new_class("person")));

        // Create fake external requests. Probably from admin later
        let me_rc = unsafe {
            ModifyEvent::new_impersonate_entry(
                admin.clone(),
                filt_i_rc.clone(),
                ModifyList::new_list(vec![Modify::Present(
                    AttrString::from("class"),
                    Value::new_class("recycled"),
                )]),
            )
        };

        let de_rc = unsafe { DeleteEvent::new_impersonate_entry(admin.clone(), filt_i_rc.clone()) };

        let se_rc =
            unsafe { SearchEvent::new_ext_impersonate_entry(admin.clone(), filt_i_rc.clone()) };

        let sre_rc =
            unsafe { SearchEvent::new_rec_impersonate_entry(admin.clone(), filt_i_rc.clone()) };

        let rre_rc = unsafe {
            ReviveRecycledEvent::new_impersonate_entry(
                admin,
                filter_all!(f_eq("name", PartialValue::new_iname("testperson1"))),
            )
        };

        // Create some recycled objects
        let e1 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname("testperson1")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"))
            ),
            ("description", Value::new_utf8s("testperson1")),
            ("displayname", Value::new_utf8s("testperson1"))
        );

        let e2 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname("testperson2")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63932"))
            ),
            ("description", Value::new_utf8s("testperson2")),
            ("displayname", Value::new_utf8s("testperson2"))
        );

        let ce = CreateEvent::new_internal(vec![e1, e2]);
        let cr = server_txn.create(&ce);
        assert!(cr.is_ok());

        // Now we immediately delete these to force them to the correct state.
        let de_sin = unsafe {
            DeleteEvent::new_internal_invalid(filter!(f_or!([
                f_eq("name", PartialValue::new_iname("testperson1")),
                f_eq("name", PartialValue::new_iname("testperson2")),
            ])))
        };
        assert!(server_txn.delete(&de_sin).is_ok());

        // Can it be seen (external search)
        let r1 = server_txn.search(&se_rc).expect("search failed");
        assert!(r1.is_empty());

        // Can it be deleted (external delete)
        // Should be err-no candidates.
        assert!(server_txn.delete(&de_rc).is_err());

        // Can it be modified? (external modify)
        // Should be err-no candidates
        assert!(server_txn.modify(&me_rc).is_err());

        // Can it be seen by special search? (external recycle search)
        let r2 = server_txn.search(&sre_rc).expect("search failed");
        assert!(r2.len() == 2);

        // Can it be seen (internal search)
        // Internal search should see it.
        let r2 = server_txn
            .internal_search(filt_i_rc.clone())
            .expect("internal search failed");
        assert!(r2.len() == 2);

        // There are now two paths forward
        // revival or purge!
        assert!(server_txn.revive_recycled(&rre_rc).is_ok());

        // Not enough time has passed, won't have an effect for purge to TS
        assert!(server_txn.purge_recycled().is_ok());
        let r3 = server_txn
            .internal_search(filt_i_rc.clone())
            .expect("internal search failed");
        assert!(r3.len() == 1);

        // Commit
        assert!(server_txn.commit().is_ok());

        // Now, establish enough time for the recycled items to be purged.
        let mut server_txn = server.write(time_p2).await;

        // purge to tombstone, now that time has passed.
        assert!(server_txn.purge_recycled().is_ok());

        // Should be no recycled objects.
        let r4 = server_txn
            .internal_search(filt_i_rc.clone())
            .expect("internal search failed");
        assert!(r4.is_empty());

        // There should be one tombstone
        let r5 = server_txn
            .internal_search(filt_i_ts.clone())
            .expect("internal search failed");
        assert!(r5.len() == 1);

        // There should be one entry
        let r6 = server_txn
            .internal_search(filt_i_per.clone())
            .expect("internal search failed");
        assert!(r6.len() == 1);

        assert!(server_txn.commit().is_ok());
    }

    // The delete test above should be unaffected by recycle anyway
    #[qs_test]
    async fn test_qs_recycle_advanced(server: &QueryServer) {
        // Create items
        let mut server_txn = server.write(duration_from_epoch_now()).await;
        let admin = server_txn.internal_search_uuid(UUID_ADMIN).expect("failed");

        let e1 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname("testperson1")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"))
            ),
            ("description", Value::new_utf8s("testperson1")),
            ("displayname", Value::new_utf8s("testperson1"))
        );
        let ce = CreateEvent::new_internal(vec![e1]);

        let cr = server_txn.create(&ce);
        assert!(cr.is_ok());
        // Delete and ensure they became recycled.
        let de_sin = unsafe {
            DeleteEvent::new_internal_invalid(filter!(f_eq(
                "name",
                PartialValue::new_iname("testperson1")
            )))
        };
        assert!(server_txn.delete(&de_sin).is_ok());
        // Can it be seen by special search? (external recycle search)
        let filt_rc = filter_all!(f_eq("class", PartialValue::new_class("recycled")));
        let sre_rc = unsafe { SearchEvent::new_rec_impersonate_entry(admin, filt_rc.clone()) };
        let r2 = server_txn.search(&sre_rc).expect("search failed");
        assert!(r2.len() == 1);

        // Create dup uuid (rej)
        // After a delete -> recycle, create duplicate name etc.
        let cr = server_txn.create(&ce);
        assert!(cr.is_err());

        assert!(server_txn.commit().is_ok());
    }

    #[qs_test]
    async fn test_uuid_to_star_recycle(server: &QueryServer) {
        let mut server_txn = server.write(duration_from_epoch_now()).await;

        let e1 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("class", Value::new_class("account")),
            ("name", Value::new_iname("testperson1")),
            (
                "uuid",
                Value::Uuid(uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"))
            ),
            ("description", Value::new_utf8s("testperson1")),
            ("displayname", Value::new_utf8s("testperson1"))
        );

        let tuuid = uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930");

        let ce = CreateEvent::new_internal(vec![e1]);
        let cr = server_txn.create(&ce);
        assert!(cr.is_ok());

        assert!(server_txn.uuid_to_rdn(tuuid) == Ok("spn=testperson1@example.com".to_string()));

        assert!(
            server_txn.uuid_to_spn(tuuid)
                == Ok(Some(Value::new_spn_str("testperson1", "example.com")))
        );

        assert!(server_txn.name_to_uuid("testperson1") == Ok(tuuid));

        // delete
        let de_sin = unsafe {
            DeleteEvent::new_internal_invalid(filter!(f_eq(
                "name",
                PartialValue::new_iname("testperson1")
            )))
        };
        assert!(server_txn.delete(&de_sin).is_ok());

        // all should fail
        assert!(
            server_txn.uuid_to_rdn(tuuid)
                == Ok("uuid=cc8e95b4-c24f-4d68-ba54-8bed76f63930".to_string())
        );

        assert!(server_txn.uuid_to_spn(tuuid) == Ok(None));

        assert!(server_txn.name_to_uuid("testperson1").is_err());

        // revive
        let admin = server_txn.internal_search_uuid(UUID_ADMIN).expect("failed");
        let rre_rc = unsafe {
            ReviveRecycledEvent::new_impersonate_entry(
                admin,
                filter_all!(f_eq("name", PartialValue::new_iname("testperson1"))),
            )
        };
        assert!(server_txn.revive_recycled(&rre_rc).is_ok());

        // all checks pass

        assert!(server_txn.uuid_to_rdn(tuuid) == Ok("spn=testperson1@example.com".to_string()));

        assert!(
            server_txn.uuid_to_spn(tuuid)
                == Ok(Some(Value::new_spn_str("testperson1", "example.com")))
        );

        assert!(server_txn.name_to_uuid("testperson1") == Ok(tuuid));
    }

    #[qs_test]
    async fn test_tombstone(server: &QueryServer) {
        // First we set up some timestamps
        let time_p1 = duration_from_epoch_now();
        let time_p2 = time_p1 + Duration::from_secs(CHANGELOG_MAX_AGE * 2);
        let time_p3 = time_p2 + Duration::from_secs(CHANGELOG_MAX_AGE * 2);

        let mut server_txn = server.write(time_p1).await;
        let admin = server_txn.internal_search_uuid(UUID_ADMIN).expect("failed");

        let filt_i_ts = filter_all!(f_eq("class", PartialValue::new_class("tombstone")));

        // Create fake external requests. Probably from admin later
        // Should we do this with impersonate instead of using the external?
        let me_ts = unsafe {
            ModifyEvent::new_impersonate_entry(
                admin.clone(),
                filt_i_ts.clone(),
                ModifyList::new_list(vec![Modify::Present(
                    AttrString::from("class"),
                    Value::new_class("tombstone"),
                )]),
            )
        };

        let de_ts = unsafe { DeleteEvent::new_impersonate_entry(admin.clone(), filt_i_ts.clone()) };
        let se_ts = unsafe { SearchEvent::new_ext_impersonate_entry(admin, filt_i_ts.clone()) };

        // First, create an entry, then push it through the lifecycle.
        let e_ts = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname("testperson1")),
            (
                "uuid",
                Value::Uuid(uuid!("9557f49c-97a5-4277-a9a5-097d17eb8317"))
            ),
            ("description", Value::new_utf8s("testperson1")),
            ("displayname", Value::new_utf8s("testperson1"))
        );

        let ce = CreateEvent::new_internal(vec![e_ts]);
        let cr = server_txn.create(&ce);
        assert!(cr.is_ok());

        let de_sin = unsafe {
            DeleteEvent::new_internal_invalid(filter!(f_or!([f_eq(
                "name",
                PartialValue::new_iname("testperson1")
            )])))
        };
        assert!(server_txn.delete(&de_sin).is_ok());

        // Commit
        assert!(server_txn.commit().is_ok());

        // Now, establish enough time for the recycled items to be purged.
        let mut server_txn = server.write(time_p2).await;
        assert!(server_txn.purge_recycled().is_ok());

        // Now test the tombstone properties.

        // Can it be seen (external search)
        let r1 = server_txn.search(&se_ts).expect("search failed");
        assert!(r1.is_empty());

        // Can it be deleted (external delete)
        // Should be err-no candidates.
        assert!(server_txn.delete(&de_ts).is_err());

        // Can it be modified? (external modify)
        // Should be err-no candidates
        assert!(server_txn.modify(&me_ts).is_err());

        // Can it be seen (internal search)
        // Internal search should see it.
        let r2 = server_txn
            .internal_search(filt_i_ts.clone())
            .expect("internal search failed");
        assert!(r2.len() == 1);

        // If we purge now, nothing happens, we aren't past the time window.
        assert!(server_txn.purge_tombstones().is_ok());

        let r3 = server_txn
            .internal_search(filt_i_ts.clone())
            .expect("internal search failed");
        assert!(r3.len() == 1);

        // Commit
        assert!(server_txn.commit().is_ok());

        // New txn, push the cid forward.
        let mut server_txn = server.write(time_p3).await;

        // Now purge
        assert!(server_txn.purge_tombstones().is_ok());

        // Assert it's gone
        // Internal search should not see it.
        let r4 = server_txn
            .internal_search(filt_i_ts)
            .expect("internal search failed");
        assert!(r4.is_empty());

        assert!(server_txn.commit().is_ok());
    }
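
    // The lifecycle these tests walk is: live -> (delete) -> recycled ->
    // (purge_recycled after RECYCLEBIN_MAX_AGE) -> tombstone ->
    // (purge_tombstones after CHANGELOG_MAX_AGE) -> gone. A tiny sketch of
    // the purge progression (illustrative; the real state is carried by
    // entry classes, not an enum):
    #[allow(dead_code)]
    #[derive(Debug, PartialEq)]
    enum LifecycleSketch {
        Live,
        Recycled,
        Tombstone,
        Purged,
    }

    #[allow(dead_code)]
    fn purge_step(s: LifecycleSketch, window_elapsed: bool) -> LifecycleSketch {
        match s {
            LifecycleSketch::Recycled if window_elapsed => LifecycleSketch::Tombstone,
            LifecycleSketch::Tombstone if window_elapsed => LifecycleSketch::Purged,
            // inside the retention window the purges are no-ops
            other => other,
        }
    }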

    fn create_user(name: &str, uuid: &str) -> Entry<EntryInit, EntryNew> {
        entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("person")),
            ("name", Value::new_iname(name)),
            ("uuid", Value::new_uuid_s(uuid).expect("uuid")),
            ("description", Value::new_utf8s("testperson-entry")),
            ("displayname", Value::new_utf8s(name))
        )
    }

    fn create_group(name: &str, uuid: &str, members: &[&str]) -> Entry<EntryInit, EntryNew> {
        let mut e1 = entry_init!(
            ("class", Value::new_class("object")),
            ("class", Value::new_class("group")),
            ("name", Value::new_iname(name)),
            ("uuid", Value::new_uuid_s(uuid).expect("uuid")),
            ("description", Value::new_utf8s("testgroup-entry"))
        );
        members
            .iter()
            .for_each(|m| e1.add_ava("member", Value::new_refer_s(m).unwrap()));
        e1
    }

    fn check_entry_has_mo(qs: &mut QueryServerWriteTransaction, name: &str, mo: &str) -> bool {
        let e = qs
            .internal_search(filter!(f_eq("name", PartialValue::new_iname(name))))
            .unwrap()
            .pop()
            .unwrap();

        e.attribute_equality("memberof", &PartialValue::new_refer_s(mo).unwrap())
    }

    #[qs_test]
    async fn test_revive_advanced_directmemberships(server: &QueryServer) {
        // Create items
        let mut server_txn = server.write(duration_from_epoch_now()).await;
        let admin = server_txn.internal_search_uuid(UUID_ADMIN).expect("failed");

        // Right, need a user in a direct group.
        let u1 = create_user("u1", "22b47373-d123-421f-859e-9ddd8ab14a2a");
        let g1 = create_group(
            "g1",
            "cca2bbfc-5b43-43f3-be9e-f5b03b3defec",
            &["22b47373-d123-421f-859e-9ddd8ab14a2a"],
        );

        // Need a user in A -> B -> User, such that A/B are re-added as MO
        let u2 = create_user("u2", "5c19a4a2-b9f0-4429-b130-5782de5fddda");
        let g2a = create_group(
            "g2a",
            "e44cf9cd-9941-44cb-a02f-307b6e15ac54",
            &["5c19a4a2-b9f0-4429-b130-5782de5fddda"],
        );
        let g2b = create_group(
            "g2b",
            "d3132e6e-18ce-4b87-bee1-1d25e4bfe96d",
            &["e44cf9cd-9941-44cb-a02f-307b6e15ac54"],
        );

        // Need a user in a group that is recycled after, then revived at the same time.
        let u3 = create_user("u3", "68467a41-6e8e-44d0-9214-a5164e75ca03");
        let g3 = create_group(
            "g3",
            "36048117-e479-45ed-aeb5-611e8d83d5b1",
            &["68467a41-6e8e-44d0-9214-a5164e75ca03"],
        );

        // A user in a group that is recycled, user is revived, THEN the group is. Group
        // should be present in MO after the second revive.
        let u4 = create_user("u4", "d696b10f-1729-4f1a-83d0-ca06525c2f59");
        let g4 = create_group(
            "g4",
            "d5c59ac6-c533-4b00-989f-d0e183f07bab",
            &["d696b10f-1729-4f1a-83d0-ca06525c2f59"],
        );

        let ce = CreateEvent::new_internal(vec![u1, g1, u2, g2a, g2b, u3, g3, u4, g4]);
        let cr = server_txn.create(&ce);
        assert!(cr.is_ok());

        // Now recycle the needed entries.
        let de = unsafe {
            DeleteEvent::new_internal_invalid(filter!(f_or(vec![
                f_eq("name", PartialValue::new_iname("u1")),
                f_eq("name", PartialValue::new_iname("u2")),
                f_eq("name", PartialValue::new_iname("u3")),
                f_eq("name", PartialValue::new_iname("g3")),
                f_eq("name", PartialValue::new_iname("u4")),
                f_eq("name", PartialValue::new_iname("g4"))
            ])))
        };
        assert!(server_txn.delete(&de).is_ok());

        // Now revive and check each one, one at a time.
        let rev1 = unsafe {
            ReviveRecycledEvent::new_impersonate_entry(
                admin.clone(),
                filter_all!(f_eq("name", PartialValue::new_iname("u1"))),
            )
        };
        assert!(server_txn.revive_recycled(&rev1).is_ok());
        // check u1 contains MO ->
        assert!(check_entry_has_mo(
            &mut server_txn,
            "u1",
            "cca2bbfc-5b43-43f3-be9e-f5b03b3defec"
        ));

        // Revive u2 and check it has two mo.
        let rev2 = unsafe {
            ReviveRecycledEvent::new_impersonate_entry(
                admin.clone(),
                filter_all!(f_eq("name", PartialValue::new_iname("u2"))),
            )
        };
        assert!(server_txn.revive_recycled(&rev2).is_ok());
        assert!(check_entry_has_mo(
            &mut server_txn,
            "u2",
            "e44cf9cd-9941-44cb-a02f-307b6e15ac54"
        ));
        assert!(check_entry_has_mo(
            &mut server_txn,
            "u2",
            "d3132e6e-18ce-4b87-bee1-1d25e4bfe96d"
        ));

        // Revive u3 and g3 at the same time.
        let rev3 = unsafe {
            ReviveRecycledEvent::new_impersonate_entry(
                admin.clone(),
                filter_all!(f_or(vec![
                    f_eq("name", PartialValue::new_iname("u3")),
                    f_eq("name", PartialValue::new_iname("g3"))
                ])),
            )
        };
        assert!(server_txn.revive_recycled(&rev3).is_ok());
        assert!(
            check_entry_has_mo(
                &mut server_txn,
                "u3",
                "36048117-e479-45ed-aeb5-611e8d83d5b1"
            ) == false
        );

        // Revive u4, should NOT have the MO.
        let rev4a = unsafe {
            ReviveRecycledEvent::new_impersonate_entry(
                admin.clone(),
                filter_all!(f_eq("name", PartialValue::new_iname("u4"))),
            )
        };
        assert!(server_txn.revive_recycled(&rev4a).is_ok());
        assert!(
            check_entry_has_mo(
                &mut server_txn,
                "u4",
                "d5c59ac6-c533-4b00-989f-d0e183f07bab"
            ) == false
        );

        // Now revive g4, should allow MO onto u4.
        let rev4b = unsafe {
            ReviveRecycledEvent::new_impersonate_entry(
                admin,
                filter_all!(f_eq("name", PartialValue::new_iname("g4"))),
            )
        };
        assert!(server_txn.revive_recycled(&rev4b).is_ok());
        assert!(
            check_entry_has_mo(
                &mut server_txn,
                "u4",
                "d5c59ac6-c533-4b00-989f-d0e183f07bab"
            ) == false
        );

        assert!(server_txn.commit().is_ok());
    }
}

@@ -1 +0,0 @@
@@ -24,8 +24,8 @@ use webauthn_rs::prelude::{DeviceKey as DeviceKeyV4, Passkey as PasskeyV4};

use crate::be::dbentry::DbIdentSpn;
use crate::credential::Credential;
use crate::identity::{AccessScope, IdentityId};
use crate::repl::cid::Cid;
use crate::server::identity::{AccessScope, IdentityId};

lazy_static! {
    pub static ref SPN_RE: Regex = {
@@ -6,7 +6,6 @@ use time::OffsetDateTime;

use crate::be::dbvalue::{
    DbValueAccessScopeV1, DbValueIdentityId, DbValueOauth2Session, DbValueSession,
};
use crate::identity::{AccessScope, IdentityId};
use crate::prelude::*;
use crate::schema::SchemaAttribute;
use crate::value::{Oauth2Session, Session};
@@ -310,7 +310,7 @@ pub fn doit(input: &Path, output: &Path) {
    });

    // now collect these into the set of connections containing their operations.
    let connections: Vec<_> = connections.into_iter().map(|(_, v)| v).collect();
    let connections: Vec<_> = connections.into_values().collect();

    // Now from the set of connections, we need to know what access may or may not
    // be required.
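
// The change above replaces an explicit map over (key, value) pairs with
// HashMap::into_values (stable since Rust 1.54), which yields the owned
// values directly and reads more clearly. A minimal standalone equivalence
// check (illustrative, not part of orca):
fn into_values_equivalence() {
    use std::collections::HashMap;
    let m: HashMap<u32, &str> = HashMap::from([(1, "a"), (2, "b")]);
    let mut v1: Vec<_> = m.clone().into_iter().map(|(_, v)| v).collect();
    let mut v2: Vec<_> = m.into_values().collect();
    // iteration order is unspecified, so sort before comparing
    v1.sort();
    v2.sort();
    assert_eq!(v1, v2);
}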