From c4ecdf444775052dbd680689fe14741183940439 Mon Sep 17 00:00:00 2001 From: Firstyear Date: Mon, 24 Oct 2022 09:50:31 +1000 Subject: [PATCH] 20221022 improve test macros (#1139) --- Cargo.lock | 52 +- Cargo.toml | 11 +- kanidmd/core/Cargo.toml | 12 +- kanidmd/core/src/actors/v1_read.rs | 52 +- kanidmd/core/src/actors/v1_write.rs | 60 +- kanidmd/core/src/https/v1.rs | 7 +- kanidmd/core/src/lib.rs | 48 +- kanidmd/core/tests/https_middleware.rs | 80 - kanidmd/daemon/src/main.rs | 10 +- kanidmd/lib-macros/Cargo.toml | 13 + kanidmd/lib-macros/src/entry.rs | 99 + kanidmd/lib-macros/src/lib.rs | 28 + kanidmd/lib/Cargo.toml | 1 + kanidmd/lib/src/access.rs | 596 ++-- kanidmd/lib/src/filter.rs | 252 +- kanidmd/lib/src/idm/account.rs | 4 +- kanidmd/lib/src/idm/credupdatesession.rs | 8 +- kanidmd/lib/src/idm/oauth2.rs | 54 +- kanidmd/lib/src/idm/server.rs | 160 +- kanidmd/lib/src/idm/serviceaccount.rs | 8 +- kanidmd/lib/src/ldap.rs | 12 +- kanidmd/lib/src/lib.rs | 5 + kanidmd/lib/src/macros.rs | 84 +- kanidmd/lib/src/plugins/attrunique.rs | 6 +- kanidmd/lib/src/plugins/base.rs | 6 +- kanidmd/lib/src/plugins/domain.rs | 24 +- kanidmd/lib/src/plugins/dyngroup.rs | 6 +- kanidmd/lib/src/plugins/failure.rs | 6 - kanidmd/lib/src/plugins/gidnumber.rs | 4 +- kanidmd/lib/src/plugins/jwskeygen.rs | 4 +- kanidmd/lib/src/plugins/memberof.rs | 10 +- kanidmd/lib/src/plugins/mod.rs | 34 +- kanidmd/lib/src/plugins/password_import.rs | 4 +- kanidmd/lib/src/plugins/protected.rs | 6 +- kanidmd/lib/src/plugins/recycle.rs | 2 - kanidmd/lib/src/plugins/refint.rs | 18 +- kanidmd/lib/src/plugins/spn.rs | 68 +- kanidmd/lib/src/schema.rs | 124 +- kanidmd/lib/src/server.rs | 2569 ++++++++--------- kanidmd/lib/src/testkit.rs | 21 + kanidmd/testkit-macros/Cargo.toml | 14 + kanidmd/testkit-macros/src/entry.rs | 88 + kanidmd/testkit-macros/src/lib.rs | 23 + kanidmd/testkit/Cargo.toml | 41 + kanidmd/testkit/build.rs | 3 + .../tests/common.rs => testkit/src/lib.rs} | 20 +- .../tests/default_entries.rs | 63 +- kanidmd/testkit/tests/https_middleware.rs | 39 + .../{core => testkit}/tests/oauth2_test.rs | 9 +- .../{core => testkit}/tests/proto_v1_test.rs | 120 +- profiles/Cargo.toml | 2 + sketching/Cargo.toml | 2 + 52 files changed, 2577 insertions(+), 2415 deletions(-) delete mode 100644 kanidmd/core/tests/https_middleware.rs create mode 100644 kanidmd/lib-macros/Cargo.toml create mode 100644 kanidmd/lib-macros/src/entry.rs create mode 100644 kanidmd/lib-macros/src/lib.rs delete mode 100644 kanidmd/lib/src/plugins/failure.rs delete mode 100644 kanidmd/lib/src/plugins/recycle.rs create mode 100644 kanidmd/lib/src/testkit.rs create mode 100644 kanidmd/testkit-macros/Cargo.toml create mode 100644 kanidmd/testkit-macros/src/entry.rs create mode 100644 kanidmd/testkit-macros/src/lib.rs create mode 100644 kanidmd/testkit/Cargo.toml create mode 100644 kanidmd/testkit/build.rs rename kanidmd/{core/tests/common.rs => testkit/src/lib.rs} (81%) rename kanidmd/{core => testkit}/tests/default_entries.rs (94%) create mode 100644 kanidmd/testkit/tests/https_middleware.rs rename kanidmd/{core => testkit}/tests/oauth2_test.rs (98%) rename kanidmd/{core => testkit}/tests/proto_v1_test.rs (93%) diff --git a/Cargo.lock b/Cargo.lock index 76d0ee7ba..bb6cb1d0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2274,24 +2274,19 @@ dependencies = [ name = "kanidmd_core" version = "1.1.0-alpha.9" dependencies = [ - "async-std", "async-trait", "chrono", "compact_jwt", - "futures", "futures-util", "http-types", - "kanidm_client", "kanidm_proto", "kanidmd_lib", 
"ldap3_proto", "libc", - "oauth2", "openssl", "profiles", "rand 0.8.5", "regex", - "reqwest", "saffron", "serde", "serde_json", @@ -2304,9 +2299,7 @@ dependencies = [ "tokio-openssl", "tokio-util", "tracing", - "url", "uuid", - "webauthn-authenticator-rs", ] [[package]] @@ -2329,6 +2322,7 @@ dependencies = [ "hex", "idlset", "kanidm_proto", + "kanidmd_lib_macros", "lazy_static", "ldap3_proto", "libc", @@ -2367,6 +2361,37 @@ dependencies = [ "zxcvbn", ] +[[package]] +name = "kanidmd_lib_macros" +version = "0.1.0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "kanidmd_testkit" +version = "1.1.0-alpha.9" +dependencies = [ + "compact_jwt", + "futures", + "kanidm_client", + "kanidm_proto", + "kanidmd_core", + "kanidmd_lib", + "oauth2", + "profiles", + "reqwest", + "serde_json", + "sketching", + "testkit-macros", + "tokio", + "tracing", + "url", + "webauthn-authenticator-rs", +] + [[package]] name = "kanidmd_web_ui" version = "1.1.0-alpha.9" @@ -3582,9 +3607,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.20.6" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ "log", "ring", @@ -4157,6 +4182,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "testkit-macros" +version = "0.1.0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "textwrap" version = "0.15.1" diff --git a/Cargo.toml b/Cargo.toml index 951e5364a..dbf8cdc36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,10 @@ members = [ "kanidmd_web_ui", "kanidmd/daemon", "kanidmd/lib", + "kanidmd/lib-macros", "kanidmd/core", + "kanidmd/testkit", + "kanidmd/testkit-macros", "orca", "profiles", "sketching" @@ -72,6 +75,8 @@ js-sys = "^0.3.58" kanidmd_core = { path = "./kanidmd/core" } kanidmd_idm = { path = "./kanidmd/idm" } kanidmd_lib = { path = "./kanidmd/lib" } +kanidmd_lib_macros = { path = "./kanidmd/lib-macros" } +kanidmd_testkit = { path = "./kanidmd/testkit" } kanidm_client = { path = "./kanidm_client" } kanidm_proto = { path = "./kanidm_proto" } kanidm_unix_int = { path = "./kanidm_unix_int" } @@ -89,8 +94,10 @@ oauth2_ext = { version = "^4.1.0", package = "oauth2" } openssl = "^0.10.41" paste = "^1.0.9" pkg-config = "^0.3.25" +proc-macro2 = "1.0.7" profiles = { path = "./profiles" } qrcode = "^0.12.0" +quote = "1" r2d2 = "^0.8.9" r2d2_sqlite = "^0.21.0" rand = "^0.8.5" @@ -108,12 +115,12 @@ sketching = { path = "./sketching" } smartstring = "^1.0.1" smolset = "^1.3.1" sshkeys = "^0.3.1" - +syn = { version = "1.0.56", features = ["full"] } +testkit-macros = { path = "./kanidmd/testkit-macros" } tide = "^0.16.0" # Including brotli *very* slow, so don't do that. Including the "default" feature pulls a mime-type list from the internet on build, which isn't used. tide-compress = { version="0.10.6", default-features = false, features = [ "gzip", "regex-check" ] } tide-openssl = "^0.1.1" - # Unable to increase version due to removing ability to detect # local platform time. 
time = "=0.2.27" diff --git a/kanidmd/core/Cargo.toml b/kanidmd/core/Cargo.toml index d489251bc..9663790d9 100644 --- a/kanidmd/core/Cargo.toml +++ b/kanidmd/core/Cargo.toml @@ -2,6 +2,7 @@ name = "kanidmd_core" description = "Kanidm Server Core and Library" documentation = "https://docs.rs/kanidm/latest/kanidm/" +autotests = false version.workspace = true authors.workspace = true @@ -12,7 +13,6 @@ homepage.workspace = true repository.workspace = true [dependencies] -async-std = { workspace = true, features = ["tokio1"] } async-trait.workspace = true chrono.workspace = true compact_jwt.workspace = true @@ -41,13 +41,3 @@ uuid = { workspace = true, features = ["serde", "v4" ] } [build-dependencies] profiles.workspace = true - -[dev-dependencies] -kanidm_client.workspace = true -futures.workspace = true - -webauthn-authenticator-rs.workspace = true -oauth2_ext = { workspace = true, default-features = false } - -url = { workspace = true, features = ["serde"] } -reqwest = { workspace = true, features=["cookies", "json", "native-tls"] } diff --git a/kanidmd/core/src/actors/v1_read.rs b/kanidmd/core/src/actors/v1_read.rs index 91dcbbb12..55813a136 100644 --- a/kanidmd/core/src/actors/v1_read.rs +++ b/kanidmd/core/src/actors/v1_read.rs @@ -77,7 +77,7 @@ impl QueryServerReadV1 { ) -> Result { // Begin a read let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -173,7 +173,7 @@ impl QueryServerReadV1 { // Scope to limit the read txn. { - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; idms_prox_read .qs_read .get_be_txn() @@ -286,7 +286,7 @@ impl QueryServerReadV1 { // TODO #62: Move this to IdmServer!!! // Begin a read let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; // Make an event from the whoami request. This will process the event and // generate a selfuuid search. 
// @@ -333,7 +333,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result, OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -376,7 +376,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result, OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -419,7 +419,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result, OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -479,7 +479,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let mut idms_prox_read = self.idms.proxy_read_async().await; + let mut idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -526,7 +526,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let mut idms_prox_read = self.idms.proxy_read_async().await; + let mut idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -573,7 +573,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let mut idms_prox_read = self.idms.proxy_read_async().await; + let mut idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -619,7 +619,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result, OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -682,7 +682,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result, OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -746,7 +746,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result, OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -778,7 +778,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result, OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -860,7 +860,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let mut idms_prox_read = self.idms.proxy_read_async().await; + let mut idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read 
.validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -906,7 +906,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let mut idms_prox_read = self.idms.proxy_read_async().await; + let mut idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -1112,7 +1112,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result, OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let ident = idms_prox_read .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -1162,7 +1162,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let (ident, uat) = idms_prox_read .validate_and_parse_uat(uat.as_deref(), ct) .and_then(|uat| { @@ -1191,7 +1191,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let (ident, uat) = idms_prox_read .validate_and_parse_uat(uat.as_deref(), ct) .and_then(|uat| { @@ -1219,7 +1219,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; let (ident, uat) = idms_prox_read .validate_and_parse_uat(uat.as_deref(), ct) .and_then(|uat| { @@ -1247,7 +1247,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; // Now we can send to the idm server for authorisation checking. idms_prox_read.check_oauth2_token_exchange(client_authz.as_deref(), &token_req, ct) } @@ -1264,7 +1264,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; // Now we can send to the idm server for introspection checking. 
idms_prox_read.check_oauth2_token_introspect(&client_authz, &intr_req, ct) } @@ -1281,7 +1281,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; idms_prox_read.oauth2_openid_userinfo(&client_id, &client_authz, ct) } @@ -1295,7 +1295,7 @@ impl QueryServerReadV1 { client_id: String, eventid: Uuid, ) -> Result { - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; idms_prox_read.oauth2_openid_discovery(&client_id) } @@ -1309,7 +1309,7 @@ impl QueryServerReadV1 { client_id: String, eventid: Uuid, ) -> Result { - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; idms_prox_read.oauth2_openid_publickey(&client_id) } @@ -1319,7 +1319,7 @@ impl QueryServerReadV1 { fields(uuid = ?eventid) )] pub async fn get_domain_display_name(&self, eventid: Uuid) -> String { - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; idms_prox_read.qs_read.get_domain_display_name().to_string() } @@ -1334,7 +1334,7 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_read = self.idms.proxy_read_async().await; + let idms_prox_read = self.idms.proxy_read().await; idms_prox_read .validate_and_parse_uat(uat.as_deref(), ct) diff --git a/kanidmd/core/src/actors/v1_write.rs b/kanidmd/core/src/actors/v1_write.rs index fe34a2117..b5213350f 100644 --- a/kanidmd/core/src/actors/v1_write.rs +++ b/kanidmd/core/src/actors/v1_write.rs @@ -58,7 +58,7 @@ impl QueryServerWriteV1 { proto_ml: &ProtoModifyList, filter: Filter, ) -> Result<(), OperationError> { - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write @@ -106,7 +106,7 @@ impl QueryServerWriteV1 { ml: &ModifyList, filter: Filter, ) -> Result<(), OperationError> { - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write @@ -160,7 +160,7 @@ impl QueryServerWriteV1 { req: CreateRequest, eventid: Uuid, ) -> Result<(), OperationError> { - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write @@ -197,7 +197,7 @@ impl QueryServerWriteV1 { req: ModifyRequest, eventid: Uuid, ) -> Result<(), OperationError> { - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -233,7 +233,7 @@ impl QueryServerWriteV1 { req: DeleteRequest, eventid: Uuid, ) -> Result<(), OperationError> { - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write 
.validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -270,7 +270,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { // Given a protoEntry, turn this into a modification set. - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -311,7 +311,7 @@ impl QueryServerWriteV1 { filter: Filter, eventid: Uuid, ) -> Result<(), OperationError> { - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -346,7 +346,7 @@ impl QueryServerWriteV1 { filter: Filter, eventid: Uuid, ) -> Result<(), OperationError> { - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -382,7 +382,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let mut idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -430,7 +430,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -472,7 +472,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -512,7 +512,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -550,7 +550,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; // We specifically need a uat here to assess the auth type! 
let (ident, uat) = idms_prox_write @@ -596,7 +596,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(CUSessionToken, CUStatus), OperationError> { let ct = duration_from_epoch_now(); - let mut idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -645,7 +645,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let mut idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -690,7 +690,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(CUSessionToken, CUStatus), OperationError> { let ct = duration_from_epoch_now(); - let mut idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let intent_token = CredentialUpdateIntentToken { intent_id: intent_token.token, }; @@ -726,7 +726,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); - let mut idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let session_token = CredentialUpdateSessionToken { token_enc: session_token.token, }; @@ -754,7 +754,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); - let mut idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let session_token = CredentialUpdateSessionToken { token_enc: session_token.token, }; @@ -783,7 +783,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -815,7 +815,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result { let ct = duration_from_epoch_now(); - let mut idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -863,7 +863,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); - let idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -914,7 +914,7 @@ impl QueryServerWriteV1 { filter: Filter, eventid: Uuid, ) -> Result<(), OperationError> { - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write .validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -1130,7 +1130,7 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); - let mut idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; let ident = idms_prox_write 
.validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { @@ -1179,7 +1179,7 @@ impl QueryServerWriteV1 { ) -> Result<(), OperationError> { // Because this is from internal, we can generate a real modlist, rather // than relying on the proto ones. - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write @@ -1237,7 +1237,7 @@ impl QueryServerWriteV1 { filter: Filter, eventid: Uuid, ) -> Result<(), OperationError> { - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write @@ -1293,7 +1293,7 @@ impl QueryServerWriteV1 { ) -> Result<(), OperationError> { // Because this is from internal, we can generate a real modlist, rather // than relying on the proto ones. - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write @@ -1351,7 +1351,7 @@ impl QueryServerWriteV1 { filter: Filter, eventid: Uuid, ) -> Result<(), OperationError> { - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let ct = duration_from_epoch_now(); let ident = idms_prox_write @@ -1400,7 +1400,7 @@ impl QueryServerWriteV1 { )] pub async fn handle_purgetombstoneevent(&self, msg: PurgeTombstoneEvent) { trace!(?msg, "Begin purge tombstone event"); - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let res = idms_prox_write .qs_write @@ -1418,7 +1418,7 @@ impl QueryServerWriteV1 { )] pub async fn handle_purgerecycledevent(&self, msg: PurgeRecycledEvent) { trace!(?msg, "Begin purge recycled event"); - let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; + let idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await; let res = idms_prox_write .qs_write .purge_recycled() @@ -1435,7 +1435,7 @@ impl QueryServerWriteV1 { trace!("Begin delayed action ..."); let ct = duration_from_epoch_now(); - let mut idms_prox_write = self.idms.proxy_write_async(ct).await; + let mut idms_prox_write = self.idms.proxy_write(ct).await; if let Err(res) = idms_prox_write .process_delayedaction(da) .and_then(|_| idms_prox_write.commit()) diff --git a/kanidmd/core/src/https/v1.rs b/kanidmd/core/src/https/v1.rs index cf969dcee..ab7d8659e 100644 --- a/kanidmd/core/src/https/v1.rs +++ b/kanidmd/core/src/https/v1.rs @@ -1,7 +1,6 @@ use std::str::FromStr; use std::time::Duration; -use async_std::task; use compact_jwt::Jws; use kanidm_proto::v1::{ AccountUnixExtend, ApiTokenGenerate, AuthRequest, AuthResponse, AuthState as ProtoAuthState, @@ -1068,12 +1067,8 @@ pub async fn auth(mut req: tide::Request) -> tide::Result { let AuthResult { state, sessionid, - delay, + delay: _, } = ar; - // If there is a delay, honour it now. - if let Some(delay_timer) = delay { - task::sleep(delay_timer).await; - } // Do some response/state management. 
match state { AuthState::Choose(allowed) => { diff --git a/kanidmd/core/src/lib.rs b/kanidmd/core/src/lib.rs index 98fb5c83c..cf3923395 100644 --- a/kanidmd/core/src/lib.rs +++ b/kanidmd/core/src/lib.rs @@ -34,7 +34,6 @@ mod ldaps; use std::sync::Arc; -use async_std::task; use compact_jwt::JwsSigner; use kanidm_proto::messages::{AccountChangeMessage, MessageStatus}; use kanidm_proto::v1::OperationError; @@ -96,7 +95,7 @@ fn setup_backend_vacuum( // outside of this call, then pass in "what we need" in a cloneable // form, this way we could have seperate Idm vs Qs threads, and dedicated // threads for write vs read -fn setup_qs_idms( +async fn setup_qs_idms( be: Backend, schema: Schema, config: &Configuration, @@ -112,7 +111,9 @@ fn setup_qs_idms( // Now search for the schema itself, and validate that the system // in memory matches the BE on disk, and that it's syntactically correct. // Write it out if changes are needed. - query_server.initialise_helper(duration_from_epoch_now())?; + query_server + .initialise_helper(duration_from_epoch_now()) + .await?; // We generate a SINGLE idms only! @@ -121,7 +122,7 @@ fn setup_qs_idms( Ok((query_server, idms, idms_delayed)) } -fn setup_qs( +async fn setup_qs( be: Backend, schema: Schema, config: &Configuration, @@ -137,7 +138,9 @@ fn setup_qs( // Now search for the schema itself, and validate that the system // in memory matches the BE on disk, and that it's syntactically correct. // Write it out if changes are needed. - query_server.initialise_helper(duration_from_epoch_now())?; + query_server + .initialise_helper(duration_from_epoch_now()) + .await?; Ok(query_server) } @@ -261,7 +264,7 @@ pub fn backup_server_core(config: &Configuration, dst_path: &str) { // Let the txn abort, even on success. } -pub fn restore_server_core(config: &Configuration, dst_path: &str) { +pub async fn restore_server_core(config: &Configuration, dst_path: &str) { touch_file_or_quit(config.db_path.as_str()); // First, we provide the in-memory schema so that core attrs are indexed correctly. @@ -292,7 +295,7 @@ pub fn restore_server_core(config: &Configuration, dst_path: &str) { info!("Attempting to init query server ..."); - let (qs, _idms, _idms_delayed) = match setup_qs_idms(be, schema, config) { + let (qs, _idms, _idms_delayed) = match setup_qs_idms(be, schema, config).await { Ok(t) => t, Err(e) => { error!("Unable to setup query server or idm server -> {:?}", e); @@ -303,7 +306,7 @@ pub fn restore_server_core(config: &Configuration, dst_path: &str) { info!("Start reindex phase ..."); - let qs_write = task::block_on(qs.write_async(duration_from_epoch_now())); + let qs_write = qs.write(duration_from_epoch_now()).await; let r = qs_write.reindex().and_then(|_| qs_write.commit()); match r { @@ -317,7 +320,7 @@ pub fn restore_server_core(config: &Configuration, dst_path: &str) { info!("✅ Restore Success!"); } -pub fn reindex_server_core(config: &Configuration) { +pub async fn reindex_server_core(config: &Configuration) { eprintln!("Start Index Phase 1 ..."); // First, we provide the in-memory schema so that core attrs are indexed correctly. 
let schema = match Schema::new() { @@ -349,7 +352,7 @@ pub fn reindex_server_core(config: &Configuration) { eprintln!("Attempting to init query server ..."); - let (qs, _idms, _idms_delayed) = match setup_qs_idms(be, schema, config) { + let (qs, _idms, _idms_delayed) = match setup_qs_idms(be, schema, config).await { Ok(t) => t, Err(e) => { error!("Unable to setup query server or idm server -> {:?}", e); @@ -360,7 +363,7 @@ pub fn reindex_server_core(config: &Configuration) { eprintln!("Start Index Phase 2 ..."); - let qs_write = task::block_on(qs.write_async(duration_from_epoch_now())); + let qs_write = qs.write(duration_from_epoch_now()).await; let r = qs_write.reindex().and_then(|_| qs_write.commit()); match r { @@ -394,7 +397,7 @@ pub fn vacuum_server_core(config: &Configuration) { }; } -pub fn domain_rename_core(config: &Configuration) { +pub async fn domain_rename_core(config: &Configuration) { let schema = match Schema::new() { Ok(s) => s, Err(e) => { @@ -413,7 +416,7 @@ pub fn domain_rename_core(config: &Configuration) { }; // Setup the qs, and perform any migrations and changes we may have. - let qs = match setup_qs(be, schema, config) { + let qs = match setup_qs(be, schema, config).await { Ok(t) => t, Err(e) => { error!("Unable to setup query server -> {:?}", e); @@ -424,7 +427,7 @@ pub fn domain_rename_core(config: &Configuration) { let new_domain_name = config.domain.as_str(); // make sure we're actually changing the domain name... - match task::block_on(qs.read_async()).get_db_domain_name() { + match qs.read().await.get_db_domain_name() { Ok(old_domain_name) => { admin_info!(?old_domain_name, ?new_domain_name); if &old_domain_name == &new_domain_name { @@ -443,7 +446,7 @@ pub fn domain_rename_core(config: &Configuration) { } } - let qs_write = task::block_on(qs.write_async(duration_from_epoch_now())); + let mut qs_write = qs.write(duration_from_epoch_now()).await; let r = qs_write .domain_rename(new_domain_name) .and_then(|_| qs_write.commit()); @@ -457,7 +460,7 @@ pub fn domain_rename_core(config: &Configuration) { }; } -pub fn verify_server_core(config: &Configuration) { +pub async fn verify_server_core(config: &Configuration) { // setup the qs - without initialise! let schema_mem = match Schema::new() { Ok(sc) => sc, @@ -477,7 +480,7 @@ pub fn verify_server_core(config: &Configuration) { let server = QueryServer::new(be, schema_mem, config.domain.clone()); // Run verifications. - let r = server.verify(); + let r = server.verify().await; if r.is_empty() { eprintln!("Verification passed!"); @@ -492,7 +495,7 @@ pub fn verify_server_core(config: &Configuration) { // Now add IDM server verifications? } -pub fn recover_account_core(config: &Configuration, name: &str) { +pub async fn recover_account_core(config: &Configuration, name: &str) { let schema = match Schema::new() { Ok(s) => s, Err(e) => { @@ -510,7 +513,7 @@ pub fn recover_account_core(config: &Configuration, name: &str) { } }; // setup the qs - *with* init of the migrations and schema. - let (_qs, idms, _idms_delayed) = match setup_qs_idms(be, schema, config) { + let (_qs, idms, _idms_delayed) = match setup_qs_idms(be, schema, config).await { Ok(t) => t, Err(e) => { error!("Unable to setup query server or idm server -> {:?}", e); @@ -519,7 +522,7 @@ pub fn recover_account_core(config: &Configuration, name: &str) { }; // Run the password change. 
- let mut idms_prox_write = task::block_on(idms.proxy_write_async(duration_from_epoch_now())); + let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()).await; let new_pw = match idms_prox_write.recover_account(name, None) { Ok(new_pw) => match idms_prox_write.commit() { Ok(_) => new_pw, @@ -601,7 +604,7 @@ pub async fn create_server_core(config: Configuration, config_test: bool) -> Res } }; // Start the IDM server. - let (_qs, idms, mut idms_delayed) = match setup_qs_idms(be, schema, &config) { + let (_qs, idms, mut idms_delayed) = match setup_qs_idms(be, schema, &config).await { Ok(t) => t, Err(e) => { error!("Unable to setup query server or idm server -> {:?}", e); @@ -622,8 +625,7 @@ pub async fn create_server_core(config: Configuration, config_test: bool) -> Res // Any pre-start tasks here. match &config.integration_test_config { Some(itc) => { - let mut idms_prox_write = - task::block_on(idms.proxy_write_async(duration_from_epoch_now())); + let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()).await; match idms_prox_write.recover_account("admin", Some(&itc.admin_password)) { Ok(_) => {} Err(e) => { diff --git a/kanidmd/core/tests/https_middleware.rs b/kanidmd/core/tests/https_middleware.rs deleted file mode 100644 index 9b7bbf9d1..000000000 --- a/kanidmd/core/tests/https_middleware.rs +++ /dev/null @@ -1,80 +0,0 @@ -use std::sync::atomic::Ordering; - -mod common; -use kanidmd_core::config::{Configuration, IntegrationTestConfig, ServerRole}; -use kanidmd_core::create_server_core; -use tokio::task; - -use crate::common::{is_free_port, ADMIN_TEST_PASSWORD, ADMIN_TEST_USER, PORT_ALLOC}; - -#[tokio::test] -async fn test_https_middleware_headers() { - // tests stuff - let _ = sketching::test_init(); - - let mut counter = 0; - let port = loop { - let possible_port = PORT_ALLOC.fetch_add(1, Ordering::SeqCst); - if is_free_port(possible_port) { - break possible_port; - } - counter += 1; - if counter >= 5 { - eprintln!("Unable to allocate port!"); - assert!(false); - } - }; - - let int_config = Box::new(IntegrationTestConfig { - admin_user: ADMIN_TEST_USER.to_string(), - admin_password: ADMIN_TEST_PASSWORD.to_string(), - }); - - // Setup the config ... - let mut config = Configuration::new(); - config.address = format!("127.0.0.1:{}", port); - config.secure_cookies = false; - config.integration_test_config = Some(int_config); - config.role = ServerRole::WriteReplica; - config.threads = 1; - - create_server_core(config, false) - .await - .expect("failed to start server core"); - // We have to yield now to guarantee that the tide elements are setup. 
- task::yield_now().await; - - let addr = format!("http://127.0.0.1:{}/", port); - - // here we test the /ui/ endpoint which should have the headers - let response = match reqwest::get(format!("{}ui/", &addr)).await { - Ok(value) => value, - Err(error) => { - panic!("Failed to query {:?} : {:#?}", addr, error); - } - }; - eprintln!("response: {:#?}", response); - assert_eq!(response.status(), 200); - - eprintln!( - "csp headers: {:#?}", - response.headers().get("content-security-policy") - ); - assert_ne!(response.headers().get("content-security-policy"), None); - - // here we test the /pkg/ endpoint which shouldn't have the headers - let response = - match reqwest::get(format!("{}pkg/external/bootstrap.bundle.min.js", &addr)).await { - Ok(value) => value, - Err(error) => { - panic!("Failed to query {:?} : {:#?}", addr, error); - } - }; - eprintln!("response: {:#?}", response); - assert_eq!(response.status(), 200); - eprintln!( - "csp headers: {:#?}", - response.headers().get("content-security-policy") - ); - assert_eq!(response.headers().get("content-security-policy"), None); -} diff --git a/kanidmd/daemon/src/main.rs b/kanidmd/daemon/src/main.rs index 9dea7955b..f90c4d0ed 100644 --- a/kanidmd/daemon/src/main.rs +++ b/kanidmd/daemon/src/main.rs @@ -374,23 +374,23 @@ async fn main() { std::process::exit(1); } }; - restore_server_core(&config, p); + restore_server_core(&config, p).await; } KanidmdOpt::Database { commands: DbCommands::Verify(_vopt), } => { eprintln!("Running in db verification mode ..."); - verify_server_core(&config); + verify_server_core(&config).await; } KanidmdOpt::RecoverAccount(raopt) => { eprintln!("Running account recovery ..."); - recover_account_core(&config, &raopt.name); + recover_account_core(&config, &raopt.name).await; } KanidmdOpt::Database { commands: DbCommands::Reindex(_copt), } => { eprintln!("Running in reindex mode ..."); - reindex_server_core(&config); + reindex_server_core(&config).await; } KanidmdOpt::DbScan { commands: DbScanOpt::ListIndexes(_), @@ -426,7 +426,7 @@ async fn main() { commands: DomainSettingsCmds::DomainChange(_dopt), } => { eprintln!("Running in domain name change mode ... 
this may take a long time ..."); - domain_rename_core(&config); + domain_rename_core(&config).await; } KanidmdOpt::Database { commands: DbCommands::Vacuum(_copt), diff --git a/kanidmd/lib-macros/Cargo.toml b/kanidmd/lib-macros/Cargo.toml new file mode 100644 index 000000000..a3390a5a5 --- /dev/null +++ b/kanidmd/lib-macros/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "kanidmd_lib_macros" +version = "0.1.0" +edition = "2021" + +[lib] +proc-macro = true + +[dependencies] +proc-macro2.workspace = true +quote.workspace = true +syn.workspace = true + diff --git a/kanidmd/lib-macros/src/entry.rs b/kanidmd/lib-macros/src/entry.rs new file mode 100644 index 000000000..7ed1d8105 --- /dev/null +++ b/kanidmd/lib-macros/src/entry.rs @@ -0,0 +1,99 @@ +use proc_macro::TokenStream; +use proc_macro2::{Ident, Span}; +use quote::{quote, quote_spanned, ToTokens}; +use syn::spanned::Spanned; + +fn token_stream_with_error(mut tokens: TokenStream, error: syn::Error) -> TokenStream { + tokens.extend(TokenStream::from(error.into_compile_error())); + tokens +} + +pub(crate) fn qs_test(_args: TokenStream, item: TokenStream, with_init: bool) -> TokenStream { + let input: syn::ItemFn = match syn::parse(item.clone()) { + Ok(it) => it, + Err(e) => return token_stream_with_error(item, e), + }; + + if let Some(attr) = input.attrs.iter().find(|attr| attr.path.is_ident("test")) { + let msg = "second test attribute is supplied"; + return token_stream_with_error(item, syn::Error::new_spanned(&attr, msg)); + }; + + if input.sig.asyncness.is_none() { + let msg = "the `async` keyword is missing from the function declaration"; + return token_stream_with_error(item, syn::Error::new_spanned(input.sig.fn_token, msg)); + } + + // If type mismatch occurs, the current rustc points to the last statement. + let (last_stmt_start_span, _last_stmt_end_span) = { + let mut last_stmt = input + .block + .stmts + .last() + .map(ToTokens::into_token_stream) + .unwrap_or_default() + .into_iter(); + // `Span` on stable Rust has a limitation that only points to the first + // token, not the whole tokens. We can work around this limitation by + // using the first/last span of the tokens like + // `syn::Error::new_spanned` does. + let start = last_stmt.next().map_or_else(Span::call_site, |t| t.span()); + let end = last_stmt.last().map_or(start, |t| t.span()); + (start, end) + }; + + let rt = quote_spanned! {last_stmt_start_span=> + tokio::runtime::Builder::new_current_thread() + }; + + let header = quote! { + #[::core::prelude::v1::test] + }; + + let init = if with_init { + quote! { + test_server.initialise_helper(duration_from_epoch_now()) + .await + .expect("init failed!"); + } + } else { + quote! {} + }; + + let test_fn = &input.sig.ident; + let test_driver = Ident::new(&format!("qs_{}", test_fn), input.sig.span()); + + // Effectively we are just injecting a real test function around this which we will + // call. + + let result = quote! { + #input + + #header + fn #test_driver() { + let body = async { + let test_server = crate::testkit::setup_test().await; + + #init + + #test_fn(&test_server).await; + + // Any needed teardown? + // Make sure there are no errors. 
+ let verifications = test_server.verify().await; + trace!("Verification result: {:?}", verifications); + assert!(verifications.len() == 0); + }; + #[allow(clippy::expect_used, clippy::diverging_sub_expression)] + { + return #rt + .enable_all() + .build() + .expect("Failed building the Runtime") + .block_on(body); + } + } + }; + + result.into() +} diff --git a/kanidmd/lib-macros/src/lib.rs b/kanidmd/lib-macros/src/lib.rs new file mode 100644 index 000000000..a36607def --- /dev/null +++ b/kanidmd/lib-macros/src/lib.rs @@ -0,0 +1,28 @@ +#![deny(warnings)] +#![warn(unused_extern_crates)] +#![deny(clippy::todo)] +#![deny(clippy::unimplemented)] +#![deny(clippy::unwrap_used)] +#![deny(clippy::expect_used)] +#![deny(clippy::panic)] +#![deny(clippy::unreachable)] +#![deny(clippy::await_holding_lock)] +#![deny(clippy::needless_pass_by_value)] +#![deny(clippy::trivially_copy_pass_by_ref)] + +mod entry; + +#[allow(unused_extern_crates)] +extern crate proc_macro; + +use proc_macro::TokenStream; + +#[proc_macro_attribute] +pub fn qs_test(args: TokenStream, item: TokenStream) -> TokenStream { + entry::qs_test(args, item, true) +} + +#[proc_macro_attribute] +pub fn qs_test_no_init(args: TokenStream, item: TokenStream) -> TokenStream { + entry::qs_test(args, item, false) +} diff --git a/kanidmd/lib/Cargo.toml b/kanidmd/lib/Cargo.toml index ec12533ae..52c8ede91 100644 --- a/kanidmd/lib/Cargo.toml +++ b/kanidmd/lib/Cargo.toml @@ -35,6 +35,7 @@ hashbrown.workspace = true hex.workspace = true idlset.workspace = true kanidm_proto.workspace = true +kanidmd_lib_macros.workspace = true lazy_static.workspace = true ldap3_proto.workspace = true libc.workspace = true diff --git a/kanidmd/lib/src/access.rs b/kanidmd/lib/src/access.rs index 2e0e83940..981019ad0 100644 --- a/kanidmd/lib/src/access.rs +++ b/kanidmd/lib/src/access.rs @@ -1571,46 +1571,45 @@ mod tests { }}; } - #[test] - fn test_access_acp_parser() { - run_test!(|qs: &QueryServer| { - // Test parsing entries to acp. There so no point testing schema violations - // because the schema system is well tested an robust. Instead we target - // entry misconfigurations, such as missing classes required. + #[qs_test] + async fn test_access_acp_parser(qs: &QueryServer) { + // Test parsing entries to acp. There so no point testing schema violations + // because the schema system is well tested an robust. Instead we target + // entry misconfigurations, such as missing classes required. - // Generally, we are testing the *positive* cases here, because schema - // really protects us *a lot* here, but it's nice to have defence and - // layers of validation. + // Generally, we are testing the *positive* cases here, because schema + // really protects us *a lot* here, but it's nice to have defence and + // layers of validation. 
- let mut qs_write = qs.write(duration_from_epoch_now()); + let mut qs_write = qs.write(duration_from_epoch_now()).await; - acp_from_entry_err!( - &mut qs_write, - r#"{ + acp_from_entry_err!( + &mut qs_write, + r#"{ "attrs": { "class": ["object"], "name": ["acp_invalid"], "uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"] } }"#, - AccessControlProfile - ); + AccessControlProfile + ); - acp_from_entry_err!( - &mut qs_write, - r#"{ + acp_from_entry_err!( + &mut qs_write, + r#"{ "attrs": { "class": ["object", "access_control_profile"], "name": ["acp_invalid"], "uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"] } }"#, - AccessControlProfile - ); + AccessControlProfile + ); - acp_from_entry_err!( - &mut qs_write, - r#"{ + acp_from_entry_err!( + &mut qs_write, + r#"{ "attrs": { "class": ["object", "access_control_profile"], "name": ["acp_invalid"], @@ -1619,42 +1618,40 @@ mod tests { "acp_targetscope": [""] } }"#, - AccessControlProfile - ); + AccessControlProfile + ); - // "\"Self\"" - acp_from_entry_ok!( - &mut qs_write, - entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("access_control_profile")), - ("name", Value::new_iname("acp_valid")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ( - "acp_receiver", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ), - ( - "acp_targetscope", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ) + // "\"Self\"" + acp_from_entry_ok!( + &mut qs_write, + entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("access_control_profile")), + ("name", Value::new_iname("acp_valid")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") ), - AccessControlProfile - ); - }) + ( + "acp_receiver", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ), + ( + "acp_targetscope", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ) + ), + AccessControlProfile + ); } - #[test] - fn test_access_acp_delete_parser() { - run_test!(|qs: &QueryServer| { - let mut qs_write = qs.write(duration_from_epoch_now()); + #[qs_test] + async fn test_access_acp_delete_parser(qs: &QueryServer) { + let mut qs_write = qs.write(duration_from_epoch_now()).await; - acp_from_entry_err!( - &mut qs_write, - r#"{ + acp_from_entry_err!( + &mut qs_write, + r#"{ "attrs": { "class": ["object", "access_control_profile"], "name": ["acp_valid"], @@ -1667,44 +1664,42 @@ mod tests { ] } }"#, - AccessControlDelete - ); + AccessControlDelete + ); - acp_from_entry_ok!( - &mut qs_write, - entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("access_control_profile")), - ("class", Value::new_class("access_control_delete")), - ("name", Value::new_iname("acp_valid")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ( - "acp_receiver", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ), - ( - "acp_targetscope", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ) + acp_from_entry_ok!( + &mut qs_write, + entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("access_control_profile")), + ("class", Value::new_class("access_control_delete")), + ("name", Value::new_iname("acp_valid")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") ), - AccessControlDelete - ); - }) + ( + "acp_receiver", + 
Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ), + ( + "acp_targetscope", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ) + ), + AccessControlDelete + ); } - #[test] - fn test_access_acp_search_parser() { - run_test!(|qs: &QueryServer| { - // Test that parsing search access controls works. - let mut qs_write = qs.write(duration_from_epoch_now()); + #[qs_test] + async fn test_access_acp_search_parser(qs: &QueryServer) { + // Test that parsing search access controls works. + let mut qs_write = qs.write(duration_from_epoch_now()).await; - // Missing class acp - acp_from_entry_err!( - &mut qs_write, - r#"{ + // Missing class acp + acp_from_entry_err!( + &mut qs_write, + r#"{ "attrs": { "class": ["object", "access_control_search"], "name": ["acp_invalid"], @@ -1718,13 +1713,13 @@ mod tests { "acp_search_attr": ["name", "class"] } }"#, - AccessControlSearch - ); + AccessControlSearch + ); - // Missing class acs - acp_from_entry_err!( - &mut qs_write, - r#"{ + // Missing class acs + acp_from_entry_err!( + &mut qs_write, + r#"{ "attrs": { "class": ["object", "access_control_profile"], "name": ["acp_invalid"], @@ -1738,13 +1733,13 @@ mod tests { "acp_search_attr": ["name", "class"] } }"#, - AccessControlSearch - ); + AccessControlSearch + ); - // Missing attr acp_search_attr - acp_from_entry_err!( - &mut qs_write, - r#"{ + // Missing attr acp_search_attr + acp_from_entry_err!( + &mut qs_write, + r#"{ "attrs": { "class": ["object", "access_control_profile", "access_control_search"], "name": ["acp_invalid"], @@ -1757,206 +1752,15 @@ mod tests { ] } }"#, - AccessControlSearch - ); + AccessControlSearch + ); - // All good! - acp_from_entry_ok!( - &mut qs_write, - entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("access_control_profile")), - ("class", Value::new_class("access_control_search")), - ("name", Value::new_iname("acp_valid")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ( - "acp_receiver", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ), - ( - "acp_targetscope", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ), - ("acp_search_attr", Value::new_iutf8("name")), - ("acp_search_attr", Value::new_iutf8("class")) - ), - AccessControlSearch - ); - }) - } - - #[test] - fn test_access_acp_modify_parser() { - run_test!(|qs: &QueryServer| { - // Test that parsing modify access controls works. 
- let mut qs_write = qs.write(duration_from_epoch_now()); - - acp_from_entry_err!( - &mut qs_write, - r#"{ - "attrs": { - "class": ["object", "access_control_profile"], - "name": ["acp_valid"], - "uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"], - "acp_receiver": [ - "{\"eq\":[\"name\",\"a\"]}" - ], - "acp_targetscope": [ - "{\"eq\":[\"name\",\"a\"]}" - ], - "acp_modify_removedattr": ["name"], - "acp_modify_presentattr": ["name"], - "acp_modify_class": ["object"] - } - }"#, - AccessControlModify - ); - - acp_from_entry_ok!( - &mut qs_write, - entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("access_control_profile")), - ("class", Value::new_class("access_control_modify")), - ("name", Value::new_iname("acp_valid")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ( - "acp_receiver", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ), - ( - "acp_targetscope", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ) - ), - AccessControlModify - ); - - acp_from_entry_ok!( - &mut qs_write, - entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("access_control_profile")), - ("class", Value::new_class("access_control_modify")), - ("name", Value::new_iname("acp_valid")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ( - "acp_receiver", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ), - ( - "acp_targetscope", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ), - ("acp_modify_removedattr", Value::new_iutf8("name")), - ("acp_modify_presentattr", Value::new_iutf8("name")), - ("acp_modify_class", Value::new_iutf8("object")) - ), - AccessControlModify - ); - }) - } - - #[test] - fn test_access_acp_create_parser() { - run_test!(|qs: &QueryServer| { - // Test that parsing create access controls works. 
- let mut qs_write = qs.write(duration_from_epoch_now()); - - acp_from_entry_err!( - &mut qs_write, - r#"{ - "attrs": { - "class": ["object", "access_control_profile"], - "name": ["acp_valid"], - "uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"], - "acp_receiver": [ - "{\"eq\":[\"name\",\"a\"]}" - ], - "acp_targetscope": [ - "{\"eq\":[\"name\",\"a\"]}" - ], - "acp_create_class": ["object"], - "acp_create_attr": ["name"] - } - }"#, - AccessControlCreate - ); - - acp_from_entry_ok!( - &mut qs_write, - entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("access_control_profile")), - ("class", Value::new_class("access_control_create")), - ("name", Value::new_iname("acp_valid")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ( - "acp_receiver", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ), - ( - "acp_targetscope", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ) - ), - AccessControlCreate - ); - - acp_from_entry_ok!( - &mut qs_write, - entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("access_control_profile")), - ("class", Value::new_class("access_control_create")), - ("name", Value::new_iname("acp_valid")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ( - "acp_receiver", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ), - ( - "acp_targetscope", - Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") - ), - ("acp_create_attr", Value::new_iutf8("name")), - ("acp_create_class", Value::new_iutf8("object")) - ), - AccessControlCreate - ); - }) - } - - #[test] - fn test_access_acp_compound_parser() { - run_test!(|qs: &QueryServer| { - // Test that parsing compound access controls works. This means that - // given a single &str, we can evaluate all types from a single record. - // This is valid, and could exist, IE a rule to allow create, search and modify - // over a single scope. - let mut qs_write = qs.write(duration_from_epoch_now()); - - let e = entry_init!( + // All good! + acp_from_entry_ok!( + &mut qs_write, + entry_init!( ("class", Value::new_class("object")), ("class", Value::new_class("access_control_profile")), - ("class", Value::new_class("access_control_create")), - ("class", Value::new_class("access_control_delete")), - ("class", Value::new_class("access_control_modify")), ("class", Value::new_class("access_control_search")), ("name", Value::new_iname("acp_valid")), ( @@ -1972,18 +1776,202 @@ mod tests { Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") ), ("acp_search_attr", Value::new_iutf8("name")), - ("acp_create_class", Value::new_iutf8("class")), - ("acp_create_attr", Value::new_iutf8("name")), + ("acp_search_attr", Value::new_iutf8("class")) + ), + AccessControlSearch + ); + } + + #[qs_test] + async fn test_access_acp_modify_parser(qs: &QueryServer) { + // Test that parsing modify access controls works. 
+ let mut qs_write = qs.write(duration_from_epoch_now()).await; + + acp_from_entry_err!( + &mut qs_write, + r#"{ + "attrs": { + "class": ["object", "access_control_profile"], + "name": ["acp_valid"], + "uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"], + "acp_receiver": [ + "{\"eq\":[\"name\",\"a\"]}" + ], + "acp_targetscope": [ + "{\"eq\":[\"name\",\"a\"]}" + ], + "acp_modify_removedattr": ["name"], + "acp_modify_presentattr": ["name"], + "acp_modify_class": ["object"] + } + }"#, + AccessControlModify + ); + + acp_from_entry_ok!( + &mut qs_write, + entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("access_control_profile")), + ("class", Value::new_class("access_control_modify")), + ("name", Value::new_iname("acp_valid")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ( + "acp_receiver", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ), + ( + "acp_targetscope", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ) + ), + AccessControlModify + ); + + acp_from_entry_ok!( + &mut qs_write, + entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("access_control_profile")), + ("class", Value::new_class("access_control_modify")), + ("name", Value::new_iname("acp_valid")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ( + "acp_receiver", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ), + ( + "acp_targetscope", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ), ("acp_modify_removedattr", Value::new_iutf8("name")), ("acp_modify_presentattr", Value::new_iutf8("name")), ("acp_modify_class", Value::new_iutf8("object")) - ); + ), + AccessControlModify + ); + } - acp_from_entry_ok!(&mut qs_write, e.clone(), AccessControlCreate); - acp_from_entry_ok!(&mut qs_write, e.clone(), AccessControlDelete); - acp_from_entry_ok!(&mut qs_write, e.clone(), AccessControlModify); - acp_from_entry_ok!(&mut qs_write, e, AccessControlSearch); - }) + #[qs_test] + async fn test_access_acp_create_parser(qs: &QueryServer) { + // Test that parsing create access controls works. 
+ let mut qs_write = qs.write(duration_from_epoch_now()).await; + + acp_from_entry_err!( + &mut qs_write, + r#"{ + "attrs": { + "class": ["object", "access_control_profile"], + "name": ["acp_valid"], + "uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"], + "acp_receiver": [ + "{\"eq\":[\"name\",\"a\"]}" + ], + "acp_targetscope": [ + "{\"eq\":[\"name\",\"a\"]}" + ], + "acp_create_class": ["object"], + "acp_create_attr": ["name"] + } + }"#, + AccessControlCreate + ); + + acp_from_entry_ok!( + &mut qs_write, + entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("access_control_profile")), + ("class", Value::new_class("access_control_create")), + ("name", Value::new_iname("acp_valid")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ( + "acp_receiver", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ), + ( + "acp_targetscope", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ) + ), + AccessControlCreate + ); + + acp_from_entry_ok!( + &mut qs_write, + entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("access_control_profile")), + ("class", Value::new_class("access_control_create")), + ("name", Value::new_iname("acp_valid")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ( + "acp_receiver", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ), + ( + "acp_targetscope", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ), + ("acp_create_attr", Value::new_iutf8("name")), + ("acp_create_class", Value::new_iutf8("object")) + ), + AccessControlCreate + ); + } + + #[qs_test] + async fn test_access_acp_compound_parser(qs: &QueryServer) { + // Test that parsing compound access controls works. This means that + // given a single &str, we can evaluate all types from a single record. + // This is valid, and could exist, IE a rule to allow create, search and modify + // over a single scope. + let mut qs_write = qs.write(duration_from_epoch_now()).await; + + let e = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("access_control_profile")), + ("class", Value::new_class("access_control_create")), + ("class", Value::new_class("access_control_delete")), + ("class", Value::new_class("access_control_modify")), + ("class", Value::new_class("access_control_search")), + ("name", Value::new_iname("acp_valid")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ( + "acp_receiver", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ), + ( + "acp_targetscope", + Value::new_json_filter_s("{\"eq\":[\"name\",\"a\"]}").expect("filter") + ), + ("acp_search_attr", Value::new_iutf8("name")), + ("acp_create_class", Value::new_iutf8("class")), + ("acp_create_attr", Value::new_iutf8("name")), + ("acp_modify_removedattr", Value::new_iutf8("name")), + ("acp_modify_presentattr", Value::new_iutf8("name")), + ("acp_modify_class", Value::new_iutf8("object")) + ); + + acp_from_entry_ok!(&mut qs_write, e.clone(), AccessControlCreate); + acp_from_entry_ok!(&mut qs_write, e.clone(), AccessControlDelete); + acp_from_entry_ok!(&mut qs_write, e.clone(), AccessControlModify); + acp_from_entry_ok!(&mut qs_write, e, AccessControlSearch); } macro_rules! 
test_acp_search { diff --git a/kanidmd/lib/src/filter.rs b/kanidmd/lib/src/filter.rs index 9d7015ce5..78088fe68 100644 --- a/kanidmd/lib/src/filter.rs +++ b/kanidmd/lib/src/filter.rs @@ -1786,16 +1786,15 @@ mod tests { assert!(f_t2a.get_attr_set() == f_expect); } - #[test] - fn test_filter_resolve_value() { - run_test!(|server: &QueryServer| { - let time_p1 = duration_from_epoch_now(); - let time_p2 = time_p1 + Duration::from_secs(CHANGELOG_MAX_AGE * 2); - let time_p3 = time_p2 + Duration::from_secs(CHANGELOG_MAX_AGE * 2); + #[qs_test] + async fn test_filter_resolve_value(server: &QueryServer) { + let time_p1 = duration_from_epoch_now(); + let time_p2 = time_p1 + Duration::from_secs(CHANGELOG_MAX_AGE * 2); + let time_p3 = time_p2 + Duration::from_secs(CHANGELOG_MAX_AGE * 2); - let server_txn = server.write(time_p1); - let e1: Entry = Entry::unsafe_from_entry_str( - r#"{ + let mut server_txn = server.write(time_p1).await; + let e1: Entry = Entry::unsafe_from_entry_str( + r#"{ "attrs": { "class": ["object", "person", "account"], "name": ["testperson1"], @@ -1804,9 +1803,9 @@ mod tests { "displayname": ["testperson1"] } }"#, - ); - let e2: Entry = Entry::unsafe_from_entry_str( - r#"{ + ); + let e2: Entry = Entry::unsafe_from_entry_str( + r#"{ "attrs": { "class": ["object", "person"], "name": ["testperson2"], @@ -1815,152 +1814,147 @@ mod tests { "displayname": ["testperson2"] } }"#, - ); + ); - // We need to add these and then push through the state machine. - let e_ts = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson3")), - ( - "uuid", - Value::new_uuids("9557f49c-97a5-4277-a9a5-097d17eb8317").expect("uuid") - ), - ("description", Value::new_utf8s("testperson3")), - ("displayname", Value::new_utf8s("testperson3")) - ); + // We need to add these and then push through the state machine. + let e_ts = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson3")), + ( + "uuid", + Value::new_uuids("9557f49c-97a5-4277-a9a5-097d17eb8317").expect("uuid") + ), + ("description", Value::new_utf8s("testperson3")), + ("displayname", Value::new_utf8s("testperson3")) + ); - let ce = CreateEvent::new_internal(vec![e1, e2, e_ts]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); + let ce = CreateEvent::new_internal(vec![e1, e2, e_ts]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - let de_sin = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_or!([f_eq( - "name", - PartialValue::new_iname("testperson3") - )]))) - }; - assert!(server_txn.delete(&de_sin).is_ok()); + let de_sin = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_or!([f_eq( + "name", + PartialValue::new_iname("testperson3") + )]))) + }; + assert!(server_txn.delete(&de_sin).is_ok()); - // Commit - assert!(server_txn.commit().is_ok()); + // Commit + assert!(server_txn.commit().is_ok()); - // Now, establish enough time for the recycled items to be purged. - let server_txn = server.write(time_p2); - assert!(server_txn.purge_recycled().is_ok()); - assert!(server_txn.commit().is_ok()); + // Now, establish enough time for the recycled items to be purged. 
+ let server_txn = server.write(time_p2).await; + assert!(server_txn.purge_recycled().is_ok()); + assert!(server_txn.commit().is_ok()); - let server_txn = server.write(time_p3); - assert!(server_txn.purge_tombstones().is_ok()); + let server_txn = server.write(time_p3).await; + assert!(server_txn.purge_tombstones().is_ok()); - // ===== ✅ now ready to test! + // ===== ✅ now ready to test! - // Resolving most times should yield expected results - let t1 = vs_utf8!["teststring".to_string()] as _; - let r1 = server_txn.resolve_valueset(&t1); - assert!(r1 == Ok(vec!["teststring".to_string()])); + // Resolving most times should yield expected results + let t1 = vs_utf8!["teststring".to_string()] as _; + let r1 = server_txn.resolve_valueset(&t1); + assert!(r1 == Ok(vec!["teststring".to_string()])); - // Resolve UUID with matching spn - let t_uuid = - vs_refer![Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap()] as _; - let r_uuid = server_txn.resolve_valueset(&t_uuid); - debug!("{:?}", r_uuid); - assert!(r_uuid == Ok(vec!["testperson1@example.com".to_string()])); + // Resolve UUID with matching spn + let t_uuid = + vs_refer![Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap()] as _; + let r_uuid = server_txn.resolve_valueset(&t_uuid); + debug!("{:?}", r_uuid); + assert!(r_uuid == Ok(vec!["testperson1@example.com".to_string()])); - // Resolve UUID with matching name - let t_uuid = - vs_refer![Uuid::parse_str("a67c0c71-0b35-4218-a6b0-22d23d131d27").unwrap()] as _; - let r_uuid = server_txn.resolve_valueset(&t_uuid); - debug!("{:?}", r_uuid); - assert!(r_uuid == Ok(vec!["testperson2".to_string()])); + // Resolve UUID with matching name + let t_uuid = + vs_refer![Uuid::parse_str("a67c0c71-0b35-4218-a6b0-22d23d131d27").unwrap()] as _; + let r_uuid = server_txn.resolve_valueset(&t_uuid); + debug!("{:?}", r_uuid); + assert!(r_uuid == Ok(vec!["testperson2".to_string()])); - // Resolve UUID non-exist - let t_uuid_non = - vs_refer![Uuid::parse_str("b83e98f0-3d2e-41d2-9796-d8d993289c86").unwrap()] as _; - let r_uuid_non = server_txn.resolve_valueset(&t_uuid_non); - debug!("{:?}", r_uuid_non); - assert!(r_uuid_non == Ok(vec!["b83e98f0-3d2e-41d2-9796-d8d993289c86".to_string()])); + // Resolve UUID non-exist + let t_uuid_non = + vs_refer![Uuid::parse_str("b83e98f0-3d2e-41d2-9796-d8d993289c86").unwrap()] as _; + let r_uuid_non = server_txn.resolve_valueset(&t_uuid_non); + debug!("{:?}", r_uuid_non); + assert!(r_uuid_non == Ok(vec!["b83e98f0-3d2e-41d2-9796-d8d993289c86".to_string()])); - // Resolve UUID to tombstone/recycled (same an non-exst) - let t_uuid_ts = - vs_refer![Uuid::parse_str("9557f49c-97a5-4277-a9a5-097d17eb8317").unwrap()] as _; - let r_uuid_ts = server_txn.resolve_valueset(&t_uuid_ts); - debug!("{:?}", r_uuid_ts); - assert!(r_uuid_ts == Ok(vec!["9557f49c-97a5-4277-a9a5-097d17eb8317".to_string()])); - }) + // Resolve UUID to tombstone/recycled (same an non-exst) + let t_uuid_ts = + vs_refer![Uuid::parse_str("9557f49c-97a5-4277-a9a5-097d17eb8317").unwrap()] as _; + let r_uuid_ts = server_txn.resolve_valueset(&t_uuid_ts); + debug!("{:?}", r_uuid_ts); + assert!(r_uuid_ts == Ok(vec!["9557f49c-97a5-4277-a9a5-097d17eb8317".to_string()])); } - #[test] - fn test_filter_depth_limits() { - run_test!(|server: &QueryServer| { - let r_txn = server.read(); + #[qs_test] + async fn test_filter_depth_limits(server: &QueryServer) { + let r_txn = server.read().await; - let mut inv_proto = ProtoFilter::Pres("class".to_string()); - for _i in 0..(FILTER_DEPTH_MAX + 1) { - inv_proto = 
ProtoFilter::And(vec![inv_proto]); - } + let mut inv_proto = ProtoFilter::Pres("class".to_string()); + for _i in 0..(FILTER_DEPTH_MAX + 1) { + inv_proto = ProtoFilter::And(vec![inv_proto]); + } - let mut inv_ldap = LdapFilter::Present("class".to_string()); - for _i in 0..(FILTER_DEPTH_MAX + 1) { - inv_ldap = LdapFilter::And(vec![inv_ldap]); - } + let mut inv_ldap = LdapFilter::Present("class".to_string()); + for _i in 0..(FILTER_DEPTH_MAX + 1) { + inv_ldap = LdapFilter::And(vec![inv_ldap]); + } - let ev = Identity::from_internal(); + let ev = Identity::from_internal(); - // Test proto + read - let res = Filter::from_ro(&ev, &inv_proto, &r_txn); - assert!(res == Err(OperationError::ResourceLimit)); + // Test proto + read + let res = Filter::from_ro(&ev, &inv_proto, &r_txn); + assert!(res == Err(OperationError::ResourceLimit)); - // ldap - let res = Filter::from_ldap_ro(&ev, &inv_ldap, &r_txn); - assert!(res == Err(OperationError::ResourceLimit)); + // ldap + let res = Filter::from_ldap_ro(&ev, &inv_ldap, &r_txn); + assert!(res == Err(OperationError::ResourceLimit)); - // Can only have one db conn at a time. - std::mem::drop(r_txn); + // Can only have one db conn at a time. + std::mem::drop(r_txn); - // proto + write - let wr_txn = server.write(duration_from_epoch_now()); - let res = Filter::from_rw(&ev, &inv_proto, &wr_txn); - assert!(res == Err(OperationError::ResourceLimit)); - }) + // proto + write + let wr_txn = server.write(duration_from_epoch_now()).await; + let res = Filter::from_rw(&ev, &inv_proto, &wr_txn); + assert!(res == Err(OperationError::ResourceLimit)); } - #[test] - fn test_filter_max_element_limits() { - run_test!(|server: &QueryServer| { - const LIMIT: usize = 4; - let r_txn = server.read(); + #[qs_test] + async fn test_filter_max_element_limits(server: &QueryServer) { + const LIMIT: usize = 4; + let r_txn = server.read().await; - let inv_proto = ProtoFilter::And( - (0..(LIMIT * 2)) - .map(|_| ProtoFilter::Pres("class".to_string())) - .collect(), - ); + let inv_proto = ProtoFilter::And( + (0..(LIMIT * 2)) + .map(|_| ProtoFilter::Pres("class".to_string())) + .collect(), + ); - let inv_ldap = LdapFilter::And( - (0..(LIMIT * 2)) - .map(|_| LdapFilter::Present("class".to_string())) - .collect(), - ); + let inv_ldap = LdapFilter::And( + (0..(LIMIT * 2)) + .map(|_| LdapFilter::Present("class".to_string())) + .collect(), + ); - let mut ev = Identity::from_internal(); - ev.limits.filter_max_elements = LIMIT; + let mut ev = Identity::from_internal(); + ev.limits.filter_max_elements = LIMIT; - // Test proto + read - let res = Filter::from_ro(&ev, &inv_proto, &r_txn); - assert!(res == Err(OperationError::ResourceLimit)); + // Test proto + read + let res = Filter::from_ro(&ev, &inv_proto, &r_txn); + assert!(res == Err(OperationError::ResourceLimit)); - // ldap - let res = Filter::from_ldap_ro(&ev, &inv_ldap, &r_txn); - assert!(res == Err(OperationError::ResourceLimit)); + // ldap + let res = Filter::from_ldap_ro(&ev, &inv_ldap, &r_txn); + assert!(res == Err(OperationError::ResourceLimit)); - // Can only have one db conn at a time. - std::mem::drop(r_txn); + // Can only have one db conn at a time. 
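These depth and element limit tests keep the original transaction discipline: the test backend allows only one live database connection, so the read transaction is dropped before a write transaction is opened, and both are now obtained through the async constructors. The pattern, using the identifiers from the test above:

    let r_txn = server.read().await;
    // ... read-only ResourceLimit checks over r_txn ...
    std::mem::drop(r_txn); // release the single db connection

    let wr_txn = server.write(duration_from_epoch_now()).await;
    let res = Filter::from_rw(&ev, &inv_proto, &wr_txn);
    assert!(res == Err(OperationError::ResourceLimit));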
+ std::mem::drop(r_txn); - // proto + write - let wr_txn = server.write(duration_from_epoch_now()); - let res = Filter::from_rw(&ev, &inv_proto, &wr_txn); - assert!(res == Err(OperationError::ResourceLimit)); - }) + // proto + write + let wr_txn = server.write(duration_from_epoch_now()).await; + let res = Filter::from_rw(&ev, &inv_proto, &wr_txn); + assert!(res == Err(OperationError::ResourceLimit)); } } diff --git a/kanidmd/lib/src/idm/account.rs b/kanidmd/lib/src/idm/account.rs index 759cd4dca..a89dbfd7b 100644 --- a/kanidmd/lib/src/idm/account.rs +++ b/kanidmd/lib/src/idm/account.rs @@ -502,7 +502,7 @@ impl DestroySessionTokenEvent { impl<'a> IdmServerProxyWriteTransaction<'a> { pub fn account_destroy_session_token( - &self, + &mut self, dte: &DestroySessionTokenEvent, ) -> Result<(), OperationError> { // Delete the attribute with uuid. @@ -534,7 +534,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { } pub fn service_account_into_person( - &self, + &mut self, ident: &Identity, target_uuid: Uuid, ) -> Result<(), OperationError> { diff --git a/kanidmd/lib/src/idm/credupdatesession.rs b/kanidmd/lib/src/idm/credupdatesession.rs index 1e185d669..b7883e1d2 100644 --- a/kanidmd/lib/src/idm/credupdatesession.rs +++ b/kanidmd/lib/src/idm/credupdatesession.rs @@ -1500,7 +1500,7 @@ mod tests { idms: &IdmServer, _idms_delayed: &mut IdmServerDelayed| { let ct = Duration::from_secs(TEST_CURRENT_TIME); - let mut idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let testaccount_uuid = Uuid::new_v4(); @@ -1607,7 +1607,7 @@ mod tests { idms: &IdmServer, ct: Duration, ) -> (CredentialUpdateSessionToken, CredentialUpdateSessionStatus) { - let mut idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let e2 = entry_init!( ("class", Value::new_class("object")), @@ -1642,7 +1642,7 @@ mod tests { idms: &IdmServer, ct: Duration, ) -> (CredentialUpdateSessionToken, CredentialUpdateSessionStatus) { - let mut idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let testperson = idms_prox_write .qs_write @@ -1660,7 +1660,7 @@ mod tests { } fn commit_session(idms: &IdmServer, ct: Duration, cust: CredentialUpdateSessionToken) { - let mut idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); idms_prox_write .commit_credential_update(&cust, ct) diff --git a/kanidmd/lib/src/idm/oauth2.rs b/kanidmd/lib/src/idm/oauth2.rs index b59a10ac9..30c07e81f 100644 --- a/kanidmd/lib/src/idm/oauth2.rs +++ b/kanidmd/lib/src/idm/oauth2.rs @@ -1416,6 +1416,8 @@ mod tests { use crate::idm::server::{IdmServer, IdmServerTransaction}; use crate::prelude::*; + use async_std::task; + const TEST_CURRENT_TIME: u64 = 6000; const UAT_EXPIRE: u64 = 5; const TOKEN_EXPIRE: u64 = 900; @@ -1466,7 +1468,7 @@ mod tests { enable_pkce: bool, enable_legacy_crypto: bool, ) -> (String, UserAuthToken, Identity, Uuid) { - let mut idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let uuid = Uuid::new_v4(); @@ -1543,7 +1545,7 @@ mod tests { ct: Duration, authtype: AuthType, ) -> (UserAuthToken, Identity) { - let mut idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let account = idms_prox_write .target_to_account(&UUID_IDM_ADMIN) .expect("account must exist"); @@ -1567,7 +1569,7 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); 
let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // Get an ident/uat for now. @@ -1636,7 +1638,7 @@ mod tests { let (idm_admin_uat, idm_admin_ident) = setup_idm_admin(idms, ct, AuthType::PasswordMfa); // Need a uat from a user not in the group. Probs anonymous. - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); let (_code_verifier, code_challenge) = create_code_verifier!("Whar Garble"); @@ -1797,7 +1799,7 @@ mod tests { let (_secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false); let (uat2, ident2) = { - let mut idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let account = idms_prox_write .target_to_account(&UUID_IDM_ADMIN) .expect("account must exist"); @@ -1811,7 +1813,7 @@ mod tests { (uat2, ident2) }; - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); let (_code_verifier, code_challenge) = create_code_verifier!("Whar Garble"); @@ -1884,7 +1886,7 @@ mod tests { + Duration::from_secs(TEST_CURRENT_TIME + UAT_EXPIRE - 1), ); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // == Setup the authorisation request let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble"); @@ -2045,7 +2047,7 @@ mod tests { let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false); let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret))); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // == Setup the authorisation request let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble"); @@ -2109,7 +2111,7 @@ mod tests { drop(idms_prox_read); // start a write, - let idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); // Expire the account, should cause introspect to return inactive. let v_expire = Value::new_datetime_epoch(Duration::from_secs(TEST_CURRENT_TIME - 1)); @@ -2128,7 +2130,7 @@ mod tests { // start a new read // check again. 
- let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); let intr_response = idms_prox_read .check_oauth2_token_introspect(&client_authz.unwrap(), &intr_request, ct) .expect("Failed to inspect token"); @@ -2147,7 +2149,7 @@ mod tests { let (_secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false); let (uat2, ident2) = { - let mut idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let account = idms_prox_write .target_to_account(&UUID_IDM_ADMIN) .expect("account must exist"); @@ -2161,7 +2163,7 @@ mod tests { (uat2, ident2) }; - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); let redirect_uri = Url::parse("https://demo.example.com/oauth2/result").unwrap(); let (_code_verifier, code_challenge) = create_code_verifier!("Whar Garble"); @@ -2226,7 +2228,7 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); let (_secret, _uat, _ident, _) = setup_oauth2_resource_server(idms, ct, true, false); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // check the discovery end point works as we expect assert!( @@ -2367,7 +2369,7 @@ mod tests { let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false); let client_authz = Some(base64::encode(format!("test_resource_server:{}", secret))); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble"); @@ -2491,7 +2493,7 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); let (_secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, false, false); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // == Setup the authorisation request let (_code_verifier, code_challenge) = create_code_verifier!("Whar Garble"); @@ -2525,7 +2527,7 @@ mod tests { |_qs: &QueryServer, idms: &IdmServer, idms_delayed: &mut IdmServerDelayed| { let ct = Duration::from_secs(TEST_CURRENT_TIME); let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, false, true); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // The public key url should offer an rs key // discovery should offer RS256 let discovery = idms_prox_read @@ -2623,7 +2625,7 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); let (_secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, true, false); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); let (_code_verifier, code_challenge) = create_code_verifier!("Whar Garble"); let consent_request = @@ -2653,12 +2655,12 @@ mod tests { }; // Manually submit the consent. - let mut idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); assert!(idms_prox_write.process_oauth2consentgrant(&o2cg).is_ok()); assert!(idms_prox_write.commit().is_ok()); // == Now try the authorise again, should be in the permitted state. - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // We need to reload our identity let ident = idms_prox_read @@ -2680,7 +2682,7 @@ mod tests { drop(idms_prox_read); // Great! Now change the scopes on the oauth2 instance, this revokes the permit. 
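The oauth2 tests (like the credential-update and idm server tests later in the patch) still run inside the synchronous run_idm_test! closure, so every call into the now async-only IdmServer transaction constructors is bridged with async_std::task::block_on. The recurring shape, with the read transaction dropped before a write is opened:

    use async_std::task;

    let idms_prox_read = task::block_on(idms.proxy_read());
    // ... read-only token / consent assertions ...
    drop(idms_prox_read);

    // `mut` is required because the write-transaction methods take &mut self.
    let mut idms_prox_write = task::block_on(idms.proxy_write(ct));
    // ... apply modifications ...
    assert!(idms_prox_write.commit().is_ok());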
- let idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let me_extend_scopes = unsafe { ModifyEvent::new_internal_invalid( @@ -2704,7 +2706,7 @@ mod tests { // And do the workflow once more to see if we need to consent again. - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // We need to reload our identity let ident = idms_prox_read @@ -2747,7 +2749,7 @@ mod tests { // Success! We had to consent again due to the change :) // Now change the supplemental scopes on the oauth2 instance, this revokes the permit. - let idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let me_extend_scopes = unsafe { ModifyEvent::new_internal_invalid( @@ -2771,7 +2773,7 @@ mod tests { // And do the workflow once more to see if we need to consent again. - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // We need to reload our identity let ident = idms_prox_read @@ -2827,7 +2829,7 @@ mod tests { // Assert there are no consent maps yet. assert!(ident.get_oauth2_consent_scopes(o2rs_uuid).is_none()); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); let (_code_verifier, code_challenge) = create_code_verifier!("Whar Garble"); let consent_request = @@ -2857,7 +2859,7 @@ mod tests { }; // Manually submit the consent. - let mut idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); assert!(idms_prox_write.process_oauth2consentgrant(&o2cg).is_ok()); let ident = idms_prox_write @@ -2917,7 +2919,7 @@ mod tests { // Enable pkce is set to FALSE let (secret, uat, ident, _) = setup_oauth2_resource_server(idms, ct, false, false); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // Get an ident/uat for now. diff --git a/kanidmd/lib/src/idm/server.rs b/kanidmd/lib/src/idm/server.rs index e00230310..200a1d20b 100644 --- a/kanidmd/lib/src/idm/server.rs +++ b/kanidmd/lib/src/idm/server.rs @@ -162,7 +162,7 @@ impl IdmServer { // Get the domain name, as the relying party id. let (rp_id, rp_name, fernet_private_key, es256_private_key, pw_badlist_set, oauth2rs_set) = { - let qs_read = task::block_on(qs.read_async()); + let qs_read = task::block_on(qs.read()); ( qs_read.get_domain_name().to_string(), qs_read.get_domain_display_name().to_string(), @@ -257,12 +257,12 @@ impl IdmServer { } pub async fn auth_async(&self) -> IdmServerAuthTransaction<'_> { + let qs_read = self.qs.read().await; + let mut sid = [0; 4]; let mut rng = StdRng::from_entropy(); rng.fill(&mut sid); - let qs_read = self.qs.read_async().await; - IdmServerAuthTransaction { session_ticket: &self.session_ticket, sessions: &self.sessions, @@ -277,34 +277,24 @@ impl IdmServer { } } - /// Perform a blocking read transaction on the database. - #[cfg(test)] - pub fn proxy_read<'a>(&'a self) -> IdmServerProxyReadTransaction<'a> { - task::block_on(self.proxy_read_async()) - } - /// Read from the database, in a transaction. 
#[instrument(level = "debug", skip_all)] - pub async fn proxy_read_async(&self) -> IdmServerProxyReadTransaction<'_> { + pub async fn proxy_read(&self) -> IdmServerProxyReadTransaction<'_> { IdmServerProxyReadTransaction { - qs_read: self.qs.read_async().await, + qs_read: self.qs.read().await, uat_jwt_validator: self.uat_jwt_validator.read(), oauth2rs: self.oauth2rs.read(), async_tx: self.async_tx.clone(), } } - #[cfg(test)] - pub fn proxy_write(&self, ts: Duration) -> IdmServerProxyWriteTransaction { - task::block_on(self.proxy_write_async(ts)) - } - #[instrument(level = "debug", skip_all)] - pub async fn proxy_write_async(&self, ts: Duration) -> IdmServerProxyWriteTransaction<'_> { + pub async fn proxy_write(&self, ts: Duration) -> IdmServerProxyWriteTransaction<'_> { + let qs_write = self.qs.write(ts).await; + let mut sid = [0; 4]; let mut rng = StdRng::from_entropy(); rng.fill(&mut sid); - let qs_write = self.qs.write_async(ts).await; IdmServerProxyWriteTransaction { cred_update_sessions: self.cred_update_sessions.write(), @@ -327,7 +317,7 @@ impl IdmServer { pub async fn cred_update_transaction_async(&self) -> IdmServerCredUpdateTransaction<'_> { IdmServerCredUpdateTransaction { - _qs_read: self.qs.read_async().await, + _qs_read: self.qs.read().await, // sid: Sid, webauthn: &self.webauthn, pw_badlist_cache: self.pw_badlist_cache.read(), @@ -343,7 +333,7 @@ impl IdmServer { ts: Duration, da: DelayedAction, ) -> Result { - let mut pw = self.proxy_write_async(ts).await; + let mut pw = self.proxy_write(ts).await; pw.process_delayedaction(da) .and_then(|_| pw.commit()) .map(|()| true) @@ -2347,11 +2337,11 @@ mod tests { ) } - fn init_admin_w_password(qs: &QueryServer, pw: &str) -> Result<(), OperationError> { + async fn init_admin_w_password(qs: &QueryServer, pw: &str) -> Result<(), OperationError> { let p = CryptoPolicy::minimum(); let cred = Credential::new_password_only(&p, pw)?; let v_cred = Value::new_credential("primary", cred); - let qs_write = qs.write(duration_from_epoch_now()); + let mut qs_write = qs.write(duration_from_epoch_now()).await; // now modify and provide a primary credential. 
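With the #[cfg(test)] blocking wrappers removed and the *_async suffix dropped, proxy_read and proxy_write are the single async entry points, and shared test helpers follow the same shape: they become async fn and are driven from the still-synchronous run_idm_test! bodies in one block_on call. A trimmed sketch of the helper above, with the modify step elided:

    async fn init_admin_w_password(qs: &QueryServer, pw: &str) -> Result<(), OperationError> {
        let p = CryptoPolicy::minimum();
        let cred = Credential::new_password_only(&p, pw)?;
        let v_cred = Value::new_credential("primary", cred);
        let mut qs_write = qs.write(duration_from_epoch_now()).await;
        // ... modify the admin entry to carry v_cred as its primary credential ...
        qs_write.commit()
    }

    // Call site inside a synchronous test body:
    task::block_on(init_admin_w_password(qs, TEST_PASSWORD))
        .expect("Failed to setup admin account");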
let me_inv_m = unsafe { @@ -2457,7 +2447,8 @@ mod tests { fn test_idm_simple_password_auth() { run_idm_test!( |qs: &QueryServer, idms: &IdmServer, idms_delayed: &mut IdmServerDelayed| { - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); check_admin_password(idms, TEST_PASSWORD); // Clear our the session record @@ -2472,7 +2463,8 @@ mod tests { fn test_idm_simple_password_spn_auth() { run_idm_test!( |qs: &QueryServer, idms: &IdmServer, idms_delayed: &mut IdmServerDelayed| { - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); let sid = init_admin_authsession_sid( idms, @@ -2530,7 +2522,8 @@ mod tests { fn test_idm_simple_password_invalid() { run_idm_test!( |qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); let sid = init_admin_authsession_sid( idms, Duration::from_secs(TEST_CURRENT_TIME), @@ -2583,7 +2576,8 @@ mod tests { |_qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { let pce = PasswordChangeEvent::new_internal(&UUID_ADMIN, TEST_PASSWORD); - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); assert!(idms_prox_write.set_account_password(&pce).is_ok()); assert!(idms_prox_write.set_account_password(&pce).is_ok()); assert!(idms_prox_write.commit().is_ok()); @@ -2597,7 +2591,8 @@ mod tests { |_qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { let pce = PasswordChangeEvent::new_internal(&UUID_ANONYMOUS, TEST_PASSWORD); - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); assert!(idms_prox_write.set_account_password(&pce).is_err()); assert!(idms_prox_write.commit().is_ok()); } @@ -2608,7 +2603,8 @@ mod tests { fn test_idm_session_expire() { run_idm_test!( |qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); let sid = init_admin_authsession_sid( idms, Duration::from_secs(TEST_CURRENT_TIME), @@ -2637,7 +2633,8 @@ mod tests { fn test_idm_regenerate_radius_secret() { run_idm_test!( |_qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); let rrse = RegenerateRadiusSecretEvent::new_internal(UUID_ADMIN.clone()); // Generates a new credential when none exists @@ -2657,7 +2654,8 @@ mod tests { fn test_idm_radius_secret_rejected_from_account_credential() { run_idm_test!( |_qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); let rrse = RegenerateRadiusSecretEvent::new_internal(UUID_ADMIN.clone()); let r1 = 
idms_prox_write @@ -2682,14 +2680,15 @@ mod tests { fn test_idm_radiusauthtoken() { run_idm_test!( |_qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); let rrse = RegenerateRadiusSecretEvent::new_internal(UUID_ADMIN.clone()); let r1 = idms_prox_write .regenerate_radius_secret(&rrse) .expect("Failed to reset radius credential 1"); idms_prox_write.commit().expect("failed to commit"); - let mut idms_prox_read = idms.proxy_read(); + let mut idms_prox_read = task::block_on(idms.proxy_read()); let rate = RadiusAuthTokenEvent::new_internal(UUID_ADMIN.clone()); let tok_r = idms_prox_read .get_radiusauthtoken(&rate, duration_from_epoch_now()) @@ -2706,7 +2705,8 @@ mod tests { run_idm_test!( |_qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { // len check - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); let pce = PasswordChangeEvent::new_internal(&UUID_ADMIN, "password"); let e = idms_prox_write.set_account_password(&pce); @@ -2739,7 +2739,8 @@ mod tests { fn test_idm_simple_password_reject_badlist() { run_idm_test!( |_qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); // Check that the badlist password inserted is rejected. let pce = PasswordChangeEvent::new_internal(&UUID_ADMIN, "bad@no3IBTyqHu$list"); @@ -2755,7 +2756,8 @@ mod tests { fn test_idm_unixusertoken() { run_idm_test!( |_qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { - let idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); // Modify admin to have posixaccount let me_posix = unsafe { ModifyEvent::new_internal_invalid( @@ -2793,7 +2795,7 @@ mod tests { idms_prox_write.commit().expect("failed to commit"); - let mut idms_prox_read = idms.proxy_read(); + let mut idms_prox_read = task::block_on(idms.proxy_read()); let ugte = UnixGroupTokenEvent::new_internal( Uuid::parse_str("01609135-a1c4-43d5-966b-a28227644445") @@ -2837,7 +2839,8 @@ mod tests { fn test_idm_simple_unix_password_reset() { run_idm_test!( |_qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); // make the admin a valid posix account let me_posix = unsafe { ModifyEvent::new_internal_invalid( @@ -2881,7 +2884,8 @@ mod tests { assert!(idms_auth.commit().is_ok()); // Check deleting the password - let idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); let me_purge_up = unsafe { ModifyEvent::new_internal_invalid( filter!(f_eq("name", PartialValue::new_iname("admin"))), @@ -2911,12 +2915,13 @@ mod tests { #[test] fn test_idm_simple_password_upgrade() { run_idm_test!( - |qs: &QueryServer, idms: &IdmServer, idms_delayed: &mut IdmServerDelayed| { + |_qs: &QueryServer, idms: &IdmServer, idms_delayed: &mut IdmServerDelayed| { // Assert the delayed action queue is empty 
idms_delayed.check_is_empty_or_panic(); // Setup the admin w_ an imported password. { - let qs_write = qs.write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); // now modify and provide a primary credential. let me_inv_m = unsafe { ModifyEvent::new_internal_invalid( @@ -2928,8 +2933,8 @@ mod tests { ) }; // go! - assert!(qs_write.modify(&me_inv_m).is_ok()); - qs_write.commit().expect("failed to commit"); + assert!(idms_prox_write.qs_write.modify(&me_inv_m).is_ok()); + assert!(idms_prox_write.commit().is_ok()); } // Still empty idms_delayed.check_is_empty_or_panic(); @@ -2965,7 +2970,8 @@ mod tests { // Assert the delayed action queue is empty idms_delayed.check_is_empty_or_panic(); // Setup the admin with an imported unix pw. - let idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); let im_pw = "{SSHA512}JwrSUHkI7FTAfHRVR6KoFlSN0E3dmaQWARjZ+/UsShYlENOqDtFVU77HJLLrY2MuSp0jve52+pwtdVl2QUAHukQ0XUf5LDtM"; let pw = Password::try_from(im_pw).expect("failed to parse"); @@ -3027,8 +3033,8 @@ mod tests { const TEST_EXPIRE_TIME: u64 = TEST_CURRENT_TIME + 120; const TEST_AFTER_EXPIRY: u64 = TEST_CURRENT_TIME + 240; - fn set_admin_valid_time(qs: &QueryServer) { - let qs_write = qs.write(duration_from_epoch_now()); + async fn set_admin_valid_time(qs: &QueryServer) { + let mut qs_write = qs.write(duration_from_epoch_now()).await; let v_valid_from = Value::new_datetime_epoch(Duration::from_secs(TEST_VALID_FROM_TIME)); let v_expire = Value::new_datetime_epoch(Duration::from_secs(TEST_EXPIRE_TIME)); @@ -3055,10 +3061,11 @@ mod tests { |qs: &QueryServer, idms: &IdmServer, _idms_delayed: &mut IdmServerDelayed| { // Any account taht is not yet valrid / expired can't auth. - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); // Set the valid bounds high/low // TEST_VALID_FROM_TIME/TEST_EXPIRE_TIME - set_admin_valid_time(qs); + task::block_on(set_admin_valid_time(qs)); let time_low = Duration::from_secs(TEST_NOT_YET_VALID_TIME); let time_high = Duration::from_secs(TEST_AFTER_EXPIRY); @@ -3114,14 +3121,16 @@ mod tests { run_idm_test!( |qs: &QueryServer, idms: &IdmServer, _idms_delayed: &mut IdmServerDelayed| { // Any account that is expired can't unix auth. - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); - set_admin_valid_time(qs); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); + task::block_on(set_admin_valid_time(qs)); let time_low = Duration::from_secs(TEST_NOT_YET_VALID_TIME); let time_high = Duration::from_secs(TEST_AFTER_EXPIRY); // make the admin a valid posix account - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); let me_posix = unsafe { ModifyEvent::new_internal_invalid( filter!(f_eq("name", PartialValue::new_iname("admin"))), @@ -3161,7 +3170,7 @@ mod tests { idms_auth.commit().expect("Must not fail"); // Also check the generated unix tokens are invalid. 
- let mut idms_prox_read = idms.proxy_read(); + let mut idms_prox_read = task::block_on(idms.proxy_read()); let uute = UnixUserTokenEvent::new_internal(UUID_ADMIN.clone()); let tok_r = idms_prox_read @@ -3187,20 +3196,22 @@ mod tests { |qs: &QueryServer, idms: &IdmServer, _idms_delayed: &mut IdmServerDelayed| { // Any account not valid/expiry should not return // a radius packet. - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); - set_admin_valid_time(qs); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); + task::block_on(set_admin_valid_time(qs)); let time_low = Duration::from_secs(TEST_NOT_YET_VALID_TIME); let time_high = Duration::from_secs(TEST_AFTER_EXPIRY); - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); let rrse = RegenerateRadiusSecretEvent::new_internal(UUID_ADMIN.clone()); let _r1 = idms_prox_write .regenerate_radius_secret(&rrse) .expect("Failed to reset radius credential 1"); idms_prox_write.commit().expect("failed to commit"); - let mut idms_prox_read = idms.proxy_read(); + let mut idms_prox_read = task::block_on(idms.proxy_read()); let rate = RadiusAuthTokenEvent::new_internal(UUID_ADMIN.clone()); let tok_r = idms_prox_read.get_radiusauthtoken(&rate, time_low); @@ -3225,7 +3236,8 @@ mod tests { fn test_idm_account_softlocking() { run_idm_test!( |qs: &QueryServer, idms: &IdmServer, idms_delayed: &mut IdmServerDelayed| { - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); // Auth invalid, no softlock present. let sid = init_admin_authsession_sid( @@ -3378,7 +3390,8 @@ mod tests { fn test_idm_account_softlocking_interleaved() { run_idm_test!( |qs: &QueryServer, idms: &IdmServer, _idms_delayed: &mut IdmServerDelayed| { - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); // Start an *early* auth session. 
let sid_early = init_admin_authsession_sid( @@ -3472,9 +3485,11 @@ mod tests { fn test_idm_account_unix_softlocking() { run_idm_test!( |qs: &QueryServer, idms: &IdmServer, _idms_delayed: &mut IdmServerDelayed| { - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); // make the admin a valid posix account - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); let me_posix = unsafe { ModifyEvent::new_internal_invalid( filter!(f_eq("name", PartialValue::new_iname("admin"))), @@ -3535,7 +3550,8 @@ mod tests { let ct = Duration::from_secs(TEST_CURRENT_TIME); let expiry = ct + Duration::from_secs(AUTH_SESSION_EXPIRY + 1); // Do an authenticate - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); let token = check_admin_password(idms, TEST_PASSWORD); // Clear our the session record @@ -3545,7 +3561,7 @@ mod tests { assert!(Ok(true) == r); idms_delayed.check_is_empty_or_panic(); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // Check it's valid. idms_prox_read @@ -3580,7 +3596,8 @@ mod tests { assert!(post_grace < expiry); // Do an authenticate - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); let token = check_admin_password(idms, TEST_PASSWORD); // Process the session info. @@ -3596,7 +3613,7 @@ mod tests { .expect("Embedded jwk not found"); let uat_inner = uat_inner.into_inner(); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // Check it's valid. idms_prox_read @@ -3611,14 +3628,14 @@ mod tests { drop(idms_prox_read); // Mark the session as invalid now. - let idms_prox_write = idms.proxy_write(ct.clone()); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct.clone())); let dte = DestroySessionTokenEvent::new_internal(uat_inner.uuid, uat_inner.session_id); assert!(idms_prox_write.account_destroy_session_token(&dte).is_ok()); assert!(idms_prox_write.commit().is_ok()); // Now check again with the session destroyed. - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // Now, within gracewindow, it's still valid. idms_prox_read @@ -3642,7 +3659,7 @@ mod tests { idms: &IdmServer, _idms_delayed: &mut IdmServerDelayed| { let ct = Duration::from_secs(TEST_CURRENT_TIME); - let mut idms_prox_write = idms.proxy_write(ct.clone()); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct.clone())); // get an account. 
let account = idms_prox_write @@ -3746,7 +3763,8 @@ mod tests { |qs: &QueryServer, idms: &IdmServer, idms_delayed: &mut IdmServerDelayed| { let ct = Duration::from_secs(TEST_CURRENT_TIME); - init_admin_w_password(qs, TEST_PASSWORD).expect("Failed to setup admin account"); + task::block_on(init_admin_w_password(qs, TEST_PASSWORD)) + .expect("Failed to setup admin account"); let token = check_admin_password(idms, TEST_PASSWORD); // Clear the session record @@ -3754,7 +3772,7 @@ mod tests { assert!(matches!(da, DelayedAction::AuthSessionRecord(_))); idms_delayed.check_is_empty_or_panic(); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); // Check it's valid. idms_prox_read @@ -3768,7 +3786,7 @@ mod tests { // // fernet_private_key_str // es256_private_key_der - let idms_prox_write = idms.proxy_write(ct.clone()); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct.clone())); let me_reset_tokens = unsafe { ModifyEvent::new_internal_invalid( filter!(f_eq("uuid", PartialValue::new_uuid(UUID_DOMAIN_INFO))), @@ -3789,7 +3807,7 @@ mod tests { assert!(matches!(da, DelayedAction::AuthSessionRecord(_))); idms_delayed.check_is_empty_or_panic(); - let idms_prox_read = idms.proxy_read(); + let idms_prox_read = task::block_on(idms.proxy_read()); assert!(idms_prox_read .validate_and_parse_token_to_ident(Some(token.as_str()), ct) .is_err()); @@ -3807,7 +3825,7 @@ mod tests { idms: &IdmServer, _idms_delayed: &mut IdmServerDelayed| { let ct = Duration::from_secs(TEST_CURRENT_TIME); - let idms_prox_write = idms.proxy_write(ct.clone()); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct.clone())); let ident = Identity::from_internal(); let target_uuid = Uuid::new_v4(); diff --git a/kanidmd/lib/src/idm/serviceaccount.rs b/kanidmd/lib/src/idm/serviceaccount.rs index c3590196d..b23d82d8f 100644 --- a/kanidmd/lib/src/idm/serviceaccount.rs +++ b/kanidmd/lib/src/idm/serviceaccount.rs @@ -188,7 +188,7 @@ impl DestroyApiTokenEvent { impl<'a> IdmServerProxyWriteTransaction<'a> { pub fn service_account_generate_api_token( - &self, + &mut self, gte: &GenerateApiTokenEvent, ct: Duration, ) -> Result { @@ -277,7 +277,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { } pub fn service_account_destroy_api_token( - &self, + &mut self, dte: &DestroyApiTokenEvent, ) -> Result<(), OperationError> { // Delete the attribute with uuid. @@ -379,6 +379,8 @@ mod tests { use crate::event::CreateEvent; use crate::idm::server::IdmServerTransaction; + use async_std::task; + const TEST_CURRENT_TIME: u64 = 6000; #[test] @@ -390,7 +392,7 @@ mod tests { let past_grc = Duration::from_secs(TEST_CURRENT_TIME + 1) + GRACE_WINDOW; let exp = Duration::from_secs(TEST_CURRENT_TIME + 6000); let post_exp = Duration::from_secs(TEST_CURRENT_TIME + 6010); - let idms_prox_write = idms.proxy_write(ct); + let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let testaccount_uuid = Uuid::new_v4(); diff --git a/kanidmd/lib/src/ldap.rs b/kanidmd/lib/src/ldap.rs index 927fce081..c6d2e6f56 100644 --- a/kanidmd/lib/src/ldap.rs +++ b/kanidmd/lib/src/ldap.rs @@ -61,7 +61,7 @@ pub struct LdapServer { impl LdapServer { pub fn new(idms: &IdmServer) -> Result { // let ct = duration_from_epoch_now(); - let idms_prox_read = task::block_on(idms.proxy_read_async()); + let idms_prox_read = task::block_on(idms.proxy_read()); // This is the rootdse path. 
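Outside the test macros the rename shows up in two flavours: code that is already async simply awaits the transaction, while the few synchronous construction paths (such as LdapServer::new here) bridge with task::block_on for their one-off read. Roughly:

    // Async request path, e.g. the LDAP search handler:
    let idm_read = idms.proxy_read().await;

    // Synchronous setup path, e.g. building the rootdse at server start:
    let idms_prox_read = task::block_on(idms.proxy_read());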
// get the domain_info item let domain_entry = idms_prox_read @@ -254,7 +254,7 @@ impl LdapServer { admin_info!(attr = ?k_attrs, "LDAP Search Request Mapped Attrs"); let ct = duration_from_epoch_now(); - let idm_read = idms.proxy_read_async().await; + let idm_read = idms.proxy_read().await; // Now start the txn - we need it for resolving filter components. // join the filter, with ext_filter @@ -564,7 +564,8 @@ mod tests { |_qs: &QueryServer, idms: &IdmServer, _idms_delayed: &IdmServerDelayed| { let ldaps = LdapServer::new(idms).expect("failed to start ldap"); - let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()); + let mut idms_prox_write = + task::block_on(idms.proxy_write(duration_from_epoch_now())); // make the admin a valid posix account let me_posix = unsafe { ModifyEvent::new_internal_invalid( @@ -755,7 +756,8 @@ mod tests { ("ssh_publickey", Value::new_sshkey_str("test", ssh_ed25519)) ); - let server_txn = idms.proxy_write(duration_from_epoch_now()); + let mut server_txn = + task::block_on(idms.proxy_write(duration_from_epoch_now())); let ce = CreateEvent::new_internal(vec![e1]); assert!(server_txn .qs_write @@ -925,7 +927,7 @@ mod tests { let ct = duration_from_epoch_now(); - let server_txn = idms.proxy_write(ct); + let mut server_txn = task::block_on(idms.proxy_write(ct)); let ce = CreateEvent::new_internal(vec![e1, e2]); assert!(server_txn.qs_write.create(&ce).is_ok()); diff --git a/kanidmd/lib/src/lib.rs b/kanidmd/lib/src/lib.rs index 221c18d25..9741a2820 100644 --- a/kanidmd/lib/src/lib.rs +++ b/kanidmd/lib/src/lib.rs @@ -52,6 +52,8 @@ mod repl; pub mod schema; pub mod server; pub mod status; +#[cfg(test)] +mod testkit; /// A prelude of imports that should be imported by all other Kanidm modules to /// help make imports cleaner. @@ -89,4 +91,7 @@ pub mod prelude { ValueSetSecret, ValueSetSpn, ValueSetSyntax, ValueSetT, ValueSetUint32, ValueSetUtf8, ValueSetUuid, }; + + #[cfg(test)] + pub use kanidmd_lib_macros::*; } diff --git a/kanidmd/lib/src/macros.rs b/kanidmd/lib/src/macros.rs index 217cf4fab..f1f333400 100644 --- a/kanidmd/lib/src/macros.rs +++ b/kanidmd/lib/src/macros.rs @@ -12,15 +12,13 @@ macro_rules! setup_test { Backend::new(BackendConfig::new_test(), idxmeta, false).expect("Failed to init BE"); let qs = QueryServer::new(be, schema_outer, "example.com".to_string()); - qs.initialise_helper(duration_from_epoch_now()) + async_std::task::block_on(qs.initialise_helper(duration_from_epoch_now())) .expect("init failed!"); qs }}; ( $preload_entries:expr ) => {{ - use async_std::task; - use crate::utils::duration_from_epoch_now; let _ = sketching::test_init(); @@ -28,18 +26,18 @@ macro_rules! setup_test { // Create an in memory BE let schema_outer = Schema::new().expect("Failed to init schema"); let idxmeta = { - let schema_txn = schema_outer.write_blocking(); + let schema_txn = schema_outer.write(); schema_txn.reload_idxmeta() }; let be = Backend::new(BackendConfig::new_test(), idxmeta, false).expect("Failed to init BE"); let qs = QueryServer::new(be, schema_outer, "example.com".to_string()); - qs.initialise_helper(duration_from_epoch_now()) + async_std::task::block_on(qs.initialise_helper(duration_from_epoch_now())) .expect("init failed!"); if !$preload_entries.is_empty() { - let qs_write = task::block_on(qs.write_async(duration_from_epoch_now())); + let mut qs_write = async_std::task::block_on(qs.write(duration_from_epoch_now())); qs_write .internal_create($preload_entries) .expect("Failed to preload entries"); @@ -49,60 +47,6 @@ macro_rules! 
setup_test { }}; } -#[cfg(test)] -macro_rules! run_test_no_init { - ($test_fn:expr) => {{ - use crate::be::{Backend, BackendConfig}; - use crate::prelude::*; - use crate::schema::Schema; - use crate::utils::duration_from_epoch_now; - - let _ = sketching::test_init(); - - let schema_outer = Schema::new().expect("Failed to init schema"); - let idxmeta = { - let schema_txn = schema_outer.write_blocking(); - schema_txn.reload_idxmeta() - }; - let be = match Backend::new(BackendConfig::new_test(), idxmeta, false) { - Ok(be) => be, - Err(e) => { - error!("{:?}", e); - panic!() - } - }; - let test_server = QueryServer::new(be, schema_outer, "example.com".to_string()); - - $test_fn(&test_server); - // Any needed teardown? - // Make sure there are no errors. - // let verifications = test_server.verify(); - // assert!(verifications.len() == 0); - }}; -} - -#[cfg(test)] -macro_rules! run_test { - ($test_fn:expr) => {{ - use crate::be::{Backend, BackendConfig}; - use crate::prelude::*; - use crate::schema::Schema; - #[allow(unused_imports)] - use crate::utils::duration_from_epoch_now; - - let _ = sketching::test_init(); - - let test_server = setup_test!(); - - $test_fn(&test_server); - // Any needed teardown? - // Make sure there are no errors. - let verifications = test_server.verify(); - trace!("Verification result: {:?}", verifications); - assert!(verifications.len() == 0); - }}; -} - #[cfg(test)] macro_rules! entry_str_to_account { ($entry_str:expr) => {{ @@ -155,7 +99,7 @@ macro_rules! run_idm_test_inner { $test_fn(&test_server, &test_idm_server, &mut idms_delayed); // Any needed teardown? // Make sure there are no errors. - assert!(test_server.verify().len() == 0); + assert!(async_std::task::block_on(test_server.verify()).len() == 0); idms_delayed.check_is_empty_or_panic(); }}; } @@ -207,7 +151,7 @@ macro_rules! run_create_test { }; { - let qs_write = qs.write(duration_from_epoch_now()); + let mut qs_write = async_std::task::block_on(qs.write(duration_from_epoch_now())); let r = qs_write.create(&ce); trace!("test result: {:?}", r); assert!(r == $expect); @@ -223,7 +167,7 @@ macro_rules! run_create_test { } // Make sure there are no errors. trace!("starting verification"); - let ver = qs.verify(); + let ver = async_std::task::block_on(qs.verify()); trace!("verification -> {:?}", ver); assert!(ver.len() == 0); }}; @@ -249,8 +193,8 @@ macro_rules! run_modify_test { let qs = setup_test!($preload_entries); { - let qs_write = qs.write(duration_from_epoch_now()); - $pre_hook(&qs_write); + let mut qs_write = async_std::task::block_on(qs.write(duration_from_epoch_now())); + $pre_hook(&mut qs_write); qs_write.commit().expect("commit failure!"); } @@ -262,7 +206,7 @@ macro_rules! run_modify_test { }; { - let qs_write = qs.write(duration_from_epoch_now()); + let mut qs_write = async_std::task::block_on(qs.write(duration_from_epoch_now())); let r = qs_write.modify(&me); $check(&qs_write); trace!("test result: {:?}", r); @@ -278,7 +222,7 @@ macro_rules! run_modify_test { } // Make sure there are no errors. trace!("starting verification"); - let ver = qs.verify(); + let ver = async_std::task::block_on(qs.verify()); trace!("verification -> {:?}", ver); assert!(ver.len() == 0); }}; @@ -310,10 +254,10 @@ macro_rules! 
run_delete_test { }; { - let qs_write = qs.write(duration_from_epoch_now()); + let mut qs_write = async_std::task::block_on(qs.write(duration_from_epoch_now())); let r = qs_write.delete(&de); trace!("test result: {:?}", r); - $check(&qs_write); + $check(&mut qs_write); assert!(r == $expect); match r { Ok(_) => { @@ -326,7 +270,7 @@ macro_rules! run_delete_test { } // Make sure there are no errors. trace!("starting verification"); - let ver = qs.verify(); + let ver = async_std::task::block_on(qs.verify()); trace!("verification -> {:?}", ver); assert!(ver.len() == 0); }}; diff --git a/kanidmd/lib/src/plugins/attrunique.rs b/kanidmd/lib/src/plugins/attrunique.rs index d74bc4891..3a58875c5 100644 --- a/kanidmd/lib/src/plugins/attrunique.rs +++ b/kanidmd/lib/src/plugins/attrunique.rs @@ -120,7 +120,7 @@ impl Plugin for AttrUnique { skip(qs, cand, _ce) )] fn pre_create_transform( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -138,7 +138,7 @@ impl Plugin for AttrUnique { #[instrument(level = "debug", name = "attrunique_pre_modify", skip(qs, cand, _me))] fn pre_modify( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _me: &ModifyEvent, ) -> Result<(), OperationError> { @@ -155,7 +155,7 @@ impl Plugin for AttrUnique { } #[instrument(level = "debug", name = "attrunique_verify", skip(qs))] - fn verify(qs: &QueryServerReadTransaction) -> Vec> { + fn verify(qs: &mut QueryServerReadTransaction) -> Vec> { // Only check live entries, not recycled. let filt_in = filter!(f_pres("class")); diff --git a/kanidmd/lib/src/plugins/base.rs b/kanidmd/lib/src/plugins/base.rs index bffca82c7..5bb8c8ea3 100644 --- a/kanidmd/lib/src/plugins/base.rs +++ b/kanidmd/lib/src/plugins/base.rs @@ -33,7 +33,7 @@ impl Plugin for Base { )] #[allow(clippy::cognitive_complexity)] fn pre_create_transform( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -153,7 +153,7 @@ impl Plugin for Base { #[instrument(level = "debug", name = "base_pre_modify", skip(_qs, _cand, me))] fn pre_modify( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, _cand: &mut Vec>, me: &ModifyEvent, ) -> Result<(), OperationError> { @@ -172,7 +172,7 @@ impl Plugin for Base { } #[instrument(level = "debug", name = "base_verify", skip(qs))] - fn verify(qs: &QueryServerReadTransaction) -> Vec> { + fn verify(qs: &mut QueryServerReadTransaction) -> Vec> { // Search for class = * let entries = match qs.internal_search(filter!(f_pres("class"))) { Ok(v) => v, diff --git a/kanidmd/lib/src/plugins/domain.rs b/kanidmd/lib/src/plugins/domain.rs index 335109869..ee8b67e2e 100644 --- a/kanidmd/lib/src/plugins/domain.rs +++ b/kanidmd/lib/src/plugins/domain.rs @@ -27,7 +27,7 @@ impl Plugin for Domain { skip(qs, cand, _ce) )] fn pre_create_transform( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -79,7 +79,7 @@ impl Plugin for Domain { #[instrument(level = "debug", name = "domain_pre_modify", skip(qs, cand, _me))] fn pre_modify( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _me: &ModifyEvent, ) -> Result<(), OperationError> { @@ -129,20 +129,18 @@ impl Plugin for Domain { #[cfg(test)] mod tests { - // use crate::prelude::*; + use crate::prelude::*; // test we can 
create and generate the id - #[test] - fn test_domain_generate_uuid() { - run_test!(|server: &QueryServer| { - let server_txn = server.write(duration_from_epoch_now()); - let e_dom = server_txn - .internal_search_uuid(&UUID_DOMAIN_INFO) - .expect("must not fail"); + #[qs_test] + async fn test_domain_generate_uuid(server: &QueryServer) { + let server_txn = server.write(duration_from_epoch_now()).await; + let e_dom = server_txn + .internal_search_uuid(&UUID_DOMAIN_INFO) + .expect("must not fail"); - let u_dom = server_txn.get_domain_uuid(); + let u_dom = server_txn.get_domain_uuid(); - assert!(e_dom.attribute_equality("domain_uuid", &PartialValue::new_uuid(u_dom))); - }) + assert!(e_dom.attribute_equality("domain_uuid", &PartialValue::new_uuid(u_dom))); } } diff --git a/kanidmd/lib/src/plugins/dyngroup.rs b/kanidmd/lib/src/plugins/dyngroup.rs index 2897726b0..6f3ce7065 100644 --- a/kanidmd/lib/src/plugins/dyngroup.rs +++ b/kanidmd/lib/src/plugins/dyngroup.rs @@ -141,7 +141,7 @@ impl DynGroup { #[instrument(level = "debug", name = "dyngroup_post_create", skip(qs, cand, ce))] pub fn post_create( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &[Entry], ce: &CreateEvent, ) -> Result, OperationError> { @@ -242,7 +242,7 @@ impl DynGroup { skip(qs, pre_cand, cand, me) )] pub fn post_modify( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, pre_cand: &[Arc>], cand: &[Entry], me: &ModifyEvent, @@ -352,7 +352,7 @@ impl DynGroup { // No post_delete handler is needed as refint takes care of this for us. - pub fn verify(_qs: &QueryServerReadTransaction) -> Vec> { + pub fn verify(_qs: &mut QueryServerReadTransaction) -> Vec> { vec![] } } diff --git a/kanidmd/lib/src/plugins/failure.rs b/kanidmd/lib/src/plugins/failure.rs deleted file mode 100644 index 64651817b..000000000 --- a/kanidmd/lib/src/plugins/failure.rs +++ /dev/null @@ -1,6 +0,0 @@ -// Failure inducing plugin -// -// Designed for complex server tests, this plugin is able to look at Event -// metadata and induce failures in various stages of query server operation -// execution. The idea is that we should be able to test and assert that -// rollback events do not have negative effects on various server elements. 
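The test conversions above and below all follow the same shape: the closure-based run_test! macro is removed, and tests become async functions annotated with #[qs_test] (from the new kanidmd/lib-macros crate) that receive the prepared &QueryServer directly, take write transactions with .await, and bind them mutably because the server entry points and plugin hooks now take &mut transactions. A minimal sketch of that shape, assuming #[qs_test] sets up the runtime and test server exactly as the converted tests suggest; the function name and body are illustrative only:

#[qs_test]
async fn test_example_shape(server: &QueryServer) {
    // Write transactions are async now and must be bound mutably,
    // since create/modify/delete and the plugin hooks take
    // &mut QueryServerWriteTransaction.
    let mut server_txn = server.write(duration_from_epoch_now()).await;

    // ... perform creates / modifies / searches against server_txn ...

    server_txn.commit().expect("must not fail");

    // Whole-server verification is async as well; the old synchronous
    // read()/write() test shims on QueryServer were removed.
    assert!(server.verify().await.is_empty());
}
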
diff --git a/kanidmd/lib/src/plugins/gidnumber.rs b/kanidmd/lib/src/plugins/gidnumber.rs index 8de7cc7d0..b26103901 100644 --- a/kanidmd/lib/src/plugins/gidnumber.rs +++ b/kanidmd/lib/src/plugins/gidnumber.rs @@ -70,7 +70,7 @@ impl Plugin for GidNumber { skip(_qs, cand, _ce) )] fn pre_create_transform( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -83,7 +83,7 @@ impl Plugin for GidNumber { #[instrument(level = "debug", name = "gidnumber_pre_modify", skip(_qs, cand, _me))] fn pre_modify( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _me: &ModifyEvent, ) -> Result<(), OperationError> { diff --git a/kanidmd/lib/src/plugins/jwskeygen.rs b/kanidmd/lib/src/plugins/jwskeygen.rs index 5c537692e..bf28544a8 100644 --- a/kanidmd/lib/src/plugins/jwskeygen.rs +++ b/kanidmd/lib/src/plugins/jwskeygen.rs @@ -77,7 +77,7 @@ impl Plugin for JwsKeygen { skip(_qs, cand, _ce) )] fn pre_create_transform( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -86,7 +86,7 @@ impl Plugin for JwsKeygen { #[instrument(level = "debug", name = "jwskeygen_pre_modify", skip(_qs, cand, _me))] fn pre_modify( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _me: &ModifyEvent, ) -> Result<(), OperationError> { diff --git a/kanidmd/lib/src/plugins/memberof.rs b/kanidmd/lib/src/plugins/memberof.rs index 0defe755e..b3be24fb1 100644 --- a/kanidmd/lib/src/plugins/memberof.rs +++ b/kanidmd/lib/src/plugins/memberof.rs @@ -96,7 +96,7 @@ fn do_memberof( #[allow(clippy::cognitive_complexity)] fn apply_memberof( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, // TODO: Experiment with HashSet/BTreeSet here instead of vec. 
// May require https://github.com/rust-lang/rust/issues/62924 to allow poping mut group_affect: Vec, @@ -212,7 +212,7 @@ impl Plugin for MemberOf { #[instrument(level = "debug", name = "memberof_post_create", skip(qs, cand, ce))] fn post_create( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &[Entry], ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -245,7 +245,7 @@ impl Plugin for MemberOf { skip(qs, pre_cand, cand, me) )] fn post_modify( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, pre_cand: &[Arc>], cand: &[Entry], me: &ModifyEvent, @@ -310,7 +310,7 @@ impl Plugin for MemberOf { #[instrument(level = "debug", name = "memberof_post_delete", skip(qs, cand, _de))] fn post_delete( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &[Entry], _de: &DeleteEvent, ) -> Result<(), OperationError> { @@ -333,7 +333,7 @@ impl Plugin for MemberOf { } #[instrument(level = "debug", name = "memberof_verify", skip(qs))] - fn verify(qs: &QueryServerReadTransaction) -> Vec> { + fn verify(qs: &mut QueryServerReadTransaction) -> Vec> { let mut r = Vec::new(); let filt_in = filter!(f_pres("class")); diff --git a/kanidmd/lib/src/plugins/mod.rs b/kanidmd/lib/src/plugins/mod.rs index f38597ca4..e52f92213 100644 --- a/kanidmd/lib/src/plugins/mod.rs +++ b/kanidmd/lib/src/plugins/mod.rs @@ -15,13 +15,11 @@ mod attrunique; mod base; mod domain; pub(crate) mod dyngroup; -mod failure; mod gidnumber; mod jwskeygen; mod memberof; mod password_import; mod protected; -mod recycle; mod refint; mod spn; @@ -29,7 +27,7 @@ trait Plugin { fn id() -> &'static str; fn pre_create_transform( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, _cand: &mut Vec>, _ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -41,7 +39,7 @@ trait Plugin { } fn pre_create( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, // List of what we will commit that is valid? _cand: &[Entry], _ce: &CreateEvent, @@ -51,7 +49,7 @@ trait Plugin { } fn post_create( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, // List of what we commited that was valid? _cand: &[Entry], _ce: &CreateEvent, @@ -61,7 +59,7 @@ trait Plugin { } fn pre_modify( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, _cand: &mut Vec>, _me: &ModifyEvent, ) -> Result<(), OperationError> { @@ -70,7 +68,7 @@ trait Plugin { } fn post_modify( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, // List of what we modified that was valid? _pre_cand: &[Arc>], _cand: &[Entry], @@ -81,7 +79,7 @@ trait Plugin { } fn pre_delete( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, _cand: &mut Vec>, _de: &DeleteEvent, ) -> Result<(), OperationError> { @@ -90,7 +88,7 @@ trait Plugin { } fn post_delete( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, // List of what we delete that was valid? _cand: &[Entry], _ce: &DeleteEvent, @@ -99,7 +97,7 @@ trait Plugin { Err(OperationError::InvalidState) } - fn verify(_qs: &QueryServerReadTransaction) -> Vec> { + fn verify(_qs: &mut QueryServerReadTransaction) -> Vec> { admin_error!("plugin {} has an unimplemented verify!", Self::id()); vec![Err(ConsistencyError::Unknown)] } @@ -121,7 +119,7 @@ macro_rules! 
run_verify_plugin { impl Plugins { #[instrument(level = "debug", name = "plugins::run_pre_create_transform", skip_all)] pub fn run_pre_create_transform( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -137,7 +135,7 @@ impl Plugins { #[instrument(level = "debug", name = "plugins::run_pre_create", skip_all)] pub fn run_pre_create( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &[Entry], ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -146,7 +144,7 @@ impl Plugins { #[instrument(level = "debug", name = "plugins::run_post_create", skip_all)] pub fn run_post_create( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &[Entry], ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -156,7 +154,7 @@ impl Plugins { #[instrument(level = "debug", name = "plugins::run_pre_modify", skip_all)] pub fn run_pre_modify( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, me: &ModifyEvent, ) -> Result<(), OperationError> { @@ -173,7 +171,7 @@ impl Plugins { #[instrument(level = "debug", name = "plugins::run_post_modify", skip_all)] pub fn run_post_modify( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, pre_cand: &[Arc>], cand: &[Entry], me: &ModifyEvent, @@ -185,7 +183,7 @@ impl Plugins { #[instrument(level = "debug", name = "plugins::run_pre_delete", skip_all)] pub fn run_pre_delete( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, de: &DeleteEvent, ) -> Result<(), OperationError> { @@ -194,7 +192,7 @@ impl Plugins { #[instrument(level = "debug", name = "plugins::run_post_delete", skip_all)] pub fn run_post_delete( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &[Entry], de: &DeleteEvent, ) -> Result<(), OperationError> { @@ -204,7 +202,7 @@ impl Plugins { #[instrument(level = "debug", name = "plugins::run_verify", skip_all)] pub fn run_verify( - qs: &QueryServerReadTransaction, + qs: &mut QueryServerReadTransaction, results: &mut Vec>, ) { run_verify_plugin!(qs, results, base::Base); diff --git a/kanidmd/lib/src/plugins/password_import.rs b/kanidmd/lib/src/plugins/password_import.rs index 50a1cd2bd..aa27abef1 100644 --- a/kanidmd/lib/src/plugins/password_import.rs +++ b/kanidmd/lib/src/plugins/password_import.rs @@ -22,7 +22,7 @@ impl Plugin for PasswordImport { skip(_qs, cand, _ce) )] fn pre_create_transform( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -70,7 +70,7 @@ impl Plugin for PasswordImport { skip(_qs, cand, _me) )] fn pre_modify( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _me: &ModifyEvent, ) -> Result<(), OperationError> { diff --git a/kanidmd/lib/src/plugins/protected.rs b/kanidmd/lib/src/plugins/protected.rs index 3fe74d6b2..6e6233371 100644 --- a/kanidmd/lib/src/plugins/protected.rs +++ b/kanidmd/lib/src/plugins/protected.rs @@ -39,7 +39,7 @@ impl Plugin for Protected { #[instrument(level = "debug", name = "protected_pre_create", skip(_qs, cand, ce))] fn pre_create( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, // List of what we will commit that is valid? 
cand: &[Entry], ce: &CreateEvent, @@ -67,7 +67,7 @@ impl Plugin for Protected { #[instrument(level = "debug", name = "protected_pre_modify", skip(_qs, cand, me))] fn pre_modify( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, // Should these be EntrySealed? cand: &mut Vec>, me: &ModifyEvent, @@ -138,7 +138,7 @@ impl Plugin for Protected { #[instrument(level = "debug", name = "protected_pre_delete", skip(_qs, cand, de))] fn pre_delete( - _qs: &QueryServerWriteTransaction, + _qs: &mut QueryServerWriteTransaction, // Should these be EntrySealed cand: &mut Vec>, de: &DeleteEvent, diff --git a/kanidmd/lib/src/plugins/recycle.rs b/kanidmd/lib/src/plugins/recycle.rs deleted file mode 100644 index bd5b73c1a..000000000 --- a/kanidmd/lib/src/plugins/recycle.rs +++ /dev/null @@ -1,2 +0,0 @@ -// Don't allow setting class = recycle/tombstone during any -// operation unless internal == true OR delete. diff --git a/kanidmd/lib/src/plugins/refint.rs b/kanidmd/lib/src/plugins/refint.rs index 06f130ea2..ae0f1595a 100644 --- a/kanidmd/lib/src/plugins/refint.rs +++ b/kanidmd/lib/src/plugins/refint.rs @@ -84,7 +84,7 @@ impl Plugin for ReferentialIntegrity { // be in cand AND db" to simply "is it in the DB?". #[instrument(level = "debug", name = "refint_post_create", skip(qs, cand, _ce))] fn post_create( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &[Entry], _ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -125,7 +125,7 @@ impl Plugin for ReferentialIntegrity { skip(qs, _pre_cand, _cand, me) )] fn post_modify( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, _pre_cand: &[Arc>], _cand: &[Entry], me: &ModifyEvent, @@ -165,7 +165,7 @@ impl Plugin for ReferentialIntegrity { #[instrument(level = "debug", name = "refint_post_delete", skip(qs, cand, _ce))] fn post_delete( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &[Entry], _ce: &DeleteEvent, ) -> Result<(), OperationError> { @@ -217,7 +217,7 @@ impl Plugin for ReferentialIntegrity { } #[instrument(level = "debug", name = "verify", skip(qs))] - fn verify(qs: &QueryServerReadTransaction) -> Vec> { + fn verify(qs: &mut QueryServerReadTransaction) -> Vec> { // Get all entries as cand // build a cand-uuid set let filt_in = filter_all!(f_pres("class")); @@ -599,7 +599,7 @@ mod tests { Value::new_refer_s("d2b496bd-8493-47b7-8142-f568b5cf47ee").unwrap() )]), None, - |qs: &QueryServerWriteTransaction| { + |qs: &mut QueryServerWriteTransaction| { // Any pre_hooks we need. 
In this case, we need to trigger the delete of testgroup_a let de_sin = unsafe { crate::event::DeleteEvent::new_internal_invalid(filter!(f_or!([f_eq( @@ -647,7 +647,7 @@ mod tests { preload, filter!(f_eq("name", PartialValue::new_iname("testgroup_a"))), None, - |_qs: &QueryServerWriteTransaction| {} + |_qs: &mut QueryServerWriteTransaction| {} ); } @@ -689,7 +689,7 @@ mod tests { preload, filter!(f_eq("name", PartialValue::new_iname("testgroup_b"))), None, - |_qs: &QueryServerWriteTransaction| {} + |_qs: &mut QueryServerWriteTransaction| {} ); } @@ -715,7 +715,7 @@ mod tests { preload, filter!(f_eq("name", PartialValue::new_iname("testgroup_b"))), None, - |_qs: &QueryServerWriteTransaction| {} + |_qs: &mut QueryServerWriteTransaction| {} ); } @@ -761,7 +761,7 @@ mod tests { preload, filter!(f_eq("name", PartialValue::new_iname("testgroup"))), None, - |qs: &QueryServerWriteTransaction| { + |qs: &mut QueryServerWriteTransaction| { let cands = qs .internal_search(filter!(f_eq( "oauth2_rs_name", diff --git a/kanidmd/lib/src/plugins/spn.rs b/kanidmd/lib/src/plugins/spn.rs index fa4824c3b..c257ce030 100644 --- a/kanidmd/lib/src/plugins/spn.rs +++ b/kanidmd/lib/src/plugins/spn.rs @@ -25,7 +25,7 @@ impl Plugin for Spn { skip(qs, cand, _ce) )] fn pre_create_transform( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _ce: &CreateEvent, ) -> Result<(), OperationError> { @@ -60,7 +60,7 @@ impl Plugin for Spn { #[instrument(level = "debug", name = "spn_pre_modify", skip(qs, cand, _me))] fn pre_modify( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, cand: &mut Vec>, _me: &ModifyEvent, ) -> Result<(), OperationError> { @@ -95,7 +95,7 @@ impl Plugin for Spn { skip(qs, pre_cand, cand, _ce) )] fn post_modify( - qs: &QueryServerWriteTransaction, + qs: &mut QueryServerWriteTransaction, // List of what we modified that was valid? pre_cand: &[Arc>], cand: &[Entry], @@ -138,7 +138,7 @@ impl Plugin for Spn { } #[instrument(level = "debug", name = "spn_verify", skip(qs))] - fn verify(qs: &QueryServerReadTransaction) -> Vec> { + fn verify(qs: &mut QueryServerReadTransaction) -> Vec> { // Verify that all items with spn's have valid spns. 
// We need to consider the case that an item has a different origin domain too, // so we should be able to verify that *those* spns validate to the trusted domain info @@ -313,42 +313,40 @@ mod tests { ); } - #[test] - fn test_spn_regen_domain_rename() { - run_test!(|server: &QueryServer| { - let server_txn = server.write(duration_from_epoch_now()); + #[qs_test] + async fn test_spn_regen_domain_rename(server: &QueryServer) { + let mut server_txn = server.write(duration_from_epoch_now()).await; - let ex1 = Value::new_spn_str("admin", "example.com"); - let ex2 = Value::new_spn_str("admin", "new.example.com"); - // get the current domain name - // check the spn on admin is admin@ - let e_pre = server_txn - .internal_search_uuid(&UUID_ADMIN) - .expect("must not fail"); + let ex1 = Value::new_spn_str("admin", "example.com"); + let ex2 = Value::new_spn_str("admin", "new.example.com"); + // get the current domain name + // check the spn on admin is admin@ + let e_pre = server_txn + .internal_search_uuid(&UUID_ADMIN) + .expect("must not fail"); - let e_pre_spn = e_pre.get_ava_single("spn").expect("must not fail"); - assert!(e_pre_spn == ex1); + let e_pre_spn = e_pre.get_ava_single("spn").expect("must not fail"); + assert!(e_pre_spn == ex1); - // trigger the domain_name change (this will be a cli option to the server - // in the final version), but it will still call the same qs function to perform the - // change. - unsafe { - server_txn - .domain_rename_inner("new.example.com") - .expect("should not fail!"); - } + // trigger the domain_name change (this will be a cli option to the server + // in the final version), but it will still call the same qs function to perform the + // change. + unsafe { + server_txn + .domain_rename_inner("new.example.com") + .expect("should not fail!"); + } - // check the spn on admin is admin@ - let e_post = server_txn - .internal_search_uuid(&UUID_ADMIN) - .expect("must not fail"); + // check the spn on admin is admin@ + let e_post = server_txn + .internal_search_uuid(&UUID_ADMIN) + .expect("must not fail"); - let e_post_spn = e_post.get_ava_single("spn").expect("must not fail"); - debug!("{:?}", e_post_spn); - debug!("{:?}", ex2); - assert!(e_post_spn == ex2); + let e_post_spn = e_post.get_ava_single("spn").expect("must not fail"); + debug!("{:?}", e_post_spn); + debug!("{:?}", ex2); + assert!(e_post_spn == ex2); - server_txn.commit().expect("Must not fail"); - }); + server_txn.commit().expect("Must not fail"); } } diff --git a/kanidmd/lib/src/schema.rs b/kanidmd/lib/src/schema.rs index def2f3831..e3e46552c 100644 --- a/kanidmd/lib/src/schema.rs +++ b/kanidmd/lib/src/schema.rs @@ -1672,9 +1672,8 @@ mod tests { #[test] fn test_schema_attribute_from_entry() { - run_test!(|_qs: &QueryServer| { - sch_from_entry_err!( - r#"{ + sch_from_entry_err!( + r#"{ "attrs": { "class": ["object", "attributetype"], "attributename": ["schema_attr_test"], @@ -1682,11 +1681,11 @@ mod tests { "uuid": ["66c68b2f-d02c-4243-8013-7946e40fe321"] } }"#, - SchemaAttribute - ); + SchemaAttribute + ); - sch_from_entry_err!( - r#"{ + sch_from_entry_err!( + r#"{ "attrs": { "class": ["object", "attributetype"], "attributename": ["schema_attr_test"], @@ -1697,11 +1696,11 @@ mod tests { "syntax": ["UTF8STRING"] } }"#, - SchemaAttribute - ); + SchemaAttribute + ); - sch_from_entry_err!( - r#"{ + sch_from_entry_err!( + r#"{ "attrs": { "class": ["object", "attributetype"], "attributename": ["schema_attr_test"], @@ -1713,11 +1712,11 @@ mod tests { "syntax": ["UTF8STRING"] } }"#, - SchemaAttribute - ); + 
SchemaAttribute + ); - sch_from_entry_err!( - r#"{ + sch_from_entry_err!( + r#"{ "attrs": { "class": ["object", "attributetype"], "attributename": ["schema_attr_test"], @@ -1729,11 +1728,11 @@ mod tests { "syntax": ["UTF8STRING"] } }"#, - SchemaAttribute - ); + SchemaAttribute + ); - sch_from_entry_err!( - r#"{ + sch_from_entry_err!( + r#"{ "attrs": { "class": ["object", "attributetype"], "attributename": ["schema_attr_test"], @@ -1745,12 +1744,12 @@ mod tests { "syntax": ["TNEOUNTUH"] } }"#, - SchemaAttribute - ); + SchemaAttribute + ); - // Index is allowed to be empty - sch_from_entry_ok!( - r#"{ + // Index is allowed to be empty + sch_from_entry_ok!( + r#"{ "attrs": { "class": ["object", "attributetype"], "attributename": ["schema_attr_test"], @@ -1761,12 +1760,12 @@ mod tests { "syntax": ["UTF8STRING"] } }"#, - SchemaAttribute - ); + SchemaAttribute + ); - // Index present - sch_from_entry_ok!( - r#"{ + // Index present + sch_from_entry_ok!( + r#"{ "attrs": { "class": ["object", "attributetype"], "attributename": ["schema_attr_test"], @@ -1778,27 +1777,25 @@ mod tests { "syntax": ["UTF8STRING"] } }"#, - SchemaAttribute - ); - }); + SchemaAttribute + ); } #[test] fn test_schema_class_from_entry() { - run_test!(|_qs: &QueryServer| { - sch_from_entry_err!( - r#"{ + sch_from_entry_err!( + r#"{ "attrs": { "class": ["object", "classtype"], "classname": ["schema_class_test"], "uuid": ["66c68b2f-d02c-4243-8013-7946e40fe321"] } }"#, - SchemaClass - ); + SchemaClass + ); - sch_from_entry_err!( - r#"{ + sch_from_entry_err!( + r#"{ "attrs": { "class": ["object"], "classname": ["schema_class_test"], @@ -1806,12 +1803,12 @@ mod tests { "uuid": ["66c68b2f-d02c-4243-8013-7946e40fe321"] } }"#, - SchemaClass - ); + SchemaClass + ); - // Classes can be valid with no attributes provided. - sch_from_entry_ok!( - r#"{ + // Classes can be valid with no attributes provided. 
+ sch_from_entry_ok!( + r#"{ "attrs": { "class": ["object", "classtype"], "classname": ["schema_class_test"], @@ -1819,12 +1816,12 @@ mod tests { "uuid": ["66c68b2f-d02c-4243-8013-7946e40fe321"] } }"#, - SchemaClass - ); + SchemaClass + ); - // Classes with various may/must - sch_from_entry_ok!( - r#"{ + // Classes with various may/must + sch_from_entry_ok!( + r#"{ "attrs": { "class": ["object", "classtype"], "classname": ["schema_class_test"], @@ -1833,11 +1830,11 @@ mod tests { "systemmust": ["d"] } }"#, - SchemaClass - ); + SchemaClass + ); - sch_from_entry_ok!( - r#"{ + sch_from_entry_ok!( + r#"{ "attrs": { "class": ["object", "classtype"], "classname": ["schema_class_test"], @@ -1846,11 +1843,11 @@ mod tests { "systemmay": ["c"] } }"#, - SchemaClass - ); + SchemaClass + ); - sch_from_entry_ok!( - r#"{ + sch_from_entry_ok!( + r#"{ "attrs": { "class": ["object", "classtype"], "classname": ["schema_class_test"], @@ -1860,11 +1857,11 @@ mod tests { "must": ["b"] } }"#, - SchemaClass - ); + SchemaClass + ); - sch_from_entry_ok!( - r#"{ + sch_from_entry_ok!( + r#"{ "attrs": { "class": ["object", "classtype"], "classname": ["schema_class_test"], @@ -1876,9 +1873,8 @@ mod tests { "systemmust": ["d"] } }"#, - SchemaClass - ); - }); + SchemaClass + ); } #[test] diff --git a/kanidmd/lib/src/server.rs b/kanidmd/lib/src/server.rs index 1fd9accb9..63d69c996 100644 --- a/kanidmd/lib/src/server.rs +++ b/kanidmd/lib/src/server.rs @@ -7,7 +7,6 @@ use std::cell::Cell; use std::sync::Arc; use std::time::Duration; -use async_std::task; use concread::arcache::{ARCache, ARCacheBuilder, ARCacheReadTxn}; use concread::cowcell::*; use hashbrown::{HashMap, HashSet}; @@ -821,7 +820,7 @@ impl<'a> QueryServerReadTransaction<'a> { // Verify the data content of the server is as expected. This will probably // call various functions for validation, including possibly plugin // verifications. - fn verify(&self) -> Vec> { + fn verify(&mut self) -> Vec> { // If we fail after backend, we need to return NOW because we can't // assert any other faith in the DB states. // * backend @@ -977,18 +976,13 @@ impl QueryServer { } } - #[cfg(test)] - pub fn read(&self) -> QueryServerReadTransaction { - task::block_on(self.read_async()) - } - pub fn try_quiesce(&self) { self.be.try_quiesce(); self.accesscontrols.try_quiesce(); self.resolve_filter_cache.try_quiesce(); } - pub async fn read_async(&self) -> QueryServerReadTransaction<'_> { + pub async fn read(&self) -> QueryServerReadTransaction<'_> { // We need to ensure a db conn will be available #[allow(clippy::expect_used)] let db_ticket = self @@ -1007,13 +1001,7 @@ impl QueryServer { } } - #[cfg(test)] - pub fn write(&self, ts: Duration) -> QueryServerWriteTransaction { - // Feed the current schema index metadata to the be write transaction. 
- task::block_on(self.write_async(ts)) - } - - pub async fn write_async(&self, ts: Duration) -> QueryServerWriteTransaction<'_> { + pub async fn write(&self, ts: Duration) -> QueryServerWriteTransaction<'_> { // Guarantee we are the only writer on the thread pool #[allow(clippy::expect_used)] let write_ticket = self @@ -1029,7 +1017,6 @@ impl QueryServer { .await .expect("unable to aquire db_ticket for qsw"); - // let schema_write = self.schema.write().await; let schema_write = self.schema.write(); let be_txn = self.be.write(); let d_info = self.d_info.write(); @@ -1065,7 +1052,7 @@ impl QueryServer { } } - pub fn initialise_helper(&self, ts: Duration) -> Result<(), OperationError> { + pub async fn initialise_helper(&self, ts: Duration) -> Result<(), OperationError> { // Check our database version - attempt to do an initial indexing // based on the in memory configuration // @@ -1076,7 +1063,7 @@ impl QueryServer { // A major reason here to split to multiple transactions is to allow schema // reloading to occur, which causes the idxmeta to update, and allows validation // of the schema in the subsequent steps as we proceed. - let reindex_write_1 = task::block_on(self.write_async(ts)); + let mut reindex_write_1 = self.write(ts).await; reindex_write_1 .upgrade_reindex(SYSTEM_INDEX_VERSION) .and_then(|_| reindex_write_1.commit())?; @@ -1090,19 +1077,19 @@ impl QueryServer { // the schema to tell us what's indexed), but because we have the in // mem schema that defines how schema is structuded, and this is all // marked "system", then we won't have an issue here. - let ts_write_1 = task::block_on(self.write_async(ts)); + let mut ts_write_1 = self.write(ts).await; ts_write_1 .initialise_schema_core() .and_then(|_| ts_write_1.commit())?; - let ts_write_2 = task::block_on(self.write_async(ts)); + let mut ts_write_2 = self.write(ts).await; ts_write_2 .initialise_schema_idm() .and_then(|_| ts_write_2.commit())?; // reindex and set to version + 1, this way when we bump the version // we are essetially pushing this version id back up to step write_1 - let reindex_write_2 = task::block_on(self.write_async(ts)); + let mut reindex_write_2 = self.write(ts).await; reindex_write_2 .upgrade_reindex(SYSTEM_INDEX_VERSION + 1) .and_then(|_| reindex_write_2.commit())?; @@ -1112,7 +1099,7 @@ impl QueryServer { // // A side effect of these reloads is that other plugins or elements that reload // on schema change are now setup. - let mut slope_reload = task::block_on(self.write_async(ts)); + let mut slope_reload = self.write(ts).await; slope_reload.set_phase(ServerPhase::SchemaReady); slope_reload.force_schema_reload(); slope_reload.commit()?; @@ -1125,7 +1112,7 @@ impl QueryServer { // the indexing subsystem is schema/value agnostic - the fact the values still let their keys // be extracted, means that the pres indexes will be valid even though the entries are pending // migration. We must be sure to NOT use EQ/SUB indexes in the migration code however! - let migrate_txn = task::block_on(self.write_async(ts)); + let mut migrate_txn = self.write(ts).await; // If we are "in the process of being setup" this is 0, and the migrations will have no // effect as ... there is nothing to migrate! It allows reset of the version to 0 to force // db migrations to take place. @@ -1167,7 +1154,7 @@ impl QueryServer { migrate_txn.commit()?; // Migrations complete. Init idm will now set the version as needed. 
- let mut ts_write_3 = task::block_on(self.write_async(ts)); + let mut ts_write_3 = self.write(ts).await; ts_write_3.initialise_idm().and_then(|_| { ts_write_3.set_phase(ServerPhase::Running); ts_write_3.commit() @@ -1177,15 +1164,15 @@ impl QueryServer { Ok(()) } - pub fn verify(&self) -> Vec> { - let r_txn = task::block_on(self.read_async()); + pub async fn verify(&self) -> Vec> { + let mut r_txn = self.read().await; r_txn.verify() } } impl<'a> QueryServerWriteTransaction<'a> { #[instrument(level = "debug", skip_all)] - pub fn create(&self, ce: &CreateEvent) -> Result<(), OperationError> { + pub fn create(&mut self, ce: &CreateEvent) -> Result<(), OperationError> { // The create event is a raw, read only representation of the request // that was made to us, including information about the identity // performing the request. @@ -1336,7 +1323,7 @@ impl<'a> QueryServerWriteTransaction<'a> { #[allow(clippy::cognitive_complexity)] #[instrument(level = "debug", skip_all)] - pub fn delete(&self, de: &DeleteEvent) -> Result<(), OperationError> { + pub fn delete(&mut self, de: &DeleteEvent) -> Result<(), OperationError> { // Do you have access to view all the set members? Reduce based on your // read permissions and attrs // THIS IS PRETTY COMPLEX SEE THE DESIGN DOC @@ -1544,7 +1531,7 @@ impl<'a> QueryServerWriteTransaction<'a> { } #[instrument(level = "debug", skip_all)] - pub fn revive_recycled(&self, re: &ReviveRecycledEvent) -> Result<(), OperationError> { + pub fn revive_recycled(&mut self, re: &ReviveRecycledEvent) -> Result<(), OperationError> { // Revive an entry to live. This is a specialised function, and draws a lot of // inspiration from modify. // @@ -1694,7 +1681,10 @@ impl<'a> QueryServerWriteTransaction<'a> { } #[instrument(level = "debug", skip_all)] - pub fn revive_recycled_legacy(&self, re: &ReviveRecycledEvent) -> Result<(), OperationError> { + pub fn revive_recycled_legacy( + &mut self, + re: &ReviveRecycledEvent, + ) -> Result<(), OperationError> { // Revive an entry to live. This is a specialised function, and draws a lot of // inspiration from modify. // @@ -1765,7 +1755,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// and call multiple pre-applies at the same time, else you can cause DB corruption. #[instrument(level = "debug", skip_all)] pub(crate) unsafe fn modify_pre_apply<'x>( - &self, + &mut self, me: &'x ModifyEvent, ) -> Result>, OperationError> { // Get the candidates. @@ -1886,7 +1876,7 @@ impl<'a> QueryServerWriteTransaction<'a> { } #[instrument(level = "debug", skip_all)] - pub(crate) fn modify_apply(&self, mp: ModifyPartial<'_>) -> Result<(), OperationError> { + pub(crate) fn modify_apply(&mut self, mp: ModifyPartial<'_>) -> Result<(), OperationError> { let ModifyPartial { norm_cand, pre_candidates, @@ -1976,7 +1966,7 @@ impl<'a> QueryServerWriteTransaction<'a> { } #[instrument(level = "debug", skip_all)] - pub fn modify(&self, me: &ModifyEvent) -> Result<(), OperationError> { + pub fn modify(&mut self, me: &ModifyEvent) -> Result<(), OperationError> { let mp = unsafe { self.modify_pre_apply(me)? }; if let Some(mp) = mp { self.modify_apply(mp) @@ -2131,7 +2121,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// Migrate 2 to 3 changes the name, domain_name types from iutf8 to iname. #[instrument(level = "debug", skip_all)] - pub fn migrate_2_to_3(&self) -> Result<(), OperationError> { + pub fn migrate_2_to_3(&mut self) -> Result<(), OperationError> { admin_warn!("starting 2 to 3 migration. 
THIS MAY TAKE A LONG TIME!"); // Get all entries where pres name or domain_name. INCLUDE TS + RECYCLE. @@ -2203,7 +2193,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// Migrate 3 to 4 - this triggers a regen of the domains security token /// as we previously did not have it in the entry. #[instrument(level = "debug", skip_all)] - pub fn migrate_3_to_4(&self) -> Result<(), OperationError> { + pub fn migrate_3_to_4(&mut self) -> Result<(), OperationError> { admin_warn!("starting 3 to 4 migration."); let filter = filter!(f_eq("uuid", (*PVUUID_DOMAIN_INFO).clone())); let modlist = ModifyList::new_purge("domain_token_key"); @@ -2214,7 +2204,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// Migrate 4 to 5 - this triggers a regen of all oauth2 RS es256 der keys /// as we previously did not generate them on entry creation. #[instrument(level = "debug", skip_all)] - pub fn migrate_4_to_5(&self) -> Result<(), OperationError> { + pub fn migrate_4_to_5(&mut self) -> Result<(), OperationError> { admin_warn!("starting 4 to 5 migration."); let filter = filter!(f_and!([ f_eq("class", (*PVCLASS_OAUTH2_RS).clone()), @@ -2228,7 +2218,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// Migrate 5 to 6 - This updates the domain info item to reset the token /// keys based on the new encryption types. #[instrument(level = "debug", skip_all)] - pub fn migrate_5_to_6(&self) -> Result<(), OperationError> { + pub fn migrate_5_to_6(&mut self) -> Result<(), OperationError> { admin_warn!("starting 5 to 6 migration."); let filter = filter!(f_eq("uuid", (*PVUUID_DOMAIN_INFO).clone())); let mut modlist = ModifyList::new_purge("domain_token_key"); @@ -2246,7 +2236,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// Modify accounts that are not persons, to be service accounts so that the extension /// rules remain valid. #[instrument(level = "debug", skip_all)] - pub fn migrate_6_to_7(&self) -> Result<(), OperationError> { + pub fn migrate_6_to_7(&mut self) -> Result<(), OperationError> { admin_warn!("starting 6 to 7 migration."); let filter = filter!(f_and!([ f_eq("class", (*PVCLASS_ACCOUNT).clone()), @@ -2261,7 +2251,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// /// Touch all service accounts to trigger a regen of their es256 jws keys for api tokens #[instrument(level = "debug", skip_all)] - pub fn migrate_7_to_8(&self) -> Result<(), OperationError> { + pub fn migrate_7_to_8(&mut self) -> Result<(), OperationError> { admin_warn!("starting 7 to 8 migration."); let filter = filter!(f_eq("class", (*PVCLASS_SERVICE_ACCOUNT).clone())); let modlist = ModifyList::new_append("class", Value::new_class("service_account")); @@ -2276,7 +2266,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// /// The second change improves the current scope system to remove the implicit scope type. #[instrument(level = "debug", skip_all)] - pub fn migrate_8_to_9(&self) -> Result<(), OperationError> { + pub fn migrate_8_to_9(&mut self) -> Result<(), OperationError> { admin_warn!("starting 8 to 9 migration."); let filt = filter_all!(f_or!([ f_eq("class", PVCLASS_OAUTH2_RS.clone()), @@ -2357,7 +2347,7 @@ impl<'a> QueryServerWriteTransaction<'a> { // only, allowing certain plugin by passes etc. 
pub fn internal_create( - &self, + &mut self, entries: Vec>, ) -> Result<(), OperationError> { // Start the audit scope @@ -2366,7 +2356,10 @@ impl<'a> QueryServerWriteTransaction<'a> { self.create(&ce) } - pub fn internal_delete(&self, filter: &Filter) -> Result<(), OperationError> { + pub fn internal_delete( + &mut self, + filter: &Filter, + ) -> Result<(), OperationError> { let f_valid = filter .validate(self.get_schema()) .map_err(OperationError::SchemaViolation)?; @@ -2376,7 +2369,7 @@ impl<'a> QueryServerWriteTransaction<'a> { #[instrument(level = "debug", skip_all)] pub fn internal_modify( - &self, + &mut self, filter: &Filter, modlist: &ModifyList, ) -> Result<(), OperationError> { @@ -2391,7 +2384,7 @@ impl<'a> QueryServerWriteTransaction<'a> { } pub fn impersonate_modify_valid( - &self, + &mut self, f_valid: Filter, f_intent_valid: Filter, m_valid: ModifyList, @@ -2402,7 +2395,7 @@ impl<'a> QueryServerWriteTransaction<'a> { } pub fn impersonate_modify( - &self, + &mut self, filter: &Filter, filter_intent: &Filter, modlist: &ModifyList, @@ -2424,7 +2417,7 @@ impl<'a> QueryServerWriteTransaction<'a> { } pub fn impersonate_modify_gen_event( - &self, + &mut self, filter: &Filter, filter_intent: &Filter, modlist: &ModifyList, @@ -2468,7 +2461,7 @@ impl<'a> QueryServerWriteTransaction<'a> { */ #[instrument(level = "debug", skip_all)] - pub fn internal_migrate_or_create_str(&self, e_str: &str) -> Result<(), OperationError> { + pub fn internal_migrate_or_create_str(&mut self, e_str: &str) -> Result<(), OperationError> { let res = Entry::from_proto_entry_str(e_str, self) /* .and_then(|e: Entry| { @@ -2483,7 +2476,7 @@ impl<'a> QueryServerWriteTransaction<'a> { } pub fn internal_migrate_or_create( - &self, + &mut self, e: Entry, ) -> Result<(), OperationError> { // if the thing exists, ensure the set of attributes on @@ -2593,7 +2586,7 @@ impl<'a> QueryServerWriteTransaction<'a> { } */ - pub fn initialise_schema_core(&self) -> Result<(), OperationError> { + pub fn initialise_schema_core(&mut self) -> Result<(), OperationError> { admin_debug!("initialise_schema_core -> start ..."); // Load in all the "core" schema, that we already have in "memory". let entries = self.schema.to_entries(); @@ -2615,7 +2608,7 @@ impl<'a> QueryServerWriteTransaction<'a> { r } - pub fn initialise_schema_idm(&self) -> Result<(), OperationError> { + pub fn initialise_schema_idm(&mut self) -> Result<(), OperationError> { admin_debug!("initialise_schema_idm -> start ..."); // List of IDM schemas to init. let idm_schema: Vec<&str> = vec![ @@ -2688,7 +2681,7 @@ impl<'a> QueryServerWriteTransaction<'a> { } // This function is idempotent - pub fn initialise_idm(&self) -> Result<(), OperationError> { + pub fn initialise_idm(&mut self) -> Result<(), OperationError> { // First, check the system_info object. This stores some server information // and details. It's a pretty const thing. Also check anonymous, important to many // concepts. @@ -3056,7 +3049,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// Initiate a domain display name change process. 
This isn't particularly scary /// because it's just a wibbly human-facing thing, not used for secure /// activities (yet) - pub fn set_domain_display_name(&self, new_domain_name: &str) -> Result<(), OperationError> { + pub fn set_domain_display_name(&mut self, new_domain_name: &str) -> Result<(), OperationError> { let modl = ModifyList::new_purge_and_set( "domain_display_name", Value::new_utf8(new_domain_name.to_string()), @@ -3068,7 +3061,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// Initiate a domain rename process. This is generally an internal function but it's /// exposed to the cli for admins to be able to initiate the process. - pub fn domain_rename(&self, new_domain_name: &str) -> Result<(), OperationError> { + pub fn domain_rename(&mut self, new_domain_name: &str) -> Result<(), OperationError> { // We can't use the d_info struct here, because this has the database version of the domain // name, not the in memory (config) version. We need to accept the domain's // new name from the caller so we can change this. @@ -3085,7 +3078,7 @@ impl<'a> QueryServerWriteTransaction<'a> { /// approached, especially if we have a domain re-name replicated to us. It could /// be that we end up needing to have this as a cow cell or similar? pub(crate) unsafe fn domain_rename_inner( - &self, + &mut self, new_domain_name: &str, ) -> Result<(), OperationError> { let modl = ModifyList::new_purge_and_set("domain_name", Value::new_iname(new_domain_name)); @@ -3105,7 +3098,7 @@ impl<'a> QueryServerWriteTransaction<'a> { self.changed_schema.set(true); } - pub(crate) fn upgrade_reindex(&self, v: i64) -> Result<(), OperationError> { + pub(crate) fn upgrade_reindex(&mut self, v: i64) -> Result<(), OperationError> { self.be_txn.upgrade_reindex(v) } @@ -3201,1129 +3194,1089 @@ mod tests { use crate::event::{CreateEvent, DeleteEvent, ModifyEvent, ReviveRecycledEvent, SearchEvent}; use crate::prelude::*; - #[test] - fn test_qs_create_user() { - run_test!(|server: &QueryServer| { - let server_txn = server.write(duration_from_epoch_now()); - let filt = filter!(f_eq("name", PartialValue::new_iname("testperson"))); - let admin = server_txn - .internal_search_uuid(&UUID_ADMIN) - .expect("failed"); + #[qs_test] + async fn test_create_user(server: &QueryServer) { + let mut server_txn = server.write(duration_from_epoch_now()).await; + let filt = filter!(f_eq("name", PartialValue::new_iname("testperson"))); + let admin = server_txn + .internal_search_uuid(&UUID_ADMIN) + .expect("failed"); - let se1 = unsafe { SearchEvent::new_impersonate_entry(admin.clone(), filt.clone()) }; - let se2 = unsafe { SearchEvent::new_impersonate_entry(admin, filt) }; + let se1 = unsafe { SearchEvent::new_impersonate_entry(admin.clone(), filt.clone()) }; + let se2 = unsafe { SearchEvent::new_impersonate_entry(admin, filt) }; - let mut e = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("class", Value::new_class("account")), - ("name", Value::new_iname("testperson")), - ("spn", Value::new_spn_str("testperson", "example.com")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson")), - ("displayname", Value::new_utf8s("testperson")) - ); + let mut e = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("class", Value::new_class("account")), + ("name", Value::new_iname("testperson")), + ("spn", Value::new_spn_str("testperson", "example.com")), + ( + "uuid", + 
Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson")), + ("displayname", Value::new_utf8s("testperson")) + ); - let ce = CreateEvent::new_internal(vec![e.clone()]); + let ce = CreateEvent::new_internal(vec![e.clone()]); - let r1 = server_txn.search(&se1).expect("search failure"); - assert!(r1.is_empty()); + let r1 = server_txn.search(&se1).expect("search failure"); + assert!(r1.is_empty()); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - let r2 = server_txn.search(&se2).expect("search failure"); - debug!("--> {:?}", r2); - assert!(r2.len() == 1); + let r2 = server_txn.search(&se2).expect("search failure"); + debug!("--> {:?}", r2); + assert!(r2.len() == 1); - // We apply some member-of in the server now, so we add these before we seal. - e.add_ava("class", Value::new_class("memberof")); - e.add_ava("memberof", Value::new_refer(UUID_IDM_ALL_PERSONS)); - e.add_ava("directmemberof", Value::new_refer(UUID_IDM_ALL_PERSONS)); - e.add_ava("memberof", Value::new_refer(UUID_IDM_ALL_ACCOUNTS)); - e.add_ava("directmemberof", Value::new_refer(UUID_IDM_ALL_ACCOUNTS)); + // We apply some member-of in the server now, so we add these before we seal. + e.add_ava("class", Value::new_class("memberof")); + e.add_ava("memberof", Value::new_refer(UUID_IDM_ALL_PERSONS)); + e.add_ava("directmemberof", Value::new_refer(UUID_IDM_ALL_PERSONS)); + e.add_ava("memberof", Value::new_refer(UUID_IDM_ALL_ACCOUNTS)); + e.add_ava("directmemberof", Value::new_refer(UUID_IDM_ALL_ACCOUNTS)); - let expected = unsafe { vec![Arc::new(e.into_sealed_committed())] }; + let expected = unsafe { vec![Arc::new(e.into_sealed_committed())] }; - assert_eq!(r2, expected); + assert_eq!(r2, expected); + assert!(server_txn.commit().is_ok()); + } + + #[qs_test] + async fn test_init_idempotent_schema_core(server: &QueryServer) { + { + // Setup and abort. + let mut server_txn = server.write(duration_from_epoch_now()).await; + assert!(server_txn.initialise_schema_core().is_ok()); + } + { + let mut server_txn = server.write(duration_from_epoch_now()).await; + assert!(server_txn.initialise_schema_core().is_ok()); + assert!(server_txn.initialise_schema_core().is_ok()); assert!(server_txn.commit().is_ok()); - }); + } + { + // Now do it again in a new txn, but abort + let mut server_txn = server.write(duration_from_epoch_now()).await; + assert!(server_txn.initialise_schema_core().is_ok()); + } + { + // Now do it again in a new txn. + let mut server_txn = server.write(duration_from_epoch_now()).await; + assert!(server_txn.initialise_schema_core().is_ok()); + assert!(server_txn.commit().is_ok()); + } } - #[test] - fn test_qs_init_idempotent_schema_core() { - run_test!(|server: &QueryServer| { - { - // Setup and abort. - let server_txn = server.write(duration_from_epoch_now()); - assert!(server_txn.initialise_schema_core().is_ok()); - } - { - let server_txn = server.write(duration_from_epoch_now()); - assert!(server_txn.initialise_schema_core().is_ok()); - assert!(server_txn.initialise_schema_core().is_ok()); - assert!(server_txn.commit().is_ok()); - } - { - // Now do it again in a new txn, but abort - let server_txn = server.write(duration_from_epoch_now()); - assert!(server_txn.initialise_schema_core().is_ok()); - } - { - // Now do it again in a new txn. 
- let server_txn = server.write(duration_from_epoch_now()); - assert!(server_txn.initialise_schema_core().is_ok()); - assert!(server_txn.commit().is_ok()); - } - }); - } + #[qs_test] + async fn test_modify(server: &QueryServer) { + // Create an object + let mut server_txn = server.write(duration_from_epoch_now()).await; - #[test] - fn test_qs_modify() { - run_test!(|server: &QueryServer| { - // Create an object - let server_txn = server.write(duration_from_epoch_now()); + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson1")), + ("displayname", Value::new_utf8s("testperson1")) + ); - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson1")), - ("displayname", Value::new_utf8s("testperson1")) - ); + let e2 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson2")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63932").expect("uuid") + ), + ("description", Value::new_utf8s("testperson2")), + ("displayname", Value::new_utf8s("testperson2")) + ); - let e2 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson2")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63932").expect("uuid") - ), - ("description", Value::new_utf8s("testperson2")), - ("displayname", Value::new_utf8s("testperson2")) - ); + let ce = CreateEvent::new_internal(vec![e1.clone(), e2.clone()]); - let ce = CreateEvent::new_internal(vec![e1.clone(), e2.clone()]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); + // Empty Modlist (filter is valid) + let me_emp = unsafe { + ModifyEvent::new_internal_invalid( + filter!(f_pres("class")), + ModifyList::new_list(vec![]), + ) + }; + assert!(server_txn.modify(&me_emp) == Err(OperationError::EmptyRequest)); - // Empty Modlist (filter is valid) - let me_emp = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_pres("class")), - ModifyList::new_list(vec![]), - ) - }; - assert!(server_txn.modify(&me_emp) == Err(OperationError::EmptyRequest)); - - // Mod changes no objects - let me_nochg = unsafe { - ModifyEvent::new_impersonate_entry_ser( - JSON_ADMIN_V1, - filter!(f_eq("name", PartialValue::new_iname("flarbalgarble"))), - ModifyList::new_list(vec![Modify::Present( - AttrString::from("description"), - Value::from("anusaosu"), - )]), - ) - }; - assert!(server_txn.modify(&me_nochg) == Err(OperationError::NoMatchingEntries)); - - // Filter is invalid to schema - to check this due to changes in the way events are - // handled, we put this via the internal modify function to get the modlist - // checked for us. Normal server operation doesn't allow weird bypasses like - // this. 
- let r_inv_1 = server_txn.internal_modify( - &filter!(f_eq("tnanuanou", PartialValue::new_iname("Flarbalgarble"))), - &ModifyList::new_list(vec![Modify::Present( + // Mod changes no objects + let me_nochg = unsafe { + ModifyEvent::new_impersonate_entry_ser( + JSON_ADMIN_V1, + filter!(f_eq("name", PartialValue::new_iname("flarbalgarble"))), + ModifyList::new_list(vec![Modify::Present( AttrString::from("description"), Value::from("anusaosu"), )]), - ); - assert!( - r_inv_1 - == Err(OperationError::SchemaViolation( - SchemaError::InvalidAttribute("tnanuanou".to_string()) - )) - ); + ) + }; + assert!(server_txn.modify(&me_nochg) == Err(OperationError::NoMatchingEntries)); - // Mod is invalid to schema - let me_inv_m = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_pres("class")), - ModifyList::new_list(vec![Modify::Present( - AttrString::from("htnaonu"), - Value::from("anusaosu"), - )]), - ) - }; - assert!( - server_txn.modify(&me_inv_m) - == Err(OperationError::SchemaViolation( - SchemaError::InvalidAttribute("htnaonu".to_string()) - )) - ); + // Filter is invalid to schema - to check this due to changes in the way events are + // handled, we put this via the internal modify function to get the modlist + // checked for us. Normal server operation doesn't allow weird bypasses like + // this. + let r_inv_1 = server_txn.internal_modify( + &filter!(f_eq("tnanuanou", PartialValue::new_iname("Flarbalgarble"))), + &ModifyList::new_list(vec![Modify::Present( + AttrString::from("description"), + Value::from("anusaosu"), + )]), + ); + assert!( + r_inv_1 + == Err(OperationError::SchemaViolation( + SchemaError::InvalidAttribute("tnanuanou".to_string()) + )) + ); - // Mod single object - let me_sin = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_eq("name", PartialValue::new_iname("testperson2"))), - ModifyList::new_list(vec![ - Modify::Purged(AttrString::from("description")), - Modify::Present(AttrString::from("description"), Value::from("anusaosu")), - ]), - ) - }; - assert!(server_txn.modify(&me_sin).is_ok()); + // Mod is invalid to schema + let me_inv_m = unsafe { + ModifyEvent::new_internal_invalid( + filter!(f_pres("class")), + ModifyList::new_list(vec![Modify::Present( + AttrString::from("htnaonu"), + Value::from("anusaosu"), + )]), + ) + }; + assert!( + server_txn.modify(&me_inv_m) + == Err(OperationError::SchemaViolation( + SchemaError::InvalidAttribute("htnaonu".to_string()) + )) + ); - // Mod multiple object - let me_mult = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_or!([ - f_eq("name", PartialValue::new_iname("testperson1")), - f_eq("name", PartialValue::new_iname("testperson2")), - ])), - ModifyList::new_list(vec![ - Modify::Purged(AttrString::from("description")), - Modify::Present(AttrString::from("description"), Value::from("anusaosu")), - ]), - ) - }; - assert!(server_txn.modify(&me_mult).is_ok()); + // Mod single object + let me_sin = unsafe { + ModifyEvent::new_internal_invalid( + filter!(f_eq("name", PartialValue::new_iname("testperson2"))), + ModifyList::new_list(vec![ + Modify::Purged(AttrString::from("description")), + Modify::Present(AttrString::from("description"), Value::from("anusaosu")), + ]), + ) + }; + assert!(server_txn.modify(&me_sin).is_ok()); - assert!(server_txn.commit().is_ok()); - }) - } - - #[test] - fn test_modify_invalid_class() { - // Test modifying an entry and adding an extra class, that would cause the entry - // to no longer conform to schema. 
- run_test!(|server: &QueryServer| { - let server_txn = server.write(duration_from_epoch_now()); - - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson1")), - ("displayname", Value::new_utf8s("testperson1")) - ); - - let ce = CreateEvent::new_internal(vec![e1.clone()]); - - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); - - // Add class but no values - let me_sin = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_eq("name", PartialValue::new_iname("testperson1"))), - ModifyList::new_list(vec![Modify::Present( - AttrString::from("class"), - Value::new_class("system_info"), - )]), - ) - }; - assert!(server_txn.modify(&me_sin).is_err()); - - // Add multivalue where not valid - let me_sin = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_eq("name", PartialValue::new_iname("testperson1"))), - ModifyList::new_list(vec![Modify::Present( - AttrString::from("name"), - Value::new_iname("testpersonx"), - )]), - ) - }; - assert!(server_txn.modify(&me_sin).is_err()); - - // add class and valid values? - let me_sin = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_eq("name", PartialValue::new_iname("testperson1"))), - ModifyList::new_list(vec![ - Modify::Present(AttrString::from("class"), Value::new_class("system_info")), - // Modify::Present("domain".to_string(), Value::new_iutf8("domain.name")), - Modify::Present(AttrString::from("version"), Value::new_uint32(1)), - ]), - ) - }; - assert!(server_txn.modify(&me_sin).is_ok()); - - // Replace a value - let me_sin = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_eq("name", PartialValue::new_iname("testperson1"))), - ModifyList::new_list(vec![ - Modify::Purged(AttrString::from("name")), - Modify::Present(AttrString::from("name"), Value::new_iname("testpersonx")), - ]), - ) - }; - assert!(server_txn.modify(&me_sin).is_ok()); - }) - } - - #[test] - fn test_qs_delete() { - run_test!(|server: &QueryServer| { - // Create - let server_txn = server.write(duration_from_epoch_now()); - - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson")), - ("displayname", Value::new_utf8s("testperson1")) - ); - - let e2 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson2")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63932").expect("uuid") - ), - ("description", Value::new_utf8s("testperson")), - ("displayname", Value::new_utf8s("testperson2")) - ); - - let e3 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson3")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63933").expect("uuid") - ), - ("description", Value::new_utf8s("testperson")), - ("displayname", Value::new_utf8s("testperson3")) - ); - - let ce = CreateEvent::new_internal(vec![e1.clone(), e2.clone(), e3.clone()]); - - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); - - // Delete filter is syntax invalid - let de_inv = - unsafe { 
DeleteEvent::new_internal_invalid(filter!(f_pres("nhtoaunaoehtnu"))) }; - assert!(server_txn.delete(&de_inv).is_err()); - - // Delete deletes nothing - let de_empty = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_eq( - "uuid", - PartialValue::new_uuids("cc8e95b4-c24f-4d68-ba54-000000000000").unwrap() - ))) - }; - assert!(server_txn.delete(&de_empty).is_err()); - - // Delete matches one - let de_sin = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_eq( - "name", - PartialValue::new_iname("testperson3") - ))) - }; - assert!(server_txn.delete(&de_sin).is_ok()); - - // Delete matches many - let de_mult = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_eq( - "description", - PartialValue::new_utf8s("testperson") - ))) - }; - assert!(server_txn.delete(&de_mult).is_ok()); - - assert!(server_txn.commit().is_ok()); - }) - } - - #[test] - fn test_qs_tombstone() { - run_test!(|server: &QueryServer| { - // First we setup some timestamps - let time_p1 = duration_from_epoch_now(); - let time_p2 = time_p1 + Duration::from_secs(CHANGELOG_MAX_AGE * 2); - let time_p3 = time_p2 + Duration::from_secs(CHANGELOG_MAX_AGE * 2); - - let server_txn = server.write(time_p1); - let admin = server_txn - .internal_search_uuid(&UUID_ADMIN) - .expect("failed"); - - let filt_i_ts = filter_all!(f_eq("class", PartialValue::new_class("tombstone"))); - - // Create fake external requests. Probably from admin later - // Should we do this with impersonate instead of using the external - let me_ts = unsafe { - ModifyEvent::new_impersonate_entry( - admin.clone(), - filt_i_ts.clone(), - ModifyList::new_list(vec![Modify::Present( - AttrString::from("class"), - Value::new_class("tombstone"), - )]), - ) - }; - - let de_ts = - unsafe { DeleteEvent::new_impersonate_entry(admin.clone(), filt_i_ts.clone()) }; - let se_ts = unsafe { SearchEvent::new_ext_impersonate_entry(admin, filt_i_ts.clone()) }; - - // First, create an entry, then push it through the lifecycle. - let e_ts = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("9557f49c-97a5-4277-a9a5-097d17eb8317").expect("uuid") - ), - ("description", Value::new_utf8s("testperson1")), - ("displayname", Value::new_utf8s("testperson1")) - ); - - let ce = CreateEvent::new_internal(vec![e_ts]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); - - let de_sin = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_or!([f_eq( - "name", - PartialValue::new_iname("testperson1") - )]))) - }; - assert!(server_txn.delete(&de_sin).is_ok()); - - // Commit - assert!(server_txn.commit().is_ok()); - - // Now, establish enough time for the recycled items to be purged. - let server_txn = server.write(time_p2); - assert!(server_txn.purge_recycled().is_ok()); - - // Now test the tombstone properties. - - // Can it be seen (external search) - let r1 = server_txn.search(&se_ts).expect("search failed"); - assert!(r1.is_empty()); - - // Can it be deleted (external delete) - // Should be err-no candidates. - assert!(server_txn.delete(&de_ts).is_err()); - - // Can it be modified? (external modify) - // Should be err-no candidates - assert!(server_txn.modify(&me_ts).is_err()); - - // Can it be seen (internal search) - // Internal search should see it. - let r2 = server_txn - .internal_search(filt_i_ts.clone()) - .expect("internal search failed"); - assert!(r2.len() == 1); - - // If we purge now, nothing happens, we aren't past the time window. 
- assert!(server_txn.purge_tombstones().is_ok()); - - let r3 = server_txn - .internal_search(filt_i_ts.clone()) - .expect("internal search failed"); - assert!(r3.len() == 1); - - // Commit - assert!(server_txn.commit().is_ok()); - - // New txn, push the cid forward. - let server_txn = server.write(time_p3); - - // Now purge - assert!(server_txn.purge_tombstones().is_ok()); - - // Assert it's gone - // Internal search should not see it. - let r4 = server_txn - .internal_search(filt_i_ts) - .expect("internal search failed"); - assert!(r4.is_empty()); - - assert!(server_txn.commit().is_ok()); - }) - } - - #[test] - fn test_qs_recycle_simple() { - run_test!(|server: &QueryServer| { - // First we setup some timestamps - let time_p1 = duration_from_epoch_now(); - let time_p2 = time_p1 + Duration::from_secs(RECYCLEBIN_MAX_AGE * 2); - - let server_txn = server.write(time_p1); - let admin = server_txn - .internal_search_uuid(&UUID_ADMIN) - .expect("failed"); - - let filt_i_rc = filter_all!(f_eq("class", PartialValue::new_class("recycled"))); - - let filt_i_ts = filter_all!(f_eq("class", PartialValue::new_class("tombstone"))); - - let filt_i_per = filter_all!(f_eq("class", PartialValue::new_class("person"))); - - // Create fake external requests. Probably from admin later - let me_rc = unsafe { - ModifyEvent::new_impersonate_entry( - admin.clone(), - filt_i_rc.clone(), - ModifyList::new_list(vec![Modify::Present( - AttrString::from("class"), - Value::new_class("recycled"), - )]), - ) - }; - - let de_rc = - unsafe { DeleteEvent::new_impersonate_entry(admin.clone(), filt_i_rc.clone()) }; - - let se_rc = - unsafe { SearchEvent::new_ext_impersonate_entry(admin.clone(), filt_i_rc.clone()) }; - - let sre_rc = - unsafe { SearchEvent::new_rec_impersonate_entry(admin.clone(), filt_i_rc.clone()) }; - - let rre_rc = unsafe { - ReviveRecycledEvent::new_impersonate_entry( - admin, - filter_all!(f_eq("name", PartialValue::new_iname("testperson1"))), - ) - }; - - // Create some recycled objects - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson1")), - ("displayname", Value::new_utf8s("testperson1")) - ); - - let e2 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson2")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63932").expect("uuid") - ), - ("description", Value::new_utf8s("testperson2")), - ("displayname", Value::new_utf8s("testperson2")) - ); - - let ce = CreateEvent::new_internal(vec![e1, e2]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); - - // Now we immediately delete these to force them to the correct state. 
- let de_sin = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_or!([ + // Mod multiple object + let me_mult = unsafe { + ModifyEvent::new_internal_invalid( + filter!(f_or!([ f_eq("name", PartialValue::new_iname("testperson1")), f_eq("name", PartialValue::new_iname("testperson2")), - ]))) - }; - assert!(server_txn.delete(&de_sin).is_ok()); + ])), + ModifyList::new_list(vec![ + Modify::Purged(AttrString::from("description")), + Modify::Present(AttrString::from("description"), Value::from("anusaosu")), + ]), + ) + }; + assert!(server_txn.modify(&me_mult).is_ok()); - // Can it be seen (external search) - let r1 = server_txn.search(&se_rc).expect("search failed"); - assert!(r1.is_empty()); + assert!(server_txn.commit().is_ok()); + } - // Can it be deleted (external delete) - // Should be err-no candidates. - assert!(server_txn.delete(&de_rc).is_err()); + #[qs_test] + async fn test_modify_invalid_class(server: &QueryServer) { + // Test modifying an entry and adding an extra class, that would cause the entry + // to no longer conform to schema. + let mut server_txn = server.write(duration_from_epoch_now()).await; - // Can it be modified? (external modify) - // Should be err-no candidates - assert!(server_txn.modify(&me_rc).is_err()); + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson1")), + ("displayname", Value::new_utf8s("testperson1")) + ); - // Can in be seen by special search? (external recycle search) - let r2 = server_txn.search(&sre_rc).expect("search failed"); - assert!(r2.len() == 2); + let ce = CreateEvent::new_internal(vec![e1.clone()]); - // Can it be seen (internal search) - // Internal search should see it. - let r2 = server_txn - .internal_search(filt_i_rc.clone()) - .expect("internal search failed"); - assert!(r2.len() == 2); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - // There are now two paths forward - // revival or purge! - assert!(server_txn.revive_recycled(&rre_rc).is_ok()); + // Add class but no values + let me_sin = unsafe { + ModifyEvent::new_internal_invalid( + filter!(f_eq("name", PartialValue::new_iname("testperson1"))), + ModifyList::new_list(vec![Modify::Present( + AttrString::from("class"), + Value::new_class("system_info"), + )]), + ) + }; + assert!(server_txn.modify(&me_sin).is_err()); - // Not enough time has passed, won't have an effect for purge to TS - assert!(server_txn.purge_recycled().is_ok()); - let r3 = server_txn - .internal_search(filt_i_rc.clone()) - .expect("internal search failed"); - assert!(r3.len() == 1); + // Add multivalue where not valid + let me_sin = unsafe { + ModifyEvent::new_internal_invalid( + filter!(f_eq("name", PartialValue::new_iname("testperson1"))), + ModifyList::new_list(vec![Modify::Present( + AttrString::from("name"), + Value::new_iname("testpersonx"), + )]), + ) + }; + assert!(server_txn.modify(&me_sin).is_err()); - // Commit - assert!(server_txn.commit().is_ok()); + // add class and valid values? 
+ let me_sin = unsafe { + ModifyEvent::new_internal_invalid( + filter!(f_eq("name", PartialValue::new_iname("testperson1"))), + ModifyList::new_list(vec![ + Modify::Present(AttrString::from("class"), Value::new_class("system_info")), + // Modify::Present("domain".to_string(), Value::new_iutf8("domain.name")), + Modify::Present(AttrString::from("version"), Value::new_uint32(1)), + ]), + ) + }; + assert!(server_txn.modify(&me_sin).is_ok()); - // Now, establish enough time for the recycled items to be purged. - let server_txn = server.write(time_p2); + // Replace a value + let me_sin = unsafe { + ModifyEvent::new_internal_invalid( + filter!(f_eq("name", PartialValue::new_iname("testperson1"))), + ModifyList::new_list(vec![ + Modify::Purged(AttrString::from("name")), + Modify::Present(AttrString::from("name"), Value::new_iname("testpersonx")), + ]), + ) + }; + assert!(server_txn.modify(&me_sin).is_ok()); + } - // purge to tombstone, now that time has passed. - assert!(server_txn.purge_recycled().is_ok()); + #[qs_test] + async fn test_delete(server: &QueryServer) { + // Create + let mut server_txn = server.write(duration_from_epoch_now()).await; - // Should be no recycled objects. - let r4 = server_txn - .internal_search(filt_i_rc.clone()) - .expect("internal search failed"); - assert!(r4.is_empty()); + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson")), + ("displayname", Value::new_utf8s("testperson1")) + ); - // There should be one tombstone - let r5 = server_txn - .internal_search(filt_i_ts.clone()) - .expect("internal search failed"); - assert!(r5.len() == 1); + let e2 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson2")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63932").expect("uuid") + ), + ("description", Value::new_utf8s("testperson")), + ("displayname", Value::new_utf8s("testperson2")) + ); - // There should be one entry - let r6 = server_txn - .internal_search(filt_i_per.clone()) - .expect("internal search failed"); - assert!(r6.len() == 1); + let e3 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson3")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63933").expect("uuid") + ), + ("description", Value::new_utf8s("testperson")), + ("displayname", Value::new_utf8s("testperson3")) + ); - assert!(server_txn.commit().is_ok()); - }) + let ce = CreateEvent::new_internal(vec![e1.clone(), e2.clone(), e3.clone()]); + + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); + + // Delete filter is syntax invalid + let de_inv = + unsafe { DeleteEvent::new_internal_invalid(filter!(f_pres("nhtoaunaoehtnu"))) }; + assert!(server_txn.delete(&de_inv).is_err()); + + // Delete deletes nothing + let de_empty = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_eq( + "uuid", + PartialValue::new_uuids("cc8e95b4-c24f-4d68-ba54-000000000000").unwrap() + ))) + }; + assert!(server_txn.delete(&de_empty).is_err()); + + // Delete matches one + let de_sin = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_eq( + "name", + PartialValue::new_iname("testperson3") + ))) + }; + assert!(server_txn.delete(&de_sin).is_ok()); + + // Delete matches many + let 
de_mult = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_eq( + "description", + PartialValue::new_utf8s("testperson") + ))) + }; + assert!(server_txn.delete(&de_mult).is_ok()); + + assert!(server_txn.commit().is_ok()); + } + + #[qs_test] + async fn test_tombstone(server: &QueryServer) { + // First we setup some timestamps + let time_p1 = duration_from_epoch_now(); + let time_p2 = time_p1 + Duration::from_secs(CHANGELOG_MAX_AGE * 2); + let time_p3 = time_p2 + Duration::from_secs(CHANGELOG_MAX_AGE * 2); + + let mut server_txn = server.write(time_p1).await; + let admin = server_txn + .internal_search_uuid(&UUID_ADMIN) + .expect("failed"); + + let filt_i_ts = filter_all!(f_eq("class", PartialValue::new_class("tombstone"))); + + // Create fake external requests. Probably from admin later + // Should we do this with impersonate instead of using the external + let me_ts = unsafe { + ModifyEvent::new_impersonate_entry( + admin.clone(), + filt_i_ts.clone(), + ModifyList::new_list(vec![Modify::Present( + AttrString::from("class"), + Value::new_class("tombstone"), + )]), + ) + }; + + let de_ts = unsafe { DeleteEvent::new_impersonate_entry(admin.clone(), filt_i_ts.clone()) }; + let se_ts = unsafe { SearchEvent::new_ext_impersonate_entry(admin, filt_i_ts.clone()) }; + + // First, create an entry, then push it through the lifecycle. + let e_ts = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("9557f49c-97a5-4277-a9a5-097d17eb8317").expect("uuid") + ), + ("description", Value::new_utf8s("testperson1")), + ("displayname", Value::new_utf8s("testperson1")) + ); + + let ce = CreateEvent::new_internal(vec![e_ts]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); + + let de_sin = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_or!([f_eq( + "name", + PartialValue::new_iname("testperson1") + )]))) + }; + assert!(server_txn.delete(&de_sin).is_ok()); + + // Commit + assert!(server_txn.commit().is_ok()); + + // Now, establish enough time for the recycled items to be purged. + let mut server_txn = server.write(time_p2).await; + assert!(server_txn.purge_recycled().is_ok()); + + // Now test the tombstone properties. + + // Can it be seen (external search) + let r1 = server_txn.search(&se_ts).expect("search failed"); + assert!(r1.is_empty()); + + // Can it be deleted (external delete) + // Should be err-no candidates. + assert!(server_txn.delete(&de_ts).is_err()); + + // Can it be modified? (external modify) + // Should be err-no candidates + assert!(server_txn.modify(&me_ts).is_err()); + + // Can it be seen (internal search) + // Internal search should see it. + let r2 = server_txn + .internal_search(filt_i_ts.clone()) + .expect("internal search failed"); + assert!(r2.len() == 1); + + // If we purge now, nothing happens, we aren't past the time window. + assert!(server_txn.purge_tombstones().is_ok()); + + let r3 = server_txn + .internal_search(filt_i_ts.clone()) + .expect("internal search failed"); + assert!(r3.len() == 1); + + // Commit + assert!(server_txn.commit().is_ok()); + + // New txn, push the cid forward. + let server_txn = server.write(time_p3).await; + + // Now purge + assert!(server_txn.purge_tombstones().is_ok()); + + // Assert it's gone + // Internal search should not see it. 
+ let r4 = server_txn + .internal_search(filt_i_ts) + .expect("internal search failed"); + assert!(r4.is_empty()); + + assert!(server_txn.commit().is_ok()); + } + + #[qs_test] + async fn test_recycle_simple(server: &QueryServer) { + // First we setup some timestamps + let time_p1 = duration_from_epoch_now(); + let time_p2 = time_p1 + Duration::from_secs(RECYCLEBIN_MAX_AGE * 2); + + let mut server_txn = server.write(time_p1).await; + let admin = server_txn + .internal_search_uuid(&UUID_ADMIN) + .expect("failed"); + + let filt_i_rc = filter_all!(f_eq("class", PartialValue::new_class("recycled"))); + + let filt_i_ts = filter_all!(f_eq("class", PartialValue::new_class("tombstone"))); + + let filt_i_per = filter_all!(f_eq("class", PartialValue::new_class("person"))); + + // Create fake external requests. Probably from admin later + let me_rc = unsafe { + ModifyEvent::new_impersonate_entry( + admin.clone(), + filt_i_rc.clone(), + ModifyList::new_list(vec![Modify::Present( + AttrString::from("class"), + Value::new_class("recycled"), + )]), + ) + }; + + let de_rc = unsafe { DeleteEvent::new_impersonate_entry(admin.clone(), filt_i_rc.clone()) }; + + let se_rc = + unsafe { SearchEvent::new_ext_impersonate_entry(admin.clone(), filt_i_rc.clone()) }; + + let sre_rc = + unsafe { SearchEvent::new_rec_impersonate_entry(admin.clone(), filt_i_rc.clone()) }; + + let rre_rc = unsafe { + ReviveRecycledEvent::new_impersonate_entry( + admin, + filter_all!(f_eq("name", PartialValue::new_iname("testperson1"))), + ) + }; + + // Create some recycled objects + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson1")), + ("displayname", Value::new_utf8s("testperson1")) + ); + + let e2 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson2")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63932").expect("uuid") + ), + ("description", Value::new_utf8s("testperson2")), + ("displayname", Value::new_utf8s("testperson2")) + ); + + let ce = CreateEvent::new_internal(vec![e1, e2]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); + + // Now we immediately delete these to force them to the correct state. + let de_sin = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_or!([ + f_eq("name", PartialValue::new_iname("testperson1")), + f_eq("name", PartialValue::new_iname("testperson2")), + ]))) + }; + assert!(server_txn.delete(&de_sin).is_ok()); + + // Can it be seen (external search) + let r1 = server_txn.search(&se_rc).expect("search failed"); + assert!(r1.is_empty()); + + // Can it be deleted (external delete) + // Should be err-no candidates. + assert!(server_txn.delete(&de_rc).is_err()); + + // Can it be modified? (external modify) + // Should be err-no candidates + assert!(server_txn.modify(&me_rc).is_err()); + + // Can in be seen by special search? (external recycle search) + let r2 = server_txn.search(&sre_rc).expect("search failed"); + assert!(r2.len() == 2); + + // Can it be seen (internal search) + // Internal search should see it. + let r2 = server_txn + .internal_search(filt_i_rc.clone()) + .expect("internal search failed"); + assert!(r2.len() == 2); + + // There are now two paths forward + // revival or purge! 
+ assert!(server_txn.revive_recycled(&rre_rc).is_ok()); + + // Not enough time has passed, won't have an effect for purge to TS + assert!(server_txn.purge_recycled().is_ok()); + let r3 = server_txn + .internal_search(filt_i_rc.clone()) + .expect("internal search failed"); + assert!(r3.len() == 1); + + // Commit + assert!(server_txn.commit().is_ok()); + + // Now, establish enough time for the recycled items to be purged. + let server_txn = server.write(time_p2).await; + + // purge to tombstone, now that time has passed. + assert!(server_txn.purge_recycled().is_ok()); + + // Should be no recycled objects. + let r4 = server_txn + .internal_search(filt_i_rc.clone()) + .expect("internal search failed"); + assert!(r4.is_empty()); + + // There should be one tombstone + let r5 = server_txn + .internal_search(filt_i_ts.clone()) + .expect("internal search failed"); + assert!(r5.len() == 1); + + // There should be one entry + let r6 = server_txn + .internal_search(filt_i_per.clone()) + .expect("internal search failed"); + assert!(r6.len() == 1); + + assert!(server_txn.commit().is_ok()); } // The delete test above should be unaffected by recycle anyway - #[test] - fn test_qs_recycle_advanced() { - run_test!(|server: &QueryServer| { - // Create items - let server_txn = server.write(duration_from_epoch_now()); - let admin = server_txn - .internal_search_uuid(&UUID_ADMIN) - .expect("failed"); + #[qs_test] + async fn test_qs_recycle_advanced(server: &QueryServer) { + // Create items + let mut server_txn = server.write(duration_from_epoch_now()).await; + let admin = server_txn + .internal_search_uuid(&UUID_ADMIN) + .expect("failed"); - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson1")), - ("displayname", Value::new_utf8s("testperson1")) - ); - let ce = CreateEvent::new_internal(vec![e1]); + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson1")), + ("displayname", Value::new_utf8s("testperson1")) + ); + let ce = CreateEvent::new_internal(vec![e1]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); - // Delete and ensure they became recycled. - let de_sin = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_eq( - "name", - PartialValue::new_iname("testperson1") - ))) - }; - assert!(server_txn.delete(&de_sin).is_ok()); - // Can in be seen by special search? (external recycle search) - let filt_rc = filter_all!(f_eq("class", PartialValue::new_class("recycled"))); - let sre_rc = unsafe { SearchEvent::new_rec_impersonate_entry(admin, filt_rc.clone()) }; - let r2 = server_txn.search(&sre_rc).expect("search failed"); - assert!(r2.len() == 1); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); + // Delete and ensure they became recycled. + let de_sin = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_eq( + "name", + PartialValue::new_iname("testperson1") + ))) + }; + assert!(server_txn.delete(&de_sin).is_ok()); + // Can in be seen by special search? 
(external recycle search) + let filt_rc = filter_all!(f_eq("class", PartialValue::new_class("recycled"))); + let sre_rc = unsafe { SearchEvent::new_rec_impersonate_entry(admin, filt_rc.clone()) }; + let r2 = server_txn.search(&sre_rc).expect("search failed"); + assert!(r2.len() == 1); - // Create dup uuid (rej) - // After a delete -> recycle, create duplicate name etc. - let cr = server_txn.create(&ce); - assert!(cr.is_err()); + // Create dup uuid (rej) + // After a delete -> recycle, create duplicate name etc. + let cr = server_txn.create(&ce); + assert!(cr.is_err()); - assert!(server_txn.commit().is_ok()); - }) + assert!(server_txn.commit().is_ok()); } - #[test] - fn test_qs_name_to_uuid() { - run_test!(|server: &QueryServer| { - let server_txn = server.write(duration_from_epoch_now()); + #[qs_test] + async fn test_name_to_uuid(server: &QueryServer) { + let mut server_txn = server.write(duration_from_epoch_now()).await; - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson1")), - ("displayname", Value::new_utf8s("testperson1")) - ); - let ce = CreateEvent::new_internal(vec![e1]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson1")), + ("displayname", Value::new_utf8s("testperson1")) + ); + let ce = CreateEvent::new_internal(vec![e1]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - // Name doesn't exist - let r1 = server_txn.name_to_uuid("testpers"); - assert!(r1.is_err()); - // Name doesn't exist (not syntax normalised) - let r2 = server_txn.name_to_uuid("tEsTpErS"); - assert!(r2.is_err()); - // Name does exist - let r3 = server_txn.name_to_uuid("testperson1"); - assert!(r3.is_ok()); - // Name is not syntax normalised (but exists) - let r4 = server_txn.name_to_uuid("tEsTpErSoN1"); - assert!(r4.is_ok()); - }) + // Name doesn't exist + let r1 = server_txn.name_to_uuid("testpers"); + assert!(r1.is_err()); + // Name doesn't exist (not syntax normalised) + let r2 = server_txn.name_to_uuid("tEsTpErS"); + assert!(r2.is_err()); + // Name does exist + let r3 = server_txn.name_to_uuid("testperson1"); + assert!(r3.is_ok()); + // Name is not syntax normalised (but exists) + let r4 = server_txn.name_to_uuid("tEsTpErSoN1"); + assert!(r4.is_ok()); } - #[test] - fn test_qs_uuid_to_spn() { - run_test!(|server: &QueryServer| { - let server_txn = server.write(duration_from_epoch_now()); + #[qs_test] + async fn test_uuid_to_spn(server: &QueryServer) { + let mut server_txn = server.write(duration_from_epoch_now()).await; - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("class", Value::new_class("account")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson1")), - ("displayname", Value::new_utf8s("testperson1")) - ); - let ce = CreateEvent::new_internal(vec![e1]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); + let e1 = entry_init!( + ("class", 
Value::new_class("object")), + ("class", Value::new_class("person")), + ("class", Value::new_class("account")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson1")), + ("displayname", Value::new_utf8s("testperson1")) + ); + let ce = CreateEvent::new_internal(vec![e1]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - // Name doesn't exist - let r1 = server_txn - .uuid_to_spn(Uuid::parse_str("bae3f507-e6c3-44ba-ad01-f8ff1083534a").unwrap()); - // There is nothing. - assert!(r1 == Ok(None)); - // Name does exist - let r3 = server_txn - .uuid_to_spn(Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap()); - println!("{:?}", r3); - assert!(r3.unwrap().unwrap() == Value::new_spn_str("testperson1", "example.com")); - // Name is not syntax normalised (but exists) - let r4 = server_txn - .uuid_to_spn(Uuid::parse_str("CC8E95B4-C24F-4D68-BA54-8BED76F63930").unwrap()); - assert!(r4.unwrap().unwrap() == Value::new_spn_str("testperson1", "example.com")); - }) + // Name doesn't exist + let r1 = server_txn + .uuid_to_spn(Uuid::parse_str("bae3f507-e6c3-44ba-ad01-f8ff1083534a").unwrap()); + // There is nothing. + assert!(r1 == Ok(None)); + // Name does exist + let r3 = server_txn + .uuid_to_spn(Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap()); + println!("{:?}", r3); + assert!(r3.unwrap().unwrap() == Value::new_spn_str("testperson1", "example.com")); + // Name is not syntax normalised (but exists) + let r4 = server_txn + .uuid_to_spn(Uuid::parse_str("CC8E95B4-C24F-4D68-BA54-8BED76F63930").unwrap()); + assert!(r4.unwrap().unwrap() == Value::new_spn_str("testperson1", "example.com")); } - #[test] - fn test_qs_uuid_to_rdn() { - run_test!(|server: &QueryServer| { - let server_txn = server.write(duration_from_epoch_now()); + #[qs_test] + async fn test_uuid_to_rdn(server: &QueryServer) { + let mut server_txn = server.write(duration_from_epoch_now()).await; - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("class", Value::new_class("account")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson")), - ("displayname", Value::new_utf8s("testperson1")) - ); - let ce = CreateEvent::new_internal(vec![e1]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("class", Value::new_class("account")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson")), + ("displayname", Value::new_utf8s("testperson1")) + ); + let ce = CreateEvent::new_internal(vec![e1]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - // Name doesn't exist - let r1 = server_txn - .uuid_to_rdn(Uuid::parse_str("bae3f507-e6c3-44ba-ad01-f8ff1083534a").unwrap()); - // There is nothing. 
- assert!(r1.unwrap() == "uuid=bae3f507-e6c3-44ba-ad01-f8ff1083534a"); - // Name does exist - let r3 = server_txn - .uuid_to_rdn(Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap()); - println!("{:?}", r3); - assert!(r3.unwrap() == "spn=testperson1@example.com"); - // Uuid is not syntax normalised (but exists) - let r4 = server_txn - .uuid_to_rdn(Uuid::parse_str("CC8E95B4-C24F-4D68-BA54-8BED76F63930").unwrap()); - assert!(r4.unwrap() == "spn=testperson1@example.com"); - }) + // Name doesn't exist + let r1 = server_txn + .uuid_to_rdn(Uuid::parse_str("bae3f507-e6c3-44ba-ad01-f8ff1083534a").unwrap()); + // There is nothing. + assert!(r1.unwrap() == "uuid=bae3f507-e6c3-44ba-ad01-f8ff1083534a"); + // Name does exist + let r3 = server_txn + .uuid_to_rdn(Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap()); + println!("{:?}", r3); + assert!(r3.unwrap() == "spn=testperson1@example.com"); + // Uuid is not syntax normalised (but exists) + let r4 = server_txn + .uuid_to_rdn(Uuid::parse_str("CC8E95B4-C24F-4D68-BA54-8BED76F63930").unwrap()); + assert!(r4.unwrap() == "spn=testperson1@example.com"); } - #[test] - fn test_qs_uuid_to_star_recycle() { - run_test!(|server: &QueryServer| { - let server_txn = server.write(duration_from_epoch_now()); + #[qs_test] + async fn test_uuid_to_star_recycle(server: &QueryServer) { + let mut server_txn = server.write(duration_from_epoch_now()).await; - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("class", Value::new_class("account")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson1")), - ("displayname", Value::new_utf8s("testperson1")) - ); + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("class", Value::new_class("account")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson1")), + ("displayname", Value::new_utf8s("testperson1")) + ); - let tuuid = Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap(); + let tuuid = Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap(); - let ce = CreateEvent::new_internal(vec![e1]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); + let ce = CreateEvent::new_internal(vec![e1]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - assert!(server_txn.uuid_to_rdn(tuuid) == Ok("spn=testperson1@example.com".to_string())); + assert!(server_txn.uuid_to_rdn(tuuid) == Ok("spn=testperson1@example.com".to_string())); - assert!( - server_txn.uuid_to_spn(tuuid) - == Ok(Some(Value::new_spn_str("testperson1", "example.com"))) - ); + assert!( + server_txn.uuid_to_spn(tuuid) + == Ok(Some(Value::new_spn_str("testperson1", "example.com"))) + ); - assert!(server_txn.name_to_uuid("testperson1") == Ok(tuuid)); + assert!(server_txn.name_to_uuid("testperson1") == Ok(tuuid)); - // delete - let de_sin = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_eq( - "name", - PartialValue::new_iname("testperson1") - ))) - }; - assert!(server_txn.delete(&de_sin).is_ok()); + // delete + let de_sin = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_eq( + "name", + PartialValue::new_iname("testperson1") + ))) + }; + assert!(server_txn.delete(&de_sin).is_ok()); - // all should fail - assert!( - 
server_txn.uuid_to_rdn(tuuid) - == Ok("uuid=cc8e95b4-c24f-4d68-ba54-8bed76f63930".to_string()) - ); + // all should fail + assert!( + server_txn.uuid_to_rdn(tuuid) + == Ok("uuid=cc8e95b4-c24f-4d68-ba54-8bed76f63930".to_string()) + ); - assert!(server_txn.uuid_to_spn(tuuid) == Ok(None)); + assert!(server_txn.uuid_to_spn(tuuid) == Ok(None)); - assert!(server_txn.name_to_uuid("testperson1").is_err()); + assert!(server_txn.name_to_uuid("testperson1").is_err()); - // revive - let admin = server_txn - .internal_search_uuid(&UUID_ADMIN) - .expect("failed"); - let rre_rc = unsafe { - ReviveRecycledEvent::new_impersonate_entry( - admin, - filter_all!(f_eq("name", PartialValue::new_iname("testperson1"))), - ) - }; - assert!(server_txn.revive_recycled(&rre_rc).is_ok()); + // revive + let admin = server_txn + .internal_search_uuid(&UUID_ADMIN) + .expect("failed"); + let rre_rc = unsafe { + ReviveRecycledEvent::new_impersonate_entry( + admin, + filter_all!(f_eq("name", PartialValue::new_iname("testperson1"))), + ) + }; + assert!(server_txn.revive_recycled(&rre_rc).is_ok()); - // all checks pass + // all checks pass - assert!(server_txn.uuid_to_rdn(tuuid) == Ok("spn=testperson1@example.com".to_string())); + assert!(server_txn.uuid_to_rdn(tuuid) == Ok("spn=testperson1@example.com".to_string())); - assert!( - server_txn.uuid_to_spn(tuuid) - == Ok(Some(Value::new_spn_str("testperson1", "example.com"))) - ); + assert!( + server_txn.uuid_to_spn(tuuid) + == Ok(Some(Value::new_spn_str("testperson1", "example.com"))) + ); - assert!(server_txn.name_to_uuid("testperson1") == Ok(tuuid)); - }) + assert!(server_txn.name_to_uuid("testperson1") == Ok(tuuid)); } - #[test] - fn test_qs_clone_value() { - run_test!(|server: &QueryServer| { - let server_txn = server.write(duration_from_epoch_now()); - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson1")), - ("displayname", Value::new_utf8s("testperson1")) - ); - let ce = CreateEvent::new_internal(vec![e1]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); + #[qs_test] + async fn test_clone_value(server: &QueryServer) { + let mut server_txn = server.write(duration_from_epoch_now()).await; + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson1")), + ("displayname", Value::new_utf8s("testperson1")) + ); + let ce = CreateEvent::new_internal(vec![e1]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - // test attr not exist - let r1 = server_txn.clone_value(&"tausau".to_string(), &"naoeutnhaou".to_string()); + // test attr not exist + let r1 = server_txn.clone_value(&"tausau".to_string(), &"naoeutnhaou".to_string()); - assert!(r1.is_err()); + assert!(r1.is_err()); - // test attr not-normalised (error) - // test attr not-reference - let r2 = server_txn.clone_value(&"NaMe".to_string(), &"NaMe".to_string()); + // test attr not-normalised (error) + // test attr not-reference + let r2 = server_txn.clone_value(&"NaMe".to_string(), &"NaMe".to_string()); - assert!(r2.is_err()); + assert!(r2.is_err()); - // test attr reference - let r3 = server_txn.clone_value(&"member".to_string(), 
&"testperson1".to_string()); + // test attr reference + let r3 = server_txn.clone_value(&"member".to_string(), &"testperson1".to_string()); - assert!(r3 == Ok(Value::new_refer_s("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap())); + assert!(r3 == Ok(Value::new_refer_s("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap())); - // test attr reference already resolved. - let r4 = server_txn.clone_value( - &"member".to_string(), - &"cc8e95b4-c24f-4d68-ba54-8bed76f63930".to_string(), - ); + // test attr reference already resolved. + let r4 = server_txn.clone_value( + &"member".to_string(), + &"cc8e95b4-c24f-4d68-ba54-8bed76f63930".to_string(), + ); - debug!("{:?}", r4); - assert!(r4 == Ok(Value::new_refer_s("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap())); - }) + debug!("{:?}", r4); + assert!(r4 == Ok(Value::new_refer_s("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap())); } - #[test] - fn test_qs_dynamic_schema_class() { - run_test!(|server: &QueryServer| { - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("testclass")), - ("name", Value::new_iname("testobj1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ) - ); + #[qs_test] + async fn test_dynamic_schema_class(server: &QueryServer) { + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("testclass")), + ("name", Value::new_iname("testobj1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ) + ); - // Class definition - let e_cd = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("classtype")), - ("classname", Value::new_iutf8("testclass")), - ( - "uuid", - Value::new_uuids("cfcae205-31c3-484b-8ced-667d1709c5e3").expect("uuid") - ), - ("description", Value::new_utf8s("Test Class")), - ("may", Value::new_iutf8("name")) - ); - let server_txn = server.write(duration_from_epoch_now()); - // Add a new class. - let ce_class = CreateEvent::new_internal(vec![e_cd.clone()]); - assert!(server_txn.create(&ce_class).is_ok()); - // Trying to add it now should fail. - let ce_fail = CreateEvent::new_internal(vec![e1.clone()]); - assert!(server_txn.create(&ce_fail).is_err()); + // Class definition + let e_cd = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("classtype")), + ("classname", Value::new_iutf8("testclass")), + ( + "uuid", + Value::new_uuids("cfcae205-31c3-484b-8ced-667d1709c5e3").expect("uuid") + ), + ("description", Value::new_utf8s("Test Class")), + ("may", Value::new_iutf8("name")) + ); + let mut server_txn = server.write(duration_from_epoch_now()).await; + // Add a new class. + let ce_class = CreateEvent::new_internal(vec![e_cd.clone()]); + assert!(server_txn.create(&ce_class).is_ok()); + // Trying to add it now should fail. 
+ let ce_fail = CreateEvent::new_internal(vec![e1.clone()]); + assert!(server_txn.create(&ce_fail).is_err()); - // Commit - server_txn.commit().expect("should not fail"); + // Commit + server_txn.commit().expect("should not fail"); - // Start a new write - let server_txn = server.write(duration_from_epoch_now()); - // Add the class to an object - // should work - let ce_work = CreateEvent::new_internal(vec![e1.clone()]); - assert!(server_txn.create(&ce_work).is_ok()); + // Start a new write + let mut server_txn = server.write(duration_from_epoch_now()).await; + // Add the class to an object + // should work + let ce_work = CreateEvent::new_internal(vec![e1.clone()]); + assert!(server_txn.create(&ce_work).is_ok()); - // Commit - server_txn.commit().expect("should not fail"); + // Commit + server_txn.commit().expect("should not fail"); - // Start a new write - let server_txn = server.write(duration_from_epoch_now()); - // delete the class - let de_class = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_eq( - "classname", - PartialValue::new_class("testclass") - ))) - }; - assert!(server_txn.delete(&de_class).is_ok()); - // Commit - server_txn.commit().expect("should not fail"); + // Start a new write + let mut server_txn = server.write(duration_from_epoch_now()).await; + // delete the class + let de_class = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_eq( + "classname", + PartialValue::new_class("testclass") + ))) + }; + assert!(server_txn.delete(&de_class).is_ok()); + // Commit + server_txn.commit().expect("should not fail"); - // Start a new write - let server_txn = server.write(duration_from_epoch_now()); - // Trying to add now should fail - let ce_fail = CreateEvent::new_internal(vec![e1.clone()]); - assert!(server_txn.create(&ce_fail).is_err()); - // Search our entry - let testobj1 = server_txn - .internal_search_uuid( - &Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap(), - ) - .expect("failed"); - assert!(testobj1.attribute_equality("class", &PartialValue::new_class("testclass"))); + // Start a new write + let mut server_txn = server.write(duration_from_epoch_now()).await; + // Trying to add now should fail + let ce_fail = CreateEvent::new_internal(vec![e1.clone()]); + assert!(server_txn.create(&ce_fail).is_err()); + // Search our entry + let testobj1 = server_txn + .internal_search_uuid(&Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap()) + .expect("failed"); + assert!(testobj1.attribute_equality("class", &PartialValue::new_class("testclass"))); - // Should still be good - server_txn.commit().expect("should not fail"); - // Commit. - }) + // Should still be good + server_txn.commit().expect("should not fail"); + // Commit. 
} - #[test] - fn test_qs_dynamic_schema_attr() { - run_test!(|server: &QueryServer| { - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("extensibleobject")), - ("name", Value::new_iname("testobj1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("testattr", Value::new_utf8s("test")) - ); + #[qs_test] + async fn test_dynamic_schema_attr(server: &QueryServer) { + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("extensibleobject")), + ("name", Value::new_iname("testobj1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("testattr", Value::new_utf8s("test")) + ); - // Attribute definition - let e_ad = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("attributetype")), - ( - "uuid", - Value::new_uuids("cfcae205-31c3-484b-8ced-667d1709c5e3").expect("uuid") - ), - ("attributename", Value::new_iutf8("testattr")), - ("description", Value::new_utf8s("Test Attribute")), - ("multivalue", Value::new_bool(false)), - ("unique", Value::new_bool(false)), - ("syntax", Value::new_syntaxs("UTF8STRING").expect("syntax")) - ); + // Attribute definition + let e_ad = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("attributetype")), + ( + "uuid", + Value::new_uuids("cfcae205-31c3-484b-8ced-667d1709c5e3").expect("uuid") + ), + ("attributename", Value::new_iutf8("testattr")), + ("description", Value::new_utf8s("Test Attribute")), + ("multivalue", Value::new_bool(false)), + ("unique", Value::new_bool(false)), + ("syntax", Value::new_syntaxs("UTF8STRING").expect("syntax")) + ); - let server_txn = server.write(duration_from_epoch_now()); - // Add a new attribute. - let ce_attr = CreateEvent::new_internal(vec![e_ad.clone()]); - assert!(server_txn.create(&ce_attr).is_ok()); - // Trying to add it now should fail. (use extensible object) - let ce_fail = CreateEvent::new_internal(vec![e1.clone()]); - assert!(server_txn.create(&ce_fail).is_err()); + let mut server_txn = server.write(duration_from_epoch_now()).await; + // Add a new attribute. + let ce_attr = CreateEvent::new_internal(vec![e_ad.clone()]); + assert!(server_txn.create(&ce_attr).is_ok()); + // Trying to add it now should fail. 
(use extensible object) + let ce_fail = CreateEvent::new_internal(vec![e1.clone()]); + assert!(server_txn.create(&ce_fail).is_err()); - // Commit - server_txn.commit().expect("should not fail"); + // Commit + server_txn.commit().expect("should not fail"); - // Start a new write - let server_txn = server.write(duration_from_epoch_now()); - // Add the attr to an object - // should work - let ce_work = CreateEvent::new_internal(vec![e1.clone()]); - assert!(server_txn.create(&ce_work).is_ok()); + // Start a new write + let mut server_txn = server.write(duration_from_epoch_now()).await; + // Add the attr to an object + // should work + let ce_work = CreateEvent::new_internal(vec![e1.clone()]); + assert!(server_txn.create(&ce_work).is_ok()); - // Commit - server_txn.commit().expect("should not fail"); + // Commit + server_txn.commit().expect("should not fail"); - // Start a new write - let server_txn = server.write(duration_from_epoch_now()); - // delete the attr - let de_attr = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_eq( - "attributename", - PartialValue::new_iutf8("testattr") - ))) - }; - assert!(server_txn.delete(&de_attr).is_ok()); - // Commit - server_txn.commit().expect("should not fail"); + // Start a new write + let mut server_txn = server.write(duration_from_epoch_now()).await; + // delete the attr + let de_attr = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_eq( + "attributename", + PartialValue::new_iutf8("testattr") + ))) + }; + assert!(server_txn.delete(&de_attr).is_ok()); + // Commit + server_txn.commit().expect("should not fail"); - // Start a new write - let server_txn = server.write(duration_from_epoch_now()); - // Trying to add now should fail - let ce_fail = CreateEvent::new_internal(vec![e1.clone()]); - assert!(server_txn.create(&ce_fail).is_err()); - // Search our attribute - should FAIL - let filt = filter!(f_eq("testattr", PartialValue::new_utf8s("test"))); - assert!(server_txn.internal_search(filt).is_err()); - // Search the entry - the attribute will still be present - // even if we can't search on it. - let testobj1 = server_txn - .internal_search_uuid( - &Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap(), - ) - .expect("failed"); - assert!(testobj1.attribute_equality("testattr", &PartialValue::new_utf8s("test"))); + // Start a new write + let mut server_txn = server.write(duration_from_epoch_now()).await; + // Trying to add now should fail + let ce_fail = CreateEvent::new_internal(vec![e1.clone()]); + assert!(server_txn.create(&ce_fail).is_err()); + // Search our attribute - should FAIL + let filt = filter!(f_eq("testattr", PartialValue::new_utf8s("test"))); + assert!(server_txn.internal_search(filt).is_err()); + // Search the entry - the attribute will still be present + // even if we can't search on it. + let testobj1 = server_txn + .internal_search_uuid(&Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap()) + .expect("failed"); + assert!(testobj1.attribute_equality("testattr", &PartialValue::new_utf8s("test"))); - server_txn.commit().expect("should not fail"); - // Commit. - }) + server_txn.commit().expect("should not fail"); + // Commit. 
} - #[test] - fn test_qs_modify_password_only() { - run_test!(|server: &QueryServer| { - let e1 = entry_init!( - ("class", Value::new_class("object")), - ("class", Value::new_class("person")), - ("class", Value::new_class("account")), - ("name", Value::new_iname("testperson1")), - ( - "uuid", - Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") - ), - ("description", Value::new_utf8s("testperson1")), - ("displayname", Value::new_utf8s("testperson1")) - ); - let server_txn = server.write(duration_from_epoch_now()); - // Add the entry. Today we have no syntax to take simple str to a credential - // but honestly, that's probably okay :) - let ce = CreateEvent::new_internal(vec![e1]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); + #[qs_test] + async fn test_modify_password_only(server: &QueryServer) { + let e1 = entry_init!( + ("class", Value::new_class("object")), + ("class", Value::new_class("person")), + ("class", Value::new_class("account")), + ("name", Value::new_iname("testperson1")), + ( + "uuid", + Value::new_uuids("cc8e95b4-c24f-4d68-ba54-8bed76f63930").expect("uuid") + ), + ("description", Value::new_utf8s("testperson1")), + ("displayname", Value::new_utf8s("testperson1")) + ); + let mut server_txn = server.write(duration_from_epoch_now()).await; + // Add the entry. Today we have no syntax to take simple str to a credential + // but honestly, that's probably okay :) + let ce = CreateEvent::new_internal(vec![e1]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - // Build the credential. - let p = CryptoPolicy::minimum(); - let cred = Credential::new_password_only(&p, "test_password").unwrap(); - let v_cred = Value::new_credential("primary", cred); - assert!(v_cred.validate()); + // Build the credential. + let p = CryptoPolicy::minimum(); + let cred = Credential::new_password_only(&p, "test_password").unwrap(); + let v_cred = Value::new_credential("primary", cred); + assert!(v_cred.validate()); - // now modify and provide a primary credential. - let me_inv_m = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_eq("name", PartialValue::new_iname("testperson1"))), - ModifyList::new_list(vec![Modify::Present( - AttrString::from("primary_credential"), - v_cred, - )]), - ) - }; - // go! - assert!(server_txn.modify(&me_inv_m).is_ok()); + // now modify and provide a primary credential. + let me_inv_m = unsafe { + ModifyEvent::new_internal_invalid( + filter!(f_eq("name", PartialValue::new_iname("testperson1"))), + ModifyList::new_list(vec![Modify::Present( + AttrString::from("primary_credential"), + v_cred, + )]), + ) + }; + // go! + assert!(server_txn.modify(&me_inv_m).is_ok()); - // assert it exists and the password checks out - let test_ent = server_txn - .internal_search_uuid( - &Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap(), - ) - .expect("failed"); - // get the primary ava - let cred_ref = test_ent - .get_ava_single_credential("primary_credential") - .expect("Failed"); - // do a pw check. - assert!(cred_ref.verify_password("test_password").unwrap()); - }) + // assert it exists and the password checks out + let test_ent = server_txn + .internal_search_uuid(&Uuid::parse_str("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap()) + .expect("failed"); + // get the primary ava + let cred_ref = test_ent + .get_ava_single_credential("primary_credential") + .expect("Failed"); + // do a pw check. 
+ assert!(cred_ref.verify_password("test_password").unwrap()); } fn create_user(name: &str, uuid: &str) -> Entry { @@ -4361,286 +4314,258 @@ mod tests { e.attribute_equality("memberof", &PartialValue::new_refer_s(mo).unwrap()) } - #[test] - fn test_qs_revive_advanced_directmemberships() { - run_test!(|server: &QueryServer| { - // Create items - let server_txn = server.write(duration_from_epoch_now()); - let admin = server_txn - .internal_search_uuid(&UUID_ADMIN) - .expect("failed"); + #[qs_test] + async fn test_revive_advanced_directmemberships(server: &QueryServer) { + // Create items + let mut server_txn = server.write(duration_from_epoch_now()).await; + let admin = server_txn + .internal_search_uuid(&UUID_ADMIN) + .expect("failed"); - // Right need a user in a direct group. - let u1 = create_user("u1", "22b47373-d123-421f-859e-9ddd8ab14a2a"); - let g1 = create_group( - "g1", - "cca2bbfc-5b43-43f3-be9e-f5b03b3defec", - &["22b47373-d123-421f-859e-9ddd8ab14a2a"], - ); + // Right need a user in a direct group. + let u1 = create_user("u1", "22b47373-d123-421f-859e-9ddd8ab14a2a"); + let g1 = create_group( + "g1", + "cca2bbfc-5b43-43f3-be9e-f5b03b3defec", + &["22b47373-d123-421f-859e-9ddd8ab14a2a"], + ); - // Need a user in A -> B -> User, such that A/B are re-adde as MO - let u2 = create_user("u2", "5c19a4a2-b9f0-4429-b130-5782de5fddda"); - let g2a = create_group( - "g2a", - "e44cf9cd-9941-44cb-a02f-307b6e15ac54", - &["5c19a4a2-b9f0-4429-b130-5782de5fddda"], - ); - let g2b = create_group( - "g2b", - "d3132e6e-18ce-4b87-bee1-1d25e4bfe96d", - &["e44cf9cd-9941-44cb-a02f-307b6e15ac54"], - ); + // Need a user in A -> B -> User, such that A/B are re-adde as MO + let u2 = create_user("u2", "5c19a4a2-b9f0-4429-b130-5782de5fddda"); + let g2a = create_group( + "g2a", + "e44cf9cd-9941-44cb-a02f-307b6e15ac54", + &["5c19a4a2-b9f0-4429-b130-5782de5fddda"], + ); + let g2b = create_group( + "g2b", + "d3132e6e-18ce-4b87-bee1-1d25e4bfe96d", + &["e44cf9cd-9941-44cb-a02f-307b6e15ac54"], + ); - // Need a user in a group that is recycled after, then revived at the same time. - let u3 = create_user("u3", "68467a41-6e8e-44d0-9214-a5164e75ca03"); - let g3 = create_group( - "g3", - "36048117-e479-45ed-aeb5-611e8d83d5b1", - &["68467a41-6e8e-44d0-9214-a5164e75ca03"], - ); + // Need a user in a group that is recycled after, then revived at the same time. + let u3 = create_user("u3", "68467a41-6e8e-44d0-9214-a5164e75ca03"); + let g3 = create_group( + "g3", + "36048117-e479-45ed-aeb5-611e8d83d5b1", + &["68467a41-6e8e-44d0-9214-a5164e75ca03"], + ); - // A user in a group that is recycled, user is revived, THEN the group is. Group - // should be present in MO after the second revive. - let u4 = create_user("u4", "d696b10f-1729-4f1a-83d0-ca06525c2f59"); - let g4 = create_group( - "g4", - "d5c59ac6-c533-4b00-989f-d0e183f07bab", - &["d696b10f-1729-4f1a-83d0-ca06525c2f59"], - ); + // A user in a group that is recycled, user is revived, THEN the group is. Group + // should be present in MO after the second revive. + let u4 = create_user("u4", "d696b10f-1729-4f1a-83d0-ca06525c2f59"); + let g4 = create_group( + "g4", + "d5c59ac6-c533-4b00-989f-d0e183f07bab", + &["d696b10f-1729-4f1a-83d0-ca06525c2f59"], + ); - let ce = CreateEvent::new_internal(vec![u1, g1, u2, g2a, g2b, u3, g3, u4, g4]); - let cr = server_txn.create(&ce); - assert!(cr.is_ok()); + let ce = CreateEvent::new_internal(vec![u1, g1, u2, g2a, g2b, u3, g3, u4, g4]); + let cr = server_txn.create(&ce); + assert!(cr.is_ok()); - // Now recycle the needed entries. 
- let de = unsafe { - DeleteEvent::new_internal_invalid(filter!(f_or(vec![ - f_eq("name", PartialValue::new_iname("u1")), - f_eq("name", PartialValue::new_iname("u2")), + // Now recycle the needed entries. + let de = unsafe { + DeleteEvent::new_internal_invalid(filter!(f_or(vec![ + f_eq("name", PartialValue::new_iname("u1")), + f_eq("name", PartialValue::new_iname("u2")), + f_eq("name", PartialValue::new_iname("u3")), + f_eq("name", PartialValue::new_iname("g3")), + f_eq("name", PartialValue::new_iname("u4")), + f_eq("name", PartialValue::new_iname("g4")) + ]))) + }; + assert!(server_txn.delete(&de).is_ok()); + + // Now revive and check each one, one at a time. + let rev1 = unsafe { + ReviveRecycledEvent::new_impersonate_entry( + admin.clone(), + filter_all!(f_eq("name", PartialValue::new_iname("u1"))), + ) + }; + assert!(server_txn.revive_recycled(&rev1).is_ok()); + // check u1 contains MO -> + assert!(check_entry_has_mo( + &server_txn, + "u1", + "cca2bbfc-5b43-43f3-be9e-f5b03b3defec" + )); + + // Revive u2 and check it has two mo. + let rev2 = unsafe { + ReviveRecycledEvent::new_impersonate_entry( + admin.clone(), + filter_all!(f_eq("name", PartialValue::new_iname("u2"))), + ) + }; + assert!(server_txn.revive_recycled(&rev2).is_ok()); + assert!(check_entry_has_mo( + &server_txn, + "u2", + "e44cf9cd-9941-44cb-a02f-307b6e15ac54" + )); + assert!(check_entry_has_mo( + &server_txn, + "u2", + "d3132e6e-18ce-4b87-bee1-1d25e4bfe96d" + )); + + // Revive u3 and g3 at the same time. + let rev3 = unsafe { + ReviveRecycledEvent::new_impersonate_entry( + admin.clone(), + filter_all!(f_or(vec![ f_eq("name", PartialValue::new_iname("u3")), - f_eq("name", PartialValue::new_iname("g3")), - f_eq("name", PartialValue::new_iname("u4")), - f_eq("name", PartialValue::new_iname("g4")) - ]))) - }; - assert!(server_txn.delete(&de).is_ok()); + f_eq("name", PartialValue::new_iname("g3")) + ])), + ) + }; + assert!(server_txn.revive_recycled(&rev3).is_ok()); + assert!( + check_entry_has_mo(&server_txn, "u3", "36048117-e479-45ed-aeb5-611e8d83d5b1") == false + ); - // Now revive and check each one, one at a time. - let rev1 = unsafe { - ReviveRecycledEvent::new_impersonate_entry( - admin.clone(), - filter_all!(f_eq("name", PartialValue::new_iname("u1"))), - ) - }; - assert!(server_txn.revive_recycled(&rev1).is_ok()); - // check u1 contains MO -> - assert!(check_entry_has_mo( - &server_txn, - "u1", - "cca2bbfc-5b43-43f3-be9e-f5b03b3defec" - )); + // Revive u4, should NOT have the MO. + let rev4a = unsafe { + ReviveRecycledEvent::new_impersonate_entry( + admin.clone(), + filter_all!(f_eq("name", PartialValue::new_iname("u4"))), + ) + }; + assert!(server_txn.revive_recycled(&rev4a).is_ok()); + assert!( + check_entry_has_mo(&server_txn, "u4", "d5c59ac6-c533-4b00-989f-d0e183f07bab") == false + ); - // Revive u2 and check it has two mo. - let rev2 = unsafe { - ReviveRecycledEvent::new_impersonate_entry( - admin.clone(), - filter_all!(f_eq("name", PartialValue::new_iname("u2"))), - ) - }; - assert!(server_txn.revive_recycled(&rev2).is_ok()); - assert!(check_entry_has_mo( - &server_txn, - "u2", - "e44cf9cd-9941-44cb-a02f-307b6e15ac54" - )); - assert!(check_entry_has_mo( - &server_txn, - "u2", - "d3132e6e-18ce-4b87-bee1-1d25e4bfe96d" - )); + // Now revive g4, should allow MO onto u4. 
+ let rev4b = unsafe { + ReviveRecycledEvent::new_impersonate_entry( + admin, + filter_all!(f_eq("name", PartialValue::new_iname("g4"))), + ) + }; + assert!(server_txn.revive_recycled(&rev4b).is_ok()); + assert!( + check_entry_has_mo(&server_txn, "u4", "d5c59ac6-c533-4b00-989f-d0e183f07bab") == false + ); - // Revive u3 and g3 at the same time. - let rev3 = unsafe { - ReviveRecycledEvent::new_impersonate_entry( - admin.clone(), - filter_all!(f_or(vec![ - f_eq("name", PartialValue::new_iname("u3")), - f_eq("name", PartialValue::new_iname("g3")) - ])), - ) - }; - assert!(server_txn.revive_recycled(&rev3).is_ok()); - assert!( - check_entry_has_mo(&server_txn, "u3", "36048117-e479-45ed-aeb5-611e8d83d5b1") - == false - ); - - // Revive u4, should NOT have the MO. - let rev4a = unsafe { - ReviveRecycledEvent::new_impersonate_entry( - admin.clone(), - filter_all!(f_eq("name", PartialValue::new_iname("u4"))), - ) - }; - assert!(server_txn.revive_recycled(&rev4a).is_ok()); - assert!( - check_entry_has_mo(&server_txn, "u4", "d5c59ac6-c533-4b00-989f-d0e183f07bab") - == false - ); - - // Now revive g4, should allow MO onto u4. - let rev4b = unsafe { - ReviveRecycledEvent::new_impersonate_entry( - admin, - filter_all!(f_eq("name", PartialValue::new_iname("g4"))), - ) - }; - assert!(server_txn.revive_recycled(&rev4b).is_ok()); - assert!( - check_entry_has_mo(&server_txn, "u4", "d5c59ac6-c533-4b00-989f-d0e183f07bab") - == false - ); - - assert!(server_txn.commit().is_ok()); - }) + assert!(server_txn.commit().is_ok()); } - /* - #[test] - fn test_qs_schema_dump_attrs() { - run_test!(|server: &QueryServer| { - use crate::schema::SchemaTransaction; - let server_txn = server.write(); - let schema = server_txn.get_schema(); + #[qs_test_no_init] + async fn test_qs_upgrade_entry_attrs(server: &QueryServer) { + let mut server_txn = server.write(duration_from_epoch_now()).await; + assert!(server_txn.upgrade_reindex(SYSTEM_INDEX_VERSION).is_ok()); + assert!(server_txn.commit().is_ok()); - for k in schema.get_attributes().keys() { - debug!("{}", k); - } - debug!("===="); - for k in schema.get_classes().keys() { - debug!("{}", k); - } + let mut server_txn = server.write(duration_from_epoch_now()).await; + server_txn.initialise_schema_core().unwrap(); + server_txn.initialise_schema_idm().unwrap(); + assert!(server_txn.commit().is_ok()); - }) - } - */ + let mut server_txn = server.write(duration_from_epoch_now()).await; + assert!(server_txn.upgrade_reindex(SYSTEM_INDEX_VERSION + 1).is_ok()); + assert!(server_txn.commit().is_ok()); - #[test] - fn test_qs_upgrade_entry_attrs() { - run_test_no_init!(|server: &QueryServer| { - let server_txn = server.write(duration_from_epoch_now()); - assert!(server_txn.upgrade_reindex(SYSTEM_INDEX_VERSION).is_ok()); - assert!(server_txn.commit().is_ok()); + let mut server_txn = server.write(duration_from_epoch_now()).await; + assert!(server_txn + .internal_migrate_or_create_str(JSON_SYSTEM_INFO_V1) + .is_ok()); + assert!(server_txn + .internal_migrate_or_create_str(JSON_DOMAIN_INFO_V1) + .is_ok()); + assert!(server_txn + .internal_migrate_or_create_str(JSON_SYSTEM_CONFIG_V1) + .is_ok()); + assert!(server_txn.commit().is_ok()); - let server_txn = server.write(duration_from_epoch_now()); - server_txn.initialise_schema_core().unwrap(); - server_txn.initialise_schema_idm().unwrap(); - assert!(server_txn.commit().is_ok()); + let mut server_txn = server.write(duration_from_epoch_now()).await; + // ++ Mod the schema to set name to the old string type + let me_syn = unsafe { + 
ModifyEvent::new_internal_invalid(
+                filter!(f_or!([
+                    f_eq("attributename", PartialValue::new_iutf8("name")),
+                    f_eq("attributename", PartialValue::new_iutf8("domain_name")),
+                ])),
+                ModifyList::new_purge_and_set(
+                    "syntax",
+                    Value::new_syntaxs("UTF8STRING_INSENSITIVE").unwrap(),
+                ),
+            )
+        };
+        assert!(server_txn.modify(&me_syn).is_ok());
+        assert!(server_txn.commit().is_ok());
-            let server_txn = server.write(duration_from_epoch_now());
-            assert!(server_txn.upgrade_reindex(SYSTEM_INDEX_VERSION + 1).is_ok());
-            assert!(server_txn.commit().is_ok());
-
-            let server_txn = server.write(duration_from_epoch_now());
-            assert!(server_txn
-                .internal_migrate_or_create_str(JSON_SYSTEM_INFO_V1)
-                .is_ok());
-            assert!(server_txn
-                .internal_migrate_or_create_str(JSON_DOMAIN_INFO_V1)
-                .is_ok());
-            assert!(server_txn
-                .internal_migrate_or_create_str(JSON_SYSTEM_CONFIG_V1)
-                .is_ok());
-            assert!(server_txn.commit().is_ok());
-
-            let server_txn = server.write(duration_from_epoch_now());
-            // ++ Mod the schema to set name to the old string type
-            let me_syn = unsafe {
-                ModifyEvent::new_internal_invalid(
-                    filter!(f_or!([
-                        f_eq("attributename", PartialValue::new_iutf8("name")),
-                        f_eq("attributename", PartialValue::new_iutf8("domain_name")),
-                    ])),
-                    ModifyList::new_purge_and_set(
-                        "syntax",
-                        Value::new_syntaxs("UTF8STRING_INSENSITIVE").unwrap(),
+        let mut server_txn = server.write(duration_from_epoch_now()).await;
+        // ++ Mod domain name and name to be the old type.
+        let me_dn = unsafe {
+            ModifyEvent::new_internal_invalid(
+                filter!(f_eq("uuid", PartialValue::new_uuid(UUID_DOMAIN_INFO))),
+                ModifyList::new_list(vec![
+                    Modify::Purged(AttrString::from("name")),
+                    Modify::Purged(AttrString::from("domain_name")),
+                    Modify::Present(AttrString::from("name"), Value::new_iutf8("domain_local")),
+                    Modify::Present(
+                        AttrString::from("domain_name"),
+                        Value::new_iutf8("example.com"),
                     ),
-                )
-            };
-            assert!(server_txn.modify(&me_syn).is_ok());
-            assert!(server_txn.commit().is_ok());
+                ]),
+            )
+        };
+        assert!(server_txn.modify(&me_dn).is_ok());
-            let mut server_txn = server.write(duration_from_epoch_now());
-            // ++ Mod domain name and name to be the old type.
-            let me_dn = unsafe {
-                ModifyEvent::new_internal_invalid(
-                    filter!(f_eq("uuid", PartialValue::new_uuid(UUID_DOMAIN_INFO))),
-                    ModifyList::new_list(vec![
-                        Modify::Purged(AttrString::from("name")),
-                        Modify::Purged(AttrString::from("domain_name")),
-                        Modify::Present(AttrString::from("name"), Value::new_iutf8("domain_local")),
-                        Modify::Present(
-                            AttrString::from("domain_name"),
-                            Value::new_iutf8("example.com"),
-                        ),
-                    ]),
-                )
-            };
-            assert!(server_txn.modify(&me_dn).is_ok());
+        // Now, both the types are invalid.
-            // Now, both the types are invalid.
+        // WARNING! We can't commit here because this triggers domain_reload which will fail
+        // due to incorrect syntax of the domain name! Run the migration in the same txn!
+        // Trigger a schema reload.
+        assert!(server_txn.reload_schema().is_ok());
-            // WARNING! We can't commit here because this triggers domain_reload which will fail
-            // due to incorrect syntax of the domain name! Run the migration in the same txn!
-            // Trigger a schema reload.
-            assert!(server_txn.reload_schema().is_ok());
+        // We can't just re-run the migrate here because name takes its definition from
+        // in memory, and we can't re-run the initial memory gen. So we just fix it to match
+        // what the migrate "would do".
+ let me_syn = unsafe { + ModifyEvent::new_internal_invalid( + filter!(f_or!([ + f_eq("attributename", PartialValue::new_iutf8("name")), + f_eq("attributename", PartialValue::new_iutf8("domain_name")), + ])), + ModifyList::new_purge_and_set( + "syntax", + Value::new_syntaxs("UTF8STRING_INAME").unwrap(), + ), + ) + }; + assert!(server_txn.modify(&me_syn).is_ok()); - // We can't just re-run the migrate here because name takes it's definition from - // in memory, and we can't re-run the initial memory gen. So we just fix it to match - // what the migrate "would do". - let me_syn = unsafe { - ModifyEvent::new_internal_invalid( - filter!(f_or!([ - f_eq("attributename", PartialValue::new_iutf8("name")), - f_eq("attributename", PartialValue::new_iutf8("domain_name")), - ])), - ModifyList::new_purge_and_set( - "syntax", - Value::new_syntaxs("UTF8STRING_INAME").unwrap(), - ), - ) - }; - assert!(server_txn.modify(&me_syn).is_ok()); + // WARNING! We can't commit here because this triggers domain_reload which will fail + // due to incorrect syntax of the domain name! Run the migration in the same txn! + // Trigger a schema reload. + assert!(server_txn.reload_schema().is_ok()); - // WARNING! We can't commit here because this triggers domain_reload which will fail - // due to incorrect syntax of the domain name! Run the migration in the same txn! - // Trigger a schema reload. - assert!(server_txn.reload_schema().is_ok()); + // ++ Run the upgrade for X to Y + assert!(server_txn.migrate_2_to_3().is_ok()); - // ++ Run the upgrade for X to Y - assert!(server_txn.migrate_2_to_3().is_ok()); + assert!(server_txn.commit().is_ok()); - assert!(server_txn.commit().is_ok()); - - // Assert that it migrated and worked as expected. - let server_txn = server.write(duration_from_epoch_now()); - let domain = server_txn - .internal_search_uuid(&UUID_DOMAIN_INFO) - .expect("failed"); - // ++ assert all names are iname - assert!( - domain.get_ava_set("name").expect("no name?").syntax() - == SyntaxType::Utf8StringIname - ); - // ++ assert all domain/domain_name are iname - assert!( - domain - .get_ava_set("domain_name") - .expect("no domain_name?") - .syntax() - == SyntaxType::Utf8StringIname - ); - assert!(server_txn.commit().is_ok()); - }) + // Assert that it migrated and worked as expected. 
+ let server_txn = server.write(duration_from_epoch_now()).await; + let domain = server_txn + .internal_search_uuid(&UUID_DOMAIN_INFO) + .expect("failed"); + // ++ assert all names are iname + assert!( + domain.get_ava_set("name").expect("no name?").syntax() == SyntaxType::Utf8StringIname + ); + // ++ assert all domain/domain_name are iname + assert!( + domain + .get_ava_set("domain_name") + .expect("no domain_name?") + .syntax() + == SyntaxType::Utf8StringIname + ); + assert!(server_txn.commit().is_ok()); } } diff --git a/kanidmd/lib/src/testkit.rs b/kanidmd/lib/src/testkit.rs new file mode 100644 index 000000000..6a67e2324 --- /dev/null +++ b/kanidmd/lib/src/testkit.rs @@ -0,0 +1,21 @@ +use crate::be::{Backend, BackendConfig}; +use crate::prelude::*; +use crate::schema::Schema; +#[allow(unused_imports)] +use crate::utils::duration_from_epoch_now; + +pub async fn setup_test() -> QueryServer { + let _ = sketching::test_init(); + + // Create an in memory BE + let schema_outer = Schema::new().expect("Failed to init schema"); + let idxmeta = { + let schema_txn = schema_outer.write(); + schema_txn.reload_idxmeta() + }; + let be = Backend::new(BackendConfig::new_test(), idxmeta, false).expect("Failed to init BE"); + + let qs = QueryServer::new(be, schema_outer, "example.com".to_string()); + // Init is called via the proc macro + qs +} diff --git a/kanidmd/testkit-macros/Cargo.toml b/kanidmd/testkit-macros/Cargo.toml new file mode 100644 index 000000000..8193f6fe5 --- /dev/null +++ b/kanidmd/testkit-macros/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "testkit-macros" +version = "0.1.0" +edition = "2021" + +[lib] +proc-macro = true + +[dependencies] +proc-macro2.workspace = true +quote.workspace = true +syn.workspace = true + + diff --git a/kanidmd/testkit-macros/src/entry.rs b/kanidmd/testkit-macros/src/entry.rs new file mode 100644 index 000000000..f8b0c1bec --- /dev/null +++ b/kanidmd/testkit-macros/src/entry.rs @@ -0,0 +1,88 @@ +use proc_macro::TokenStream; +use proc_macro2::{Ident, Span}; +use syn::spanned::Spanned; + +use quote::{quote, quote_spanned, ToTokens}; + +fn parse_knobs(input: syn::ItemFn) -> TokenStream { + // If type mismatch occurs, the current rustc points to the last statement. + let (last_stmt_start_span, _last_stmt_end_span) = { + let mut last_stmt = input + .block + .stmts + .last() + .map(ToTokens::into_token_stream) + .unwrap_or_default() + .into_iter(); + // `Span` on stable Rust has a limitation that only points to the first + // token, not the whole tokens. We can work around this limitation by + // using the first/last span of the tokens like + // `syn::Error::new_spanned` does. + let start = last_stmt.next().map_or_else(Span::call_site, |t| t.span()); + let end = last_stmt.last().map_or(start, |t| t.span()); + (start, end) + }; + + let rt = quote_spanned! {last_stmt_start_span=> + tokio::runtime::Builder::new_current_thread() + }; + + let header = quote! { + #[::core::prelude::v1::test] + }; + + let fn_name = &input.sig.ident; + let test_driver = Ident::new(&format!("tk_{}", fn_name), input.sig.span()); + + // Effectively we are just injecting a real test function around this which we will + // call. + + let result = quote! 
{ + #input + + #header + fn #test_driver() { + let body = async { + let rsclient = kanidmd_testkit::setup_async_test().await; + #fn_name(rsclient).await + }; + #[allow(clippy::expect_used, clippy::diverging_sub_expression)] + { + return #rt + .enable_all() + .build() + .expect("Failed building the Runtime") + .block_on(body); + } + } + }; + + result.into() +} + +fn token_stream_with_error(mut tokens: TokenStream, error: syn::Error) -> TokenStream { + tokens.extend(TokenStream::from(error.into_compile_error())); + tokens +} + +pub(crate) fn test(_args: TokenStream, item: TokenStream) -> TokenStream { + // If any of the steps for this macro fail, we still want to expand to an item that is as close + // to the expected output as possible. This helps out IDEs such that completions and other + // related features keep working. + let input: syn::ItemFn = match syn::parse(item.clone()) { + Ok(it) => it, + Err(e) => return token_stream_with_error(item, e), + }; + + if let Some(attr) = input.attrs.iter().find(|attr| attr.path.is_ident("test")) { + let msg = "second test attribute is supplied"; + return token_stream_with_error(item, syn::Error::new_spanned(&attr, msg)); + }; + + if input.sig.asyncness.is_none() { + let msg = "the `async` keyword is missing from the function declaration"; + return token_stream_with_error(item, syn::Error::new_spanned(input.sig.fn_token, msg)); + } + + parse_knobs(input) +} diff --git a/kanidmd/testkit-macros/src/lib.rs b/kanidmd/testkit-macros/src/lib.rs new file mode 100644 index 000000000..42af50275 --- /dev/null +++ b/kanidmd/testkit-macros/src/lib.rs @@ -0,0 +1,23 @@ +#![deny(warnings)] +#![warn(unused_extern_crates)] +#![deny(clippy::todo)] +#![deny(clippy::unimplemented)] +#![deny(clippy::unwrap_used)] +#![deny(clippy::expect_used)] +#![deny(clippy::panic)] +#![deny(clippy::unreachable)] +#![deny(clippy::await_holding_lock)] +#![deny(clippy::needless_pass_by_value)] +#![deny(clippy::trivially_copy_pass_by_ref)] + +mod entry; + +#[allow(unused_extern_crates)] +extern crate proc_macro; + +use proc_macro::TokenStream; + +#[proc_macro_attribute] +pub fn test(args: TokenStream, item: TokenStream) -> TokenStream { + entry::test(args, item) +} diff --git a/kanidmd/testkit/Cargo.toml b/kanidmd/testkit/Cargo.toml new file mode 100644 index 000000000..18d5c318c --- /dev/null +++ b/kanidmd/testkit/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "kanidmd_testkit" +description = "Kanidm Server Test Framework" +documentation = "https://docs.rs/kanidm/latest/kanidm/" + +version.workspace = true +authors.workspace = true +rust-version.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lib] +name = "kanidmd_testkit" +path = "src/lib.rs" + +[dependencies] +kanidm_client.workspace = true +kanidm_proto.workspace = true +kanidmd_core.workspace = true +kanidmd_lib.workspace = true +futures.workspace = true + +webauthn-authenticator-rs.workspace = true +oauth2_ext = { workspace = true, default-features = false } + +url = { workspace = true, features = ["serde"] } +reqwest = { workspace = true, features=["cookies", "json", "native-tls"] } +sketching.workspace = true +testkit-macros.workspace = true +tracing = { workspace = true, features = ["attributes"] } +tokio = { workspace = true, features = ["net", "sync", "io-util", "macros"] } + + +[build-dependencies] +profiles.workspace = true + +[dev-dependencies] +compact_jwt.workspace = true +serde_json.workspace = true diff --git a/kanidmd/testkit/build.rs 
b/kanidmd/testkit/build.rs new file mode 100644 index 000000000..5a19158c1 --- /dev/null +++ b/kanidmd/testkit/build.rs @@ -0,0 +1,3 @@ +fn main() { + profiles::apply_profile(); +} diff --git a/kanidmd/core/tests/common.rs b/kanidmd/testkit/src/lib.rs similarity index 81% rename from kanidmd/core/tests/common.rs rename to kanidmd/testkit/src/lib.rs index 6d8f47ca2..bff9f0948 100644 --- a/kanidmd/core/tests/common.rs +++ b/kanidmd/testkit/src/lib.rs @@ -1,3 +1,15 @@ +#![deny(warnings)] +#![warn(unused_extern_crates)] +#![deny(clippy::todo)] +#![deny(clippy::unimplemented)] +#![deny(clippy::unwrap_used)] +#![deny(clippy::expect_used)] +#![deny(clippy::panic)] +#![deny(clippy::unreachable)] +#![deny(clippy::await_holding_lock)] +#![deny(clippy::needless_pass_by_value)] +#![deny(clippy::trivially_copy_pass_by_ref)] + use std::net::TcpStream; use std::sync::atomic::{AtomicU16, Ordering}; @@ -10,6 +22,8 @@ pub const ADMIN_TEST_USER: &str = "admin"; pub const ADMIN_TEST_PASSWORD: &str = "integration test admin password"; pub static PORT_ALLOC: AtomicU16 = AtomicU16::new(18080); +pub use testkit_macros::test; + pub fn is_free_port(port: u16) -> bool { // TODO: Refactor to use `Result::is_err` in a future PR match TcpStream::connect(("0.0.0.0", port)) { @@ -50,7 +64,7 @@ pub async fn setup_async_test() -> KanidmClient { config.address = format!("127.0.0.1:{}", port); config.secure_cookies = false; config.integration_test_config = Some(int_config); - config.role = ServerRole::WriteReplicaNoUI; + config.role = ServerRole::WriteReplica; config.domain = "localhost".to_string(); config.origin = addr.clone(); // config.log_level = Some(LogLevel::Verbose as u32); @@ -64,10 +78,12 @@ pub async fn setup_async_test() -> KanidmClient { task::yield_now().await; let rsclient = KanidmClientBuilder::new() - .address(addr) + .address(addr.clone()) .no_proxy() .build() .expect("Failed to build client"); + tracing::info!("Testkit server setup complete - {}", addr); + rsclient } diff --git a/kanidmd/core/tests/default_entries.rs b/kanidmd/testkit/tests/default_entries.rs similarity index 94% rename from kanidmd/core/tests/default_entries.rs rename to kanidmd/testkit/tests/default_entries.rs index 8f23d7eb6..47fe30ab8 100644 --- a/kanidmd/core/tests/default_entries.rs +++ b/kanidmd/testkit/tests/default_entries.rs @@ -4,8 +4,7 @@ use std::collections::HashSet; use kanidm_client::KanidmClient; use kanidm_proto::v1::{Filter, Modify, ModifyList}; -mod common; -use crate::common::{setup_async_test, ADMIN_TEST_PASSWORD, ADMIN_TEST_USER}; +use kanidmd_testkit::{ADMIN_TEST_PASSWORD, ADMIN_TEST_USER}; static USER_READABLE_ATTRS: [&str; 9] = [ "name", @@ -286,9 +285,8 @@ async fn test_modify_group( // - Read to all self attributes (within security constraints). // - Write to a limited set of self attributes, such as: // name, displayname, legalname, ssh-keys, credentials etc. -#[tokio::test] -async fn test_default_entries_rbac_users() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_users(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -327,9 +325,8 @@ async fn test_default_entries_rbac_users() { // Account Managers // read and write to accounts, including write credentials but NOT private data (see people manager) // ability to lock and unlock accounts, excluding high access members. 
-#[tokio::test] -async fn test_default_entries_rbac_account_managers() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_account_managers(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -363,9 +360,8 @@ async fn test_default_entries_rbac_account_managers() { // Group Managers // read all groups // write group but not high access -#[tokio::test] -async fn test_default_entries_rbac_group_managers() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_group_managers(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -411,9 +407,8 @@ async fn test_default_entries_rbac_group_managers() { // Admins // read and write access control entries. -#[tokio::test] -async fn test_default_entries_rbac_admins_access_control_entries() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_admins_access_control_entries(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -463,9 +458,8 @@ async fn test_default_entries_rbac_admins_access_control_entries() { // read schema entries. // TODO #252: write schema entries -#[tokio::test] -async fn test_default_entries_rbac_admins_schema_entries() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_admins_schema_entries(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -579,9 +573,8 @@ async fn test_default_entries_rbac_admins_schema_entries() { // modify all groups including high access groups. // create new accounts (to bootstrap the system). -#[tokio::test] -async fn test_default_entries_rbac_admins_group_entries() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_admins_group_entries(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -600,9 +593,8 @@ async fn test_default_entries_rbac_admins_group_entries() { } // modify high access accounts as an escalation for security sensitive accounts. 
-#[tokio::test] -async fn test_default_entries_rbac_admins_ha_accounts() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_admins_ha_accounts(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -617,9 +609,8 @@ async fn test_default_entries_rbac_admins_ha_accounts() { } // recover from the recycle bin -#[tokio::test] -async fn test_default_entries_rbac_admins_recycle_accounts() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_admins_recycle_accounts(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -641,9 +632,8 @@ async fn test_default_entries_rbac_admins_recycle_accounts() { // People Managers // read private or sensitive data of persons, IE legalName // write private or sensitive data of persons, IE legalName -#[tokio::test] -async fn test_default_entries_rbac_people_managers() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_people_managers(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -684,9 +674,8 @@ async fn test_default_entries_rbac_people_managers() { // Anonymous Clients + Everyone Else // read memberof, unix attrs, name, displayname, class -#[tokio::test] -async fn test_default_entries_rbac_anonymous_entry() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_anonymous_entry(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -715,9 +704,8 @@ async fn test_default_entries_rbac_anonymous_entry() { // RADIUS Servers // Read radius credentials // Read other needed attributes to fulfil radius functions. -#[tokio::test] -async fn test_default_entries_rbac_radius_servers() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_default_entries_rbac_radius_servers(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await @@ -738,9 +726,8 @@ async fn test_default_entries_rbac_radius_servers() { test_write_attrs(&rsclient, "test", &RADIUS_NECESSARY_ATTRS, false).await; } -#[tokio::test] -async fn test_self_write_mail_priv_people() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_self_write_mail_priv_people(rsclient: KanidmClient) { rsclient .auth_simple_password(ADMIN_TEST_USER, ADMIN_TEST_PASSWORD) .await diff --git a/kanidmd/testkit/tests/https_middleware.rs b/kanidmd/testkit/tests/https_middleware.rs new file mode 100644 index 000000000..731cb0692 --- /dev/null +++ b/kanidmd/testkit/tests/https_middleware.rs @@ -0,0 +1,39 @@ +use kanidm_client::KanidmClient; + +#[kanidmd_testkit::test] +async fn test_https_middleware_headers(rsclient: KanidmClient) { + // We need to do manual reqwests here. 
+ let addr = rsclient.get_url(); + + // here we test the /ui/ endpoint which should have the headers + let response = match reqwest::get(format!("{}/ui/", &addr)).await { + Ok(value) => value, + Err(error) => { + panic!("Failed to query {:?} : {:#?}", addr, error); + } + }; + eprintln!("response: {:#?}", response); + assert_eq!(response.status(), 200); + + eprintln!( + "csp headers: {:#?}", + response.headers().get("content-security-policy") + ); + assert_ne!(response.headers().get("content-security-policy"), None); + + // here we test the /pkg/ endpoint which shouldn't have the headers + let response = + match reqwest::get(format!("{}/pkg/external/bootstrap.bundle.min.js", &addr)).await { + Ok(value) => value, + Err(error) => { + panic!("Failed to query {:?} : {:#?}", addr, error); + } + }; + eprintln!("response: {:#?}", response); + assert_eq!(response.status(), 200); + eprintln!( + "csp headers: {:#?}", + response.headers().get("content-security-policy") + ); + assert_eq!(response.headers().get("content-security-policy"), None); +} diff --git a/kanidmd/core/tests/oauth2_test.rs b/kanidmd/testkit/tests/oauth2_test.rs similarity index 98% rename from kanidmd/core/tests/oauth2_test.rs rename to kanidmd/testkit/tests/oauth2_test.rs index 77e284e00..d97b4519a 100644 --- a/kanidmd/core/tests/oauth2_test.rs +++ b/kanidmd/testkit/tests/oauth2_test.rs @@ -1,5 +1,4 @@ #![deny(warnings)] -mod common; use std::collections::HashMap; use std::convert::TryFrom; use std::str::FromStr; @@ -12,7 +11,8 @@ use kanidm_proto::oauth2::{ use oauth2_ext::PkceCodeChallenge; use url::Url; -use crate::common::{setup_async_test, ADMIN_TEST_PASSWORD}; +use kanidm_client::KanidmClient; +use kanidmd_testkit::ADMIN_TEST_PASSWORD; macro_rules! assert_no_cache { ($response:expr) => {{ @@ -38,9 +38,8 @@ macro_rules! assert_no_cache { }}; } -#[tokio::test] -async fn test_oauth2_openid_basic_flow() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_oauth2_openid_basic_flow(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; diff --git a/kanidmd/core/tests/proto_v1_test.rs b/kanidmd/testkit/tests/proto_v1_test.rs similarity index 93% rename from kanidmd/core/tests/proto_v1_test.rs rename to kanidmd/testkit/tests/proto_v1_test.rs index 01c8abbbd..8d49f899d 100644 --- a/kanidmd/core/tests/proto_v1_test.rs +++ b/kanidmd/testkit/tests/proto_v1_test.rs @@ -7,20 +7,19 @@ use kanidm_proto::v1::{ use kanidmd_lib::credential::totp::Totp; use tracing::debug; -mod common; use std::str::FromStr; use compact_jwt::JwsUnverified; use webauthn_authenticator_rs::softpasskey::SoftPasskey; use webauthn_authenticator_rs::WebauthnAuthenticator; -use crate::common::{setup_async_test, ADMIN_TEST_PASSWORD}; +use kanidm_client::KanidmClient; +use kanidmd_testkit::ADMIN_TEST_PASSWORD; const UNIX_TEST_PASSWORD: &str = "unix test user password"; -#[tokio::test] -async fn test_server_create() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_create(rsclient: KanidmClient) { let e: Entry = serde_json::from_str( r#"{ "attrs": { @@ -45,11 +44,9 @@ async fn test_server_create() { assert!(res.is_ok()); } -#[tokio::test] -async fn test_server_modify() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_modify(rsclient: KanidmClient) { // Build a self mod. 
- let f = Filter::SelfUuid; let m = ModifyList::new_list(vec![ Modify::Purged("displayname".to_string()), @@ -70,9 +67,8 @@ async fn test_server_modify() { assert!(res.is_ok()); } -#[tokio::test] -async fn test_server_whoami_anonymous() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_whoami_anonymous(rsclient: KanidmClient) { // First show we are un-authenticated. let pre_res = rsclient.whoami().await; // This means it was okay whoami, but no uat attached. @@ -97,9 +93,8 @@ async fn test_server_whoami_anonymous() { assert!(res.is_ok()); } -#[tokio::test] -async fn test_server_whoami_admin_simple_password() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_whoami_admin_simple_password(rsclient: KanidmClient) { // First show we are un-authenticated. let pre_res = rsclient.whoami().await; // This means it was okay whoami, but no uat attached. @@ -120,9 +115,8 @@ async fn test_server_whoami_admin_simple_password() { assert!(e.attrs.get("spn") == Some(&vec!["admin@localhost".to_string()])); } -#[tokio::test] -async fn test_server_search() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_search(rsclient: KanidmClient) { // First show we are un-authenticated. let pre_res = rsclient.whoami().await; // This means it was okay whoami, but no uat attached. @@ -146,9 +140,8 @@ async fn test_server_search() { } // test the rest group endpoint. -#[tokio::test] -async fn test_server_rest_group_read() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_group_read(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -163,9 +156,8 @@ async fn test_server_rest_group_read() { println!("{:?}", g); } -#[tokio::test] -async fn test_server_rest_group_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_group_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -238,9 +230,8 @@ async fn test_server_rest_group_lifecycle() { assert!(members == Some(vec!["idm_admin@localhost".to_string()])); } -#[tokio::test] -async fn test_server_rest_account_read() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_account_read(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -255,9 +246,8 @@ async fn test_server_rest_account_read() { println!("{:?}", a); } -#[tokio::test] -async fn test_server_rest_schema_read() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_schema_read(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -284,9 +274,8 @@ async fn test_server_rest_schema_read() { } // Test resetting a radius cred, and then checking/viewing it. 
-#[tokio::test] -async fn test_server_radius_credential_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_radius_credential_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -356,9 +345,8 @@ async fn test_server_radius_credential_lifecycle() { assert!(n_sec.is_none()); } -#[tokio::test] -async fn test_server_rest_person_account_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_person_account_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -408,9 +396,8 @@ async fn test_server_rest_person_account_lifecycle() { .unwrap(); } -#[tokio::test] -async fn test_server_rest_sshkey_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_sshkey_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -466,9 +453,8 @@ async fn test_server_rest_sshkey_lifecycle() { assert!(skn.unwrap() == Some("ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBx4TpJYQjd0YI5lQIHqblIsCIK5NKVFURYS/eM3o6/Z william@amethyst".to_string())); } -#[tokio::test] -async fn test_server_rest_domain_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_domain_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -497,9 +483,8 @@ async fn test_server_rest_domain_lifecycle() { ); } -#[tokio::test] -async fn test_server_rest_posix_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_posix_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -594,9 +579,8 @@ async fn test_server_rest_posix_lifecycle() { assert!(r3.name == "posix_group"); } -#[tokio::test] -async fn test_server_rest_posix_auth_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_posix_auth_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -667,9 +651,8 @@ async fn test_server_rest_posix_auth_lifecycle() { }; } -#[tokio::test] -async fn test_server_rest_recycle_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_recycle_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -722,9 +705,8 @@ async fn test_server_rest_recycle_lifecycle() { assert!(acc.is_some()); } -#[tokio::test] -async fn test_server_rest_account_import_password() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_account_import_password(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -784,9 +766,8 @@ async fn test_server_rest_account_import_password() { } } -#[tokio::test] -async fn test_server_rest_oauth2_basic_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_rest_oauth2_basic_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -922,9 +903,8 @@ async fn test_server_rest_oauth2_basic_lifecycle() { assert!(final_configs.is_empty()); } -#[tokio::test] 
-async fn test_server_credential_update_session_pw() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_credential_update_session_pw(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -981,9 +961,8 @@ async fn test_server_credential_update_session_pw() { assert!(res.is_ok()); } -#[tokio::test] -async fn test_server_credential_update_session_totp_pw() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_credential_update_session_totp_pw(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -1099,9 +1078,8 @@ async fn test_server_credential_update_session_totp_pw() { assert!(res.is_ok()); } -#[tokio::test] -async fn test_server_credential_update_session_passkey() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_credential_update_session_passkey(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -1188,9 +1166,8 @@ async fn test_server_credential_update_session_passkey() { assert!(res.is_ok()); } -#[tokio::test] -async fn test_server_api_token_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_api_token_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; @@ -1247,9 +1224,8 @@ async fn test_server_api_token_lifecycle() { // No need to test expiry, that's validated in the server internal tests. } -#[tokio::test] -async fn test_server_user_auth_token_lifecycle() { - let rsclient = setup_async_test().await; +#[kanidmd_testkit::test] +async fn test_server_user_auth_token_lifecycle(rsclient: KanidmClient) { let res = rsclient .auth_simple_password("admin", ADMIN_TEST_PASSWORD) .await; diff --git a/profiles/Cargo.toml b/profiles/Cargo.toml index bde4c7c98..bdbeffe09 100644 --- a/profiles/Cargo.toml +++ b/profiles/Cargo.toml @@ -2,6 +2,8 @@ name = "profiles" description = "Kanidm Build System Profiles" documentation = "https://docs.rs/kanidm/latest/kanidm/" +# We do not have tests in this pkg +autotests = false version.workspace = true authors.workspace = true diff --git a/sketching/Cargo.toml b/sketching/Cargo.toml index a1e9d627b..566199807 100644 --- a/sketching/Cargo.toml +++ b/sketching/Cargo.toml @@ -1,5 +1,7 @@ [package] name = "sketching" +# We do not have tests in this pkg +autotests = false version.workspace = true authors.workspace = true