diff --git a/.rustfmt.toml b/.rustfmt.toml
new file mode 100644
index 000000000..b9129e6f1
--- /dev/null
+++ b/.rustfmt.toml
@@ -0,0 +1,11 @@
+
+reorder_imports = true
+
+## Requires nightly
+# imports_granularity = "Module"
+# group_imports = "StdExternalCrate"
+# format_code_in_doc_comments = true
+# format_macro_bodies = true
+# reorder_impl_items = true
+
+
diff --git a/Cargo.lock b/Cargo.lock
index 1ae114a29..6b6ef4b39 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -82,6 +82,21 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "alloc-no-stdlib"
+version = "2.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3"
+
+[[package]]
+name = "alloc-stdlib"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece"
+dependencies = [
+ "alloc-no-stdlib",
+]
+
 [[package]]
 name = "android_system_properties"
 version = "0.1.5"
@@ -186,6 +201,7 @@ version = "0.3.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "345fd392ab01f746c717b1357165b76f0b67a60192007b234058c9045fdcf695"
 dependencies = [
+ "brotli",
  "flate2",
  "futures-core",
  "futures-io",
@@ -578,6 +594,27 @@ version = "2.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "cfa8873f51c92e232f9bac4065cddef41b714152812bfc5f7672ba16d6ef8cd9"
 
+[[package]]
+name = "brotli"
+version = "3.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68"
+dependencies = [
+ "alloc-no-stdlib",
+ "alloc-stdlib",
+ "brotli-decompressor",
+]
+
+[[package]]
+name = "brotli-decompressor"
+version = "2.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80"
+dependencies = [
+ "alloc-no-stdlib",
+ "alloc-stdlib",
+]
+
 [[package]]
 name = "bstr"
 version = "0.2.17"
@@ -602,6 +639,12 @@ version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"
 
+[[package]]
+name = "bytemuck"
+version = "1.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da"
+
 [[package]]
 name = "byteorder"
 version = "1.4.3"
@@ -753,6 +796,12 @@ dependencies = [
  "os_str_bytes",
 ]
 
+[[package]]
+name = "color_quant"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
+
 [[package]]
 name = "compact_jwt"
 version = "0.2.8"
@@ -850,9 +899,9 @@ dependencies = [
 
 [[package]]
 name = "cookie"
-version = "0.16.0"
+version = "0.16.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94d4706de1b0fa5b132270cddffa8585166037822e260a944fe161acd137ca05"
+checksum = "344adc371239ef32293cb1c4fe519592fcf21206c79c02854320afcdf3ab4917"
 dependencies = [
  "percent-encoding",
  "time 0.3.14",
@@ -865,7 +914,7 @@ version = "0.16.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2e4b6aa369f41f5faa04bb80c9b1f4216ea81646ed6124d76ba5c49a7aafd9cd"
 dependencies = [
- "cookie 0.16.0",
+ "cookie 0.16.1",
  "idna 0.2.3",
  "log",
  "publicsuffix",
@@ -1991,6 +2040,19 @@ dependencies = [
  "want",
 ]
 
+[[package]]
+name = "hyper-rustls"
+version = "0.23.0"
"0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" +dependencies = [ + "http", + "hyper", + "rustls", + "tokio", + "tokio-rustls", +] + [[package]] name = "hyper-tls" version = "0.5.0" @@ -2061,6 +2123,20 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb56e1aa765b4b4f3aadfab769793b7087bb03a4ea4920644a6d238e2df5b9ed" +[[package]] +name = "image" +version = "0.23.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24ffcb7e7244a9bf19d35bf2883b9c080c4ced3c07a9895572178cdb8f13f6a1" +dependencies = [ + "bytemuck", + "byteorder", + "color_quant", + "num-iter", + "num-rational", + "num-traits", +] + [[package]] name = "indexmap" version = "1.9.1" @@ -2174,7 +2250,7 @@ dependencies = [ "rusqlite", "saffron", "serde", - "serde_cbor", + "serde_cbor_2", "serde_json", "sketching", "smartstring", @@ -2664,6 +2740,28 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-iter" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" +dependencies = [ + "autocfg", + "num-integer", + "num-traits", +] + [[package]] name = "num-traits" version = "0.2.15" @@ -2724,6 +2822,7 @@ dependencies = [ "getrandom 0.2.7", "http", "rand 0.8.5", + "reqwest", "serde", "serde_json", "serde_path_to_error", @@ -2773,9 +2872,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl" -version = "0.10.41" +version = "0.10.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "618febf65336490dfcf20b73f885f5651a0c89c64c2d4a8c3662585a70bf5bd0" +checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" dependencies = [ "bitflags", "cfg-if 1.0.0", @@ -2805,9 +2904,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.75" +version = "0.9.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5f9bd0c2710541a3cda73d6f9ac4f1b240de4ae261065d309dbe73d9dceb42f" +checksum = "5230151e44c0f05157effb743e8d517472843121cf9243e8b81393edb5acd9ce" dependencies = [ "autocfg", "cc", @@ -2919,6 +3018,24 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +[[package]] +name = "phf" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "928c6535de93548188ef63bb7c4036bd415cd8f36ad25af44b9789b2ee72a48c" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fb5f6f826b772a8d4c0394209441e7d37cbbb967ae9c7e0e8134365c9ee676" +dependencies = [ + "siphasher", +] + [[package]] name = "phonenumber" version = "0.3.1+8.12.9" @@ -3085,9 +3202,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" [[package]] name = "proc-macro2" -version = "1.0.43" +version = "1.0.44" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab" +checksum = "7bd7356a8122b6c4a24a82b278680c73357984ca2fc79a0f9fa6dea7dced7c58" dependencies = [ "unicode-ident", ] @@ -3124,6 +3241,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16d2f1455f3630c6e5107b4f2b94e74d76dea80736de0981fd27644216cff57f" dependencies = [ "checked_int_cast", + "image", ] [[package]] @@ -3341,7 +3459,7 @@ checksum = "431949c384f4e2ae07605ccaa56d1d9d2ecdb5cadd4f9577ccfab29f2e5149fc" dependencies = [ "base64 0.13.0", "bytes", - "cookie 0.16.0", + "cookie 0.16.1", "cookie_store", "encoding_rs", "futures-core", @@ -3350,6 +3468,7 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-rustls", "hyper-tls", "ipnet", "js-sys", @@ -3360,19 +3479,38 @@ dependencies = [ "percent-encoding", "pin-project-lite 0.2.9", "proc-macro-hack", + "rustls", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-rustls", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg", ] +[[package]] +name = "ring" +version = "0.16.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi", +] + [[package]] name = "route-recognizer" version = "0.2.0" @@ -3443,6 +3581,27 @@ dependencies = [ "nom 7.1.1", ] +[[package]] +name = "rustls" +version = "0.20.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" +dependencies = [ + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +dependencies = [ + "base64 0.13.0", +] + [[package]] name = "ryu" version = "1.0.11" @@ -3511,7 +3670,6 @@ version = "1.1.0-alpha.9" dependencies = [ "async-std", "async-trait", - "base64 0.13.0", "compact_jwt", "futures", "futures-util", @@ -3541,6 +3699,16 @@ dependencies = [ "webauthn-authenticator-rs", ] +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "security-framework" version = "2.7.0" @@ -3800,9 +3968,15 @@ dependencies = [ "event-listener", ] +[[package]] +name = "siphasher" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de" + [[package]] name = "sketching" -version = "0.1.0" +version = "1.1.0-alpha.9" dependencies = [ "async-trait", "num_enum", @@ -3861,6 +4035,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + [[package]] name = "sptr" version = "0.3.2" @@ -3965,9 +4145,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.100" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"52205623b1b0f064a4e71182c3b18ae902267282930c6d5462c91b859668426e" +checksum = "e90cde112c4b9690b8cbe810cba9ddd8bc1d7472e2cae317b69e9438c1cba7d2" dependencies = [ "proc-macro2", "quote", @@ -4027,18 +4207,18 @@ checksum = "949517c0cf1bf4ee812e2e07e08ab448e3ae0d23472aee8a06c985f0c8815b16" [[package]] name = "thiserror" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c53f98874615aea268107765aa1ed8f6116782501d18e53d08b471733bea6c85" +checksum = "0a99cb8c4b9a8ef0e7907cd3b617cc8dc04d571c4e73c8ae403d80ac160bb122" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8b463991b4eab2d801e724172285ec4195c650e8ec79b149e6c2a8e6dd3f783" +checksum = "3a891860d3c8d66fec8e73ddb3765f90082374dbaaa833407b904a94f1a7eb43" dependencies = [ "proc-macro2", "quote", @@ -4086,6 +4266,7 @@ dependencies = [ "async-compression", "futures-lite", "http-types", + "phf", "regex", "tide", ] @@ -4273,6 +4454,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.23.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" +dependencies = [ + "rustls", + "tokio", + "webpki", +] + [[package]] name = "tokio-util" version = "0.7.4" @@ -4441,6 +4633,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + [[package]] name = "url" version = "2.3.1" @@ -4774,6 +4972,25 @@ dependencies = [ "web-sys", ] +[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.22.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" +dependencies = [ + "webpki", +] + [[package]] name = "wepoll-ffi" version = "0.1.2" diff --git a/Cargo.toml b/Cargo.toml index f92ce1de4..93c34448b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,20 +17,131 @@ members = [ "kanidmd/score", "orca", "profiles", - "sketching", + "sketching" ] exclude = [ "kanidm_unix_int/pam_tester" ] +[workspace.package] +version = "1.1.0-alpha.9" +authors = [ + "William Brown ", + "James Hodgkinson ", + ] +rust-version = "1.64" +edition = "2021" +license = "MPL-2.0" +homepage = "https://github.com/kanidm/kanidm/" +repository = "https://github.com/kanidm/kanidm/" [workspace.dependencies] +async-std = { version = "^1.12.0", features = ["tokio1"] } +async-trait = "^0.1.57" +base32 = "^0.4.0" +base64 = "^0.13.0" +base64urlsafedata = "0.1.0" +bytes = "^1.1.0" +clap = { version = "^3.2", features = ["derive"] } +clap_complete = "^3.2.5" +# Forced by saffron +chrono = "^0.4.20" +compact_jwt = "^0.2.3" # compact_jwt = { path = "../compact_jwt" } +concread = "^0.4.0" # concread = { path = "../concread" } +crossbeam = "0.8.1" +criterion = "^0.4.0" +csv = "1.1.6" +dialoguer = "0.10.1" +dyn-clone = "^1.0.9" +fernet = "^0.2.0" +filetime = "^0.2.17" +futures = "^0.3.21" +futures-util = "^0.3.21" +gloo = "^0.8.0" +gloo-net = "0.2.4" +hashbrown = { version = "0.12.3", features = 
["serde", "inline-more", "ahash"] } +http-types = "^2.12.0" +idlset = "^0.2.4" # idlset = { path = "../idlset" } -# ldap3_server = { path = "../ldap3_server" } +js-sys = "^0.3.58" +# RENAME THIS +kanidm = { path = "./kanidmd/idm" } +kanidm_client = { path = "./kanidm_client" } +kanidm_proto = { path = "./kanidm_proto" } +kanidm_unix_int = { path = "./kanidm_unix_int" } +last-git-commit = "0.2.0" +# REMOVE this +lazy_static = "^1.4.0" +ldap3_proto = "^0.2.3" +libc = "^0.2.127" +libnss = "^0.4.0" +libsqlite3-sys = "^0.25.0" +lru = "^0.8.0" +mathru = "^0.13.0" +num_enum = "^0.5.7" +oauth2_ext = { version = "^4.1.0", package = "oauth2" } +openssl = "^0.10.41" +paste = "^1.0.9" +pkg-config = "^0.3.25" +profiles = { path = "./profiles" } +qrcode = "^0.12.0" +r2d2 = "^0.8.9" +r2d2_sqlite = "^0.21.0" +rand = "^0.8.5" +# try to remove this +rayon = "^1.5.3" +regex = "1.5.6" +reqwest = "0.11.11" +rpassword = "^7.0.0" +rusqlite = "^0.28.0" +saffron = "^0.1.0" +# Rename this! +score = { path = "./kanidmd/score" } +serde = "^1.0.142" +serde_cbor = { version = "0.12.0-dev", package = "serde_cbor_2" } +serde_json = "^1.0.83" +serde-wasm-bindgen = "0.4" +shellexpand = "^2.1.2" +sketching = { path = "./sketching" } +smartstring = "^1.0.1" +smolset = "^1.3.1" +sshkeys = "^0.3.1" +tide = "^0.16.0" +tide-compress = "0.10.6" +tide-openssl = "^0.1.1" + +# Unable to increase version due to removing ability to detect +# local platform time. +time = "=0.2.27" + +tikv-jemallocator = "0.5" + +tokio = "^1.21.1" +tokio-openssl = "^0.6.3" +tokio-util = "^0.7.4" + +toml = "^0.5.9" +touch = "^0.0.1" +tracing = { version = "^0.1.35", features = ["max_level_trace", "release_max_level_debug"] } +tracing-subscriber = { version = "^0.3.14", features = ["env-filter"] } + +# tracing-forest = { path = "/Users/william/development/tracing-forest/tracing-forest" } +tracing-forest = { git = "https://github.com/QnnOkabayashi/tracing-forest.git", rev = "48d78f7294ceee47a22eee5c80964143c4fb3fe1" } + +url = "^2.3.1" +urlencoding = "2.1.2" +users = "^0.11.0" +uuid = "^1.1.2" + +validator = "^0.16.0" + +wasm-bindgen = "^0.2.81" +wasm-bindgen-futures = "^0.4.30" +wasm-bindgen-test = "0.3.33" webauthn-authenticator-rs = "0.4.7" webauthn-rs = "0.4.7" @@ -40,6 +151,13 @@ webauthn-rs-proto = "0.4.7" # webauthn-rs = { path = "../webauthn-rs/webauthn-rs" } # webauthn-rs-core = { path = "../webauthn-rs/webauthn-rs-core" } # webauthn-rs-proto = { path = "../webauthn-rs/webauthn-rs-proto" } +web-sys = "^0.3.60" +whoami = "^1.2.3" + +yew = "^0.19.3" +yew-agent = "^0.1.0" +yew-router = "^0.16.0" +zxcvbn = "^2.2.1" # enshrinken the WASMs [profile.release.package.kanidmd_web_ui] diff --git a/kanidm_client/Cargo.toml b/kanidm_client/Cargo.toml index be83b9fc4..cf8cdd1b0 100644 --- a/kanidm_client/Cargo.toml +++ b/kanidm_client/Cargo.toml @@ -1,25 +1,26 @@ [package] name = "kanidm_client" -version = "1.1.0-alpha.9" -authors = ["William Brown "] -rust-version = "1.64" -edition = "2021" -license = "MPL-2.0" description = "Kanidm Client Library" documentation = "https://docs.rs/kanidm_client/latest/kanidm_client/" -homepage = "https://github.com/kanidm/kanidm/" -repository = "https://github.com/kanidm/kanidm/" + +version.workspace = true +authors.workspace = true +rust-version.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true [dependencies] -tracing = "^0.1.35" -reqwest = { version = "^0.11.11", features=["cookies", "json", "native-tls"] } -kanidm_proto = { path = "../kanidm_proto", 
version = "1.1.0-alpha.8" } -serde = { version = "^1.0.142", features = ["derive"] } -serde_json = "^1.0.83" -time = { version = "=0.2.27", features = ["serde", "std"] } -tokio = { version = "^1.21.1", features = ["rt", "net", "time", "macros", "sync", "signal"] } -toml = "^0.5.9" -uuid = { version = "^1.1.2", features = ["serde", "v4"] } -url = { version = "^2.3.1", features = ["serde"] } +tracing.workspace = true +reqwest = { workspace = true, features=["cookies", "json", "native-tls"] } +kanidm_proto.workspace = true +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +time = { workspace = true, features = ["serde", "std"] } +tokio = { workspace = true, features = ["rt", "net", "time", "macros", "sync", "signal"] } +toml.workspace = true +uuid = { workspace = true, features = ["serde", "v4"] } +url = { workspace = true, features = ["serde"] } webauthn-rs-proto = { workspace = true, features = ["wasm"] } diff --git a/kanidm_client/src/lib.rs b/kanidm_client/src/lib.rs index ce788e192..17f693dee 100644 --- a/kanidm_client/src/lib.rs +++ b/kanidm_client/src/lib.rs @@ -13,32 +13,26 @@ #[macro_use] extern crate tracing; -use reqwest::header::CONTENT_TYPE; -use serde::de::DeserializeOwned; -use serde::Deserialize; -use serde::Serialize; -use serde_json::error::Error as SerdeJsonError; +use std::collections::{BTreeMap, BTreeSet as Set}; use std::fmt::{Display, Formatter}; use std::fs::File; #[cfg(target_family = "unix")] // not needed for windows builds use std::fs::{metadata, Metadata}; -use std::io::ErrorKind; -use std::io::Read; - +use std::io::{ErrorKind, Read}; #[cfg(target_family = "unix")] // not needed for windows builds use std::os::unix::fs::MetadataExt; - -use std::collections::BTreeMap; -use std::collections::BTreeSet as Set; use std::path::Path; use std::time::Duration; + +use kanidm_proto::v1::*; +use reqwest::header::CONTENT_TYPE; +pub use reqwest::StatusCode; +use serde::de::DeserializeOwned; +use serde::{Deserialize, Serialize}; +use serde_json::error::Error as SerdeJsonError; use tokio::sync::RwLock; use url::Url; use uuid::Uuid; - -pub use reqwest::StatusCode; - -use kanidm_proto::v1::*; use webauthn_rs_proto::{ PublicKeyCredential, RegisterPublicKeyCredential, RequestChallengeResponse, }; @@ -1767,6 +1761,7 @@ impl KanidmClient { self.perform_patch_request(format!("/v1/oauth2/{}", id).as_str(), update_oauth2_rs) .await } + pub async fn idm_oauth2_rs_prefer_spn_username(&self, id: &str) -> Result<(), ClientError> { let mut update_oauth2_rs = Entry { attrs: BTreeMap::new(), diff --git a/kanidm_client/src/person.rs b/kanidm_client/src/person.rs index 7da638571..f01678279 100644 --- a/kanidm_client/src/person.rs +++ b/kanidm_client/src/person.rs @@ -1,11 +1,9 @@ -use crate::ClientError; -use crate::KanidmClient; -use kanidm_proto::v1::AccountUnixExtend; -use kanidm_proto::v1::CredentialStatus; -use kanidm_proto::v1::Entry; -use kanidm_proto::v1::SingleStringRequest; use std::collections::BTreeMap; +use kanidm_proto::v1::{AccountUnixExtend, CredentialStatus, Entry, SingleStringRequest}; + +use crate::{ClientError, KanidmClient}; + impl KanidmClient { pub async fn idm_person_account_list(&self) -> Result, ClientError> { self.perform_get_request("/v1/person").await diff --git a/kanidm_client/src/service_account.rs b/kanidm_client/src/service_account.rs index 8b9cfe327..0a6930113 100644 --- a/kanidm_client/src/service_account.rs +++ b/kanidm_client/src/service_account.rs @@ -1,13 +1,11 @@ -use crate::ClientError; -use crate::KanidmClient; -use 
diff --git a/kanidm_client/src/service_account.rs b/kanidm_client/src/service_account.rs
index 8b9cfe327..0a6930113 100644
--- a/kanidm_client/src/service_account.rs
+++ b/kanidm_client/src/service_account.rs
@@ -1,13 +1,11 @@
-use crate::ClientError;
-use crate::KanidmClient;
-use kanidm_proto::v1::AccountUnixExtend;
-use kanidm_proto::v1::CredentialStatus;
-use kanidm_proto::v1::Entry;
-use kanidm_proto::v1::{ApiToken, ApiTokenGenerate};
 use std::collections::BTreeMap;
+
+use kanidm_proto::v1::{AccountUnixExtend, ApiToken, ApiTokenGenerate, CredentialStatus, Entry};
 use time::OffsetDateTime;
 use uuid::Uuid;
 
+use crate::{ClientError, KanidmClient};
+
 impl KanidmClient {
     pub async fn idm_service_account_list(&self) -> Result<Vec<Entry>, ClientError> {
         self.perform_get_request("/v1/service_account").await
diff --git a/kanidm_proto/Cargo.toml b/kanidm_proto/Cargo.toml
index d4b5b837e..000b4fd06 100644
--- a/kanidm_proto/Cargo.toml
+++ b/kanidm_proto/Cargo.toml
@@ -1,30 +1,30 @@
 [package]
 name = "kanidm_proto"
-version = "1.1.0-alpha.9"
-authors = ["William Brown <william@blackhats.net.au>"]
-rust-version = "1.64"
-edition = "2021"
-license = "MPL-2.0"
 description = "Kanidm Protocol Bindings for serde"
 documentation = "https://docs.rs/kanidm_proto/latest/kanidm_proto/"
-homepage = "https://github.com/kanidm/kanidm/"
-repository = "https://github.com/kanidm/kanidm/"
+
+version.workspace = true
+authors.workspace = true
+rust-version.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
 
 [features]
 wasm = ["webauthn-rs-proto/wasm"]
 
 [dependencies]
-base32 = "^0.4.0"
-base64urlsafedata = "0.1.0"
-serde = { version = "^1.0.142", features = ["derive"] }
-serde_json = "^1.0.83"
-# Can not upgrade due to breaking timezone apis.
-time = { version = "=0.2.27", features = ["serde", "std"] }
-url = { version = "^2.3.1", features = ["serde"] }
-urlencoding = "2.1.2"
-uuid = { version = "^1.1.2", features = ["serde"] }
+base32.workspace = true
+base64urlsafedata.workspace = true
+serde = { workspace = true, features = ["derive"] }
+serde_json.workspace = true
+time = { workspace = true, features = ["serde", "std"] }
+url = { workspace = true, features = ["serde"] }
+urlencoding.workspace = true
+uuid = { workspace = true, features = ["serde"] }
 webauthn-rs-proto.workspace = true
 
 [target.'cfg(not(target_family = "wasm"))'.dependencies]
-last-git-commit = "0.2.0"
+last-git-commit.workspace = true
diff --git a/kanidm_proto/src/messages.rs b/kanidm_proto/src/messages.rs
index 79e2f6a7d..1ce3e5640 100644
--- a/kanidm_proto/src/messages.rs
+++ b/kanidm_proto/src/messages.rs
@@ -1,9 +1,10 @@
 // User-facing output things
 
-use serde::{Deserialize, Serialize};
 use std::fmt;
 use std::str::FromStr;
 
+use serde::{Deserialize, Serialize};
+
 /// This is used in user-facing CLIs to set the formatting for output,
 /// and defaults to text.
 #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)]
@@ -21,6 +22,7 @@ impl Default for ConsoleOutputMode {
 
 impl FromStr for ConsoleOutputMode {
     type Err = &'static str;
+
     /// This can be safely unwrap'd because it'll always return a default of text
     /// ```
     /// use kanidm_proto::messages::ConsoleOutputMode;
@@ -141,7 +143,6 @@ impl Default for AccountChangeMessage {
 /// msg.output_mode = ConsoleOutputMode::JSON;
 /// let expected_result = "{\"action\":\"cake_eating\",\"result\":\"It was amazing\",\"status\":\"success\",\"src_user\":\"Kani\",\"dest_user\":\"Krabby\"}";
 /// assert_eq!(format!("{}", msg), expected_result);
-///
 /// ```
 impl fmt::Display for AccountChangeMessage {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -149,7 +150,7 @@ impl fmt::Display for AccountChangeMessage {
             ConsoleOutputMode::JSON => write!(
                 f,
                 "{}",
-                serde_json::to_string(self).unwrap_or(format!("{:?}", self)) // if it fails to JSON serialize, just debug-dump it
+                serde_json::to_string(self).unwrap_or(format!("{:?}", self)) /* if it fails to JSON serialize, just debug-dump it */
             ),
             ConsoleOutputMode::Text => write!(
                 f,
@@ -182,20 +183,20 @@ impl Default for BasicMessage {
 
 /// This outputs in either JSON or Text depending on the output_mode setting
 /// ```
-/// use std::fmt::format;
 /// use kanidm_proto::messages::*;
+/// use std::fmt::format;
 /// let mut msg = BasicMessage::default();
-/// msg.action=String::from("cake_eating");
-/// msg.result=String::from("It was amazing");
+/// msg.action = String::from("cake_eating");
+/// msg.result = String::from("It was amazing");
 /// assert_eq!(msg.status, MessageStatus::Success);
 ///
 /// let expected_result = "success - cake_eating: It was amazing";
 /// assert_eq!(format!("{}", msg), expected_result);
 ///
 /// msg.output_mode = ConsoleOutputMode::JSON;
-/// let expected_result = "{\"action\":\"cake_eating\",\"result\":\"It was amazing\",\"status\":\"success\"}";
+/// let expected_result =
+///     "{\"action\":\"cake_eating\",\"result\":\"It was amazing\",\"status\":\"success\"}";
 /// assert_eq!(format!("{}", msg), expected_result);
-///
 /// ```
 impl fmt::Display for BasicMessage {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -203,7 +204,7 @@ impl fmt::Display for BasicMessage {
             ConsoleOutputMode::JSON => write!(
                 f,
                 "{}",
-                serde_json::to_string(self).unwrap_or(format!("{:?}", self)) // if it fails to JSON serialize, just debug-dump it
+                serde_json::to_string(self).unwrap_or(format!("{:?}", self)) /* if it fails to JSON serialize, just debug-dump it */
             ),
             ConsoleOutputMode::Text => {
                 write!(f, "{} - {}: {}", self.status, self.action, self.result,)
diff --git a/kanidm_proto/src/oauth2.rs b/kanidm_proto/src/oauth2.rs
index 987d0a345..a2c134356 100644
--- a/kanidm_proto/src/oauth2.rs
+++ b/kanidm_proto/src/oauth2.rs
@@ -1,6 +1,7 @@
+use std::collections::BTreeMap;
+
 use base64urlsafedata::Base64UrlSafeData;
 use serde::{Deserialize, Serialize};
-use std::collections::BTreeMap;
 use url::Url;
 
 #[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone, Copy)]
diff --git a/kanidm_proto/src/v1.rs b/kanidm_proto/src/v1.rs
index ef37de3e7..29ff9a2d4 100644
--- a/kanidm_proto/src/v1.rs
+++ b/kanidm_proto/src/v1.rs
@@ -1,8 +1,8 @@
-use serde::{Deserialize, Serialize};
 use std::cmp::Ordering;
-use std::collections::BTreeMap;
-use std::collections::BTreeSet;
+use std::collections::{BTreeMap, BTreeSet};
 use std::fmt;
+
+use serde::{Deserialize, Serialize};
 use uuid::Uuid;
 use webauthn_rs_proto::{
     CreationChallengeResponse, PublicKeyCredential, RegisterPublicKeyCredential,
@@ -1068,8 +1068,7 @@ impl SingleStringRequest {
 
 #[cfg(test)]
 mod tests {
-    use crate::v1::Filter as ProtoFilter;
-    use crate::v1::{TotpAlgo, TotpSecret};
+    use crate::v1::{Filter as ProtoFilter, TotpAlgo, TotpSecret};
 
     #[test]
     fn test_protofilter_simple() {
diff --git a/kanidm_tools/Cargo.toml b/kanidm_tools/Cargo.toml
index 293add408..01950e153 100644
--- a/kanidm_tools/Cargo.toml
+++ b/kanidm_tools/Cargo.toml
@@ -1,15 +1,16 @@
 [package]
 name = "kanidm_tools"
-version = "1.1.0-alpha.9"
-authors = ["William Brown <william@blackhats.net.au>"]
-rust-version = "1.64"
-edition = "2021"
 default-run = "kanidm"
-license = "MPL-2.0"
 description = "Kanidm Client Tools"
 documentation = "https://docs.rs/kanidm_tools/latest/kanidm_tools/"
-homepage = "https://github.com/kanidm/kanidm/"
-repository = "https://github.com/kanidm/kanidm/"
+
+version.workspace = true
+authors.workspace = true
+rust-version.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
 
 [lib]
 name = "kanidm_cli"
@@ -29,28 +30,28 @@ name = "kanidm_badlist_preprocess"
 path = "src/badlist_preprocess.rs"
 
 [dependencies]
-clap = { version = "^3.2", features = ["derive", "env"] }
-compact_jwt = "^0.2.3"
-dialoguer = "^0.10.1"
-libc = "^0.2.127"
-kanidm_client = { path = "../kanidm_client", version = "1.1.0-alpha.8" }
-kanidm_proto = { path = "../kanidm_proto", version = "1.1.0-alpha.8" }
-qrcode = { version = "^0.12.0", default-features = false }
-rayon = "^1.5.3"
-rpassword = "^7.0.0"
-serde = { version = "^1.0.142", features = ["derive"] }
-serde_json = "^1.0.83"
-shellexpand = "^2.1.2"
-time = { version = "=0.2.27", features = ["serde", "std"] }
-tracing = "^0.1.35"
-tracing-subscriber = { version = "^0.3.14", features = ["env-filter", "fmt"] }
-tokio = { version = "^1.21.1", features = ["rt", "macros"] }
-url = { version = "^2.3.1", features = ["serde"] }
-uuid = "^1.1.2"
+clap = { workspace = true, features = ["derive", "env"] }
+compact_jwt.workspace = true
+dialoguer.workspace = true
+libc.workspace = true
+kanidm_client.workspace = true
+kanidm_proto.workspace = true
+qrcode = { workspace = true, default-features = false }
+rayon.workspace = true
+rpassword.workspace = true
+serde = { workspace = true, features = ["derive"] }
+serde_json.workspace = true
+shellexpand.workspace = true
+time = { workspace = true, features = ["serde", "std"] }
+tracing.workspace = true
+tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }
+tokio = { workspace = true, features = ["rt", "macros"] }
+url = { workspace = true, features = ["serde"] }
+uuid.workspace = true
 webauthn-authenticator-rs = { workspace = true, features = ["u2fhid"] }
-zxcvbn = "^2.2.1"
+zxcvbn.workspace = true
 
 [build-dependencies]
-clap = { version = "^3.2", features = ["derive"] }
-clap_complete = { version = "^3.2.5"}
-uuid = "^1.1.2"
+clap = { workspace = true, features = ["derive"] }
+clap_complete.workspace = true
+uuid.workspace = true
diff --git a/kanidm_tools/build.rs b/kanidm_tools/build.rs
index 9468b3b44..0cd62aa96 100644
--- a/kanidm_tools/build.rs
+++ b/kanidm_tools/build.rs
@@ -2,10 +2,10 @@
 use std::env;
 use std::path::PathBuf;
 
-use uuid::Uuid;
 use clap::{CommandFactory, Parser};
 use clap_complete::{generate_to, Shell};
+use uuid::Uuid;
 
 include!("src/opt/ssh_authorizedkeys.rs");
 include!("src/opt/badlist_preprocess.rs");
diff --git a/kanidm_tools/src/badlist_preprocess.rs b/kanidm_tools/src/badlist_preprocess.rs
index 68e238853..5e9dceaf5 100644
--- a/kanidm_tools/src/badlist_preprocess.rs
+++ b/kanidm_tools/src/badlist_preprocess.rs
@@ -14,9 +14,8 @@ use std::io::BufWriter;
 use std::path::PathBuf;
 use std::sync::atomic::{AtomicUsize, Ordering};
 
-use kanidm_proto::v1::Modify;
-
 use clap::Parser;
+use kanidm_proto::v1::Modify;
 use rayon::prelude::*;
 use tracing::{debug, error, info};
 
diff --git a/kanidm_tools/src/cli/common.rs b/kanidm_tools/src/cli/common.rs
index 71e89cdb2..d2cd0069e 100644
--- a/kanidm_tools/src/cli/common.rs
+++ b/kanidm_tools/src/cli/common.rs
@@ -1,11 +1,14 @@
-use crate::session::read_tokens;
-use crate::CommonOpt;
+use std::str::FromStr;
+
 use compact_jwt::{Jws, JwsUnverified};
-use dialoguer::{theme::ColorfulTheme, Select};
+use dialoguer::theme::ColorfulTheme;
+use dialoguer::Select;
 use kanidm_client::{KanidmClient, KanidmClientBuilder};
 use kanidm_proto::constants::{DEFAULT_CLIENT_CONFIG_PATH, DEFAULT_CLIENT_CONFIG_PATH_HOME};
 use kanidm_proto::v1::UserAuthToken;
-use std::str::FromStr;
+
+use crate::session::read_tokens;
+use crate::CommonOpt;
 
 impl CommonOpt {
     pub fn to_unauth_client(&self) -> KanidmClient {
diff --git a/kanidm_tools/src/cli/lib.rs b/kanidm_tools/src/cli/lib.rs
index ecf64dfa8..73f48821c 100644
--- a/kanidm_tools/src/cli/lib.rs
+++ b/kanidm_tools/src/cli/lib.rs
@@ -15,6 +15,7 @@ extern crate tracing;
 
 use std::path::PathBuf;
+
 use uuid::Uuid;
 
 include!("../opt/kanidm.rs");
diff --git a/kanidm_tools/src/cli/main.rs b/kanidm_tools/src/cli/main.rs
index 5c2c8fbc2..b9321208d 100644
--- a/kanidm_tools/src/cli/main.rs
+++ b/kanidm_tools/src/cli/main.rs
@@ -13,7 +13,8 @@
 use clap::Parser;
 use kanidm_cli::KanidmClientParser;
-use tracing_subscriber::{fmt, prelude::*, EnvFilter};
+use tracing_subscriber::prelude::*;
+use tracing_subscriber::{fmt, EnvFilter};
 
 #[tokio::main(flavor = "current_thread")]
 async fn main() {
diff --git a/kanidm_tools/src/cli/person.rs b/kanidm_tools/src/cli/person.rs
index 28da66ce9..809364510 100644
--- a/kanidm_tools/src/cli/person.rs
+++ b/kanidm_tools/src/cli/person.rs
@@ -1,22 +1,25 @@
-use crate::password_prompt;
-use crate::{
-    AccountCredential, AccountRadius, AccountSsh, AccountValidity, PersonOpt, PersonPosix,
-};
-use dialoguer::{theme::ColorfulTheme, Select};
-use dialoguer::{Confirm, Input, Password};
+use std::fmt::{self, Debug};
+use std::str::FromStr;
+
+use dialoguer::theme::ColorfulTheme;
+use dialoguer::{Confirm, Input, Password, Select};
 use kanidm_client::ClientError::Http as ClientErrorHttp;
 use kanidm_client::KanidmClient;
 use kanidm_proto::messages::{AccountChangeMessage, ConsoleOutputMode, MessageStatus};
 use kanidm_proto::v1::OperationError::PasswordQuality;
 use kanidm_proto::v1::{CUIntentToken, CURegState, CUSessionToken, CUStatus, TotpSecret};
-use qrcode::{render::unicode, QrCode};
-use std::fmt::{self, Debug};
-use std::str::FromStr;
+use qrcode::render::unicode;
+use qrcode::QrCode;
 use time::OffsetDateTime;
 use url::Url;
 use uuid::Uuid;
+use webauthn_authenticator_rs::u2fhid::U2FHid;
+use webauthn_authenticator_rs::WebauthnAuthenticator;
 
-use webauthn_authenticator_rs::{u2fhid::U2FHid, WebauthnAuthenticator};
+use crate::{
+    password_prompt, AccountCredential, AccountRadius, AccountSsh, AccountValidity, PersonOpt,
+    PersonPosix,
+};
 
 impl PersonOpt {
     pub fn debug(&self) -> bool {
diff --git a/kanidm_tools/src/cli/raw.rs b/kanidm_tools/src/cli/raw.rs
index 99e33d186..fb7cb1907 100644
--- a/kanidm_tools/src/cli/raw.rs
+++ b/kanidm_tools/src/cli/raw.rs
@@ -1,14 +1,14 @@
-use crate::RawOpt;
-use kanidm_proto::v1::{Entry, Filter, Modify, ModifyList};
 use std::collections::BTreeMap;
-
 use std::error::Error;
 use std::fs::File;
 use std::io::BufReader;
 use std::path::Path;
 
+use kanidm_proto::v1::{Entry, Filter, Modify, ModifyList};
 use serde::de::DeserializeOwned;
 
+use crate::RawOpt;
+
 fn read_file<T: DeserializeOwned, P: AsRef<Path>>(path: P) -> Result<T, Box<dyn Error>> {
     let f = File::open(path)?;
     let r = BufReader::new(f);
diff --git a/kanidm_tools/src/cli/serviceaccount.rs b/kanidm_tools/src/cli/serviceaccount.rs
index 190945d31..af6a9b898 100644
--- a/kanidm_tools/src/cli/serviceaccount.rs
+++ b/kanidm_tools/src/cli/serviceaccount.rs
@@ -1,9 +1,10 @@
+use kanidm_proto::messages::{AccountChangeMessage, ConsoleOutputMode, MessageStatus};
+use time::OffsetDateTime;
+
 use crate::{
     AccountSsh, AccountValidity, ServiceAccountApiToken, ServiceAccountCredential,
     ServiceAccountOpt, ServiceAccountPosix,
 };
-use kanidm_proto::messages::{AccountChangeMessage, ConsoleOutputMode, MessageStatus};
-use time::OffsetDateTime;
 
 impl ServiceAccountOpt {
     pub fn debug(&self) -> bool {
diff --git a/kanidm_tools/src/cli/session.rs b/kanidm_tools/src/cli/session.rs
index ab7c9870a..90bad6665 100644
--- a/kanidm_tools/src/cli/session.rs
+++ b/kanidm_tools/src/cli/session.rs
@@ -1,23 +1,22 @@
-use crate::common::prompt_for_username_get_username;
-use crate::{LoginOpt, LogoutOpt, SessionOpt};
+use std::collections::BTreeMap;
+use std::fs::{create_dir, File};
+use std::io::{self, BufReader, BufWriter, ErrorKind, Write};
+use std::path::PathBuf;
+use std::str::FromStr;
 
+use compact_jwt::JwsUnverified;
+use dialoguer::theme::ColorfulTheme;
+use dialoguer::Select;
 use kanidm_client::{ClientError, KanidmClient};
 use kanidm_proto::v1::{AuthAllowed, AuthResponse, AuthState, UserAuthToken};
 #[cfg(target_family = "unix")]
 use libc::umask;
-use std::collections::BTreeMap;
-use std::fs::{create_dir, File};
-use std::io::ErrorKind;
-use std::io::{self, BufReader, BufWriter, Write};
-use std::path::PathBuf;
-use std::str::FromStr;
-use webauthn_authenticator_rs::{
-    prelude::RequestChallengeResponse, u2fhid::U2FHid, WebauthnAuthenticator,
-};
+use webauthn_authenticator_rs::prelude::RequestChallengeResponse;
+use webauthn_authenticator_rs::u2fhid::U2FHid;
+use webauthn_authenticator_rs::WebauthnAuthenticator;
 
-use dialoguer::{theme::ColorfulTheme, Select};
-
-use compact_jwt::JwsUnverified;
+use crate::common::prompt_for_username_get_username;
+use crate::{LoginOpt, LogoutOpt, SessionOpt};
 
 static TOKEN_DIR: &str = "~/.cache";
 static TOKEN_PATH: &str = "~/.cache/kanidm_tokens";
diff --git a/kanidm_tools/src/ssh_authorizedkeys.rs b/kanidm_tools/src/ssh_authorizedkeys.rs
index 3de10f740..29b209d87 100644
--- a/kanidm_tools/src/ssh_authorizedkeys.rs
+++ b/kanidm_tools/src/ssh_authorizedkeys.rs
@@ -12,7 +12,6 @@ use std::path::PathBuf;
 
 use clap::Parser;
 use kanidm_client::{ClientError, KanidmClientBuilder};
-
 use kanidm_proto::constants::{DEFAULT_CLIENT_CONFIG_PATH, DEFAULT_CLIENT_CONFIG_PATH_HOME};
 use tracing::{debug, error};
 
diff --git a/kanidm_unix_int/Cargo.toml b/kanidm_unix_int/Cargo.toml
index 80f0d93a7..248182e4e 100644
--- a/kanidm_unix_int/Cargo.toml
+++ b/kanidm_unix_int/Cargo.toml
@@ -1,14 +1,15 @@
 [package]
 name = "kanidm_unix_int"
-version = "1.1.0-alpha.9"
-authors = ["William Brown <william@blackhats.net.au>"]
-rust-version = "1.64"
-edition = "2021"
-license = "MPL-2.0"
 description = "Kanidm Unix Integration Clients"
 documentation = "https://docs.rs/kanidm/latest/kanidm/"
-homepage = "https://github.com/kanidm/kanidm/"
-repository = "https://github.com/kanidm/kanidm/"
+
+version.workspace = true
+authors.workspace = true
+rust-version.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
 
 [lib]
 name = "kanidm_unix_common"
@@ -43,46 +44,42 @@ name = "kanidm_test_auth"
 path = "src/test_auth.rs"
 
 [dependencies]
-kanidm_client = { path = "../kanidm_client" }
-kanidm_proto = { path = "../kanidm_proto" }
-kanidm = { path = "../kanidmd/idm" }
+bytes.workspace = true
+clap = { workspace = true, features = ["derive", "env"] }
+futures.workspace = true
+libc.workspace = true
+libsqlite3-sys.workspace = true
+lru.workspace = true
+kanidm_client.workspace = true
+kanidm_proto.workspace = true
+# This is just used for password hashing and tests, so we could
+# clean this up
+kanidm.workspace = true
 
-tracing = "^0.1.35"
-sketching = { path = "../sketching" }
+r2d2.workspace = true
+r2d2_sqlite.workspace = true
+rpassword.workspace = true
+rusqlite.workspace = true
+serde = { workspace = true, features = ["derive"] }
+serde_json.workspace = true
+sketching.workspace = true
 
-toml = "^0.5.9"
-rpassword = "^7.0.0"
-tokio = { version = "^1.21.1", features = ["rt", "macros", "sync", "time", "net", "io-util"] }
-tokio-util = { version = "^0.7.4", features = ["codec"] }
-
-futures = "^0.3.21"
-bytes = "^1.1.0"
-
-libc = "^0.2.127"
-serde = { version = "^1.0.142", features = ["derive"] }
-serde_json = "^1.0.83"
-clap = { version = "^3.2", features = ["derive", "env"] }
-
-libsqlite3-sys = "0.25.0"
-rusqlite = "^0.28.0"
-r2d2 = "^0.8.10"
-r2d2_sqlite = "^0.21.0"
-
-reqwest = "^0.11.11"
-
-users = "^0.11.0"
-
-lru = "^0.8.0"
+toml.workspace = true
+tokio = { workspace = true, features = ["rt", "macros", "sync", "time", "net", "io-util"] }
+tokio-util = { workspace = true, features = ["codec"] }
+tracing.workspace = true
+reqwest.workspace = true
+users.workspace = true
 
 [features]
 # default = [ "libsqlite3-sys/bundled" ]
 
 [dev-dependencies]
 # kanidm = { path = "../kanidmd/idm" }
-score = { path = "../kanidmd/score" }
+score.workspace = true
 
 [build-dependencies]
-clap = { version = "^3.2", features = ["derive"] }
-clap_complete = "^3.2.5"
-profiles = { path = "../profiles" }
+clap = { workspace = true, features = ["derive"] }
+clap_complete.workspace = true
+profiles.workspace = true
diff --git a/kanidm_unix_int/build.rs b/kanidm_unix_int/build.rs
index 83e5e6656..4e0898aba 100644
--- a/kanidm_unix_int/build.rs
+++ b/kanidm_unix_int/build.rs
@@ -1,11 +1,10 @@
 #![allow(dead_code)]
 use std::env;
+use std::path::PathBuf;
 
 use clap::{IntoApp, Parser};
 use clap_complete::{generate_to, Shell};
-use std::path::PathBuf;
-
 include!("src/opt/ssh_authorizedkeys.rs");
 include!("src/opt/cache_invalidate.rs");
 include!("src/opt/cache_clear.rs");
diff --git a/kanidm_unix_int/nss_kanidm/Cargo.toml b/kanidm_unix_int/nss_kanidm/Cargo.toml
index 2759a262b..49d6292e0 100644
--- a/kanidm_unix_int/nss_kanidm/Cargo.toml
+++ b/kanidm_unix_int/nss_kanidm/Cargo.toml
@@ -1,9 +1,13 @@
 [package]
 name = "nss_kanidm"
-version = "1.1.0-alpha.9"
-authors = ["William Brown <william@blackhats.net.au>"]
-rust-version = "1.59"
-edition = "2021"
+
+version.workspace = true
+authors.workspace = true
+rust-version.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
 
 [lib]
 name = "nss_kanidm"
 crate-type = [ "cdylib" ]
 path = "src/lib.rs"
 
 [dependencies]
-kanidm_unix_int = { path = "../" }
-libnss = "^0.4.0"
-libc = "^0.2.127"
-paste = "^1.0.9"
-lazy_static = "^1.4.0"
+kanidm_unix_int.workspace = true
+libnss.workspace = true
+libc.workspace = true
+paste.workspace = true
+lazy_static.workspace = true
diff --git a/kanidm_unix_int/nss_kanidm/src/lib.rs b/kanidm_unix_int/nss_kanidm/src/lib.rs
index e41b9d731..84cea4273 100644
--- a/kanidm_unix_int/nss_kanidm/src/lib.rs
+++ b/kanidm_unix_int/nss_kanidm/src/lib.rs
@@ -19,7 +19,6 @@ use kanidm_unix_common::client_sync::call_daemon_blocking;
 use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
 use kanidm_unix_common::unix_config::KanidmUnixdConfig;
 use kanidm_unix_common::unix_proto::{ClientRequest, ClientResponse, NssGroup, NssUser};
-
 use libnss::group::{Group, GroupHooks};
 use libnss::interop::Response;
 use libnss::passwd::{Passwd, PasswdHooks};
diff --git a/kanidm_unix_int/pam_kanidm/Cargo.toml b/kanidm_unix_int/pam_kanidm/Cargo.toml
index 9db9c9bf5..d7931c623 100644
--- a/kanidm_unix_int/pam_kanidm/Cargo.toml
+++ b/kanidm_unix_int/pam_kanidm/Cargo.toml
@@ -1,19 +1,23 @@
 [package]
 name = "pam_kanidm"
-version = "1.1.0-alpha.9"
-authors = ["William Brown <william@blackhats.net.au>"]
-rust-version = "1.59"
-edition = "2021"
 links = "pam"
 
+version.workspace = true
+authors.workspace = true
+rust-version.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
+
 [lib]
 name = "pam_kanidm"
 crate-type = [ "cdylib" ]
 path = "src/lib.rs"
 
 [dependencies]
-kanidm_unix_int = { path = "../" }
-libc = "^0.2.127"
+kanidm_unix_int.workspace = true
+libc.workspace = true
 
 [build-dependencies]
-pkg-config = "^0.3.25"
+pkg-config.workspace = true
diff --git a/kanidm_unix_int/pam_kanidm/src/lib.rs b/kanidm_unix_int/pam_kanidm/src/lib.rs
index e0b5996e6..f473e51ca 100644
--- a/kanidm_unix_int/pam_kanidm/src/lib.rs
+++ b/kanidm_unix_int/pam_kanidm/src/lib.rs
@@ -14,19 +14,20 @@
 // extern crate libc;
 
 mod pam;
-use crate::pam::constants::*;
-use crate::pam::conv::PamConv;
-use crate::pam::module::{PamHandle, PamHooks};
-
 use std::collections::BTreeSet;
 use std::convert::TryFrom;
 use std::ffi::CStr;
+
 // use std::os::raw::c_char;
 use kanidm_unix_common::client_sync::call_daemon_blocking;
 use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
 use kanidm_unix_common::unix_config::KanidmUnixdConfig;
 use kanidm_unix_common::unix_proto::{ClientRequest, ClientResponse};
 
+use crate::pam::constants::*;
+use crate::pam::conv::PamConv;
+use crate::pam::module::{PamHandle, PamHooks};
+
 #[derive(Debug)]
 struct Options {
     debug: bool,
diff --git a/kanidm_unix_int/pam_kanidm/src/pam/conv.rs b/kanidm_unix_int/pam_kanidm/src/pam/conv.rs
index aecc8d955..166fdf266 100644
--- a/kanidm_unix_int/pam_kanidm/src/pam/conv.rs
+++ b/kanidm_unix_int/pam_kanidm/src/pam/conv.rs
@@ -1,9 +1,9 @@
-use libc::{c_char, c_int};
 use std::ffi::{CStr, CString};
 use std::ptr;
 
-use crate::pam::constants::PamResultCode;
-use crate::pam::constants::*;
+use libc::{c_char, c_int};
+
+use crate::pam::constants::{PamResultCode, *};
 use crate::pam::module::{PamItem, PamResult};
 
 #[allow(missing_copy_implementations)]
diff --git a/kanidm_unix_int/pam_kanidm/src/pam/macros.rs b/kanidm_unix_int/pam_kanidm/src/pam/macros.rs
index 393beca9e..9986e28f1 100644
--- a/kanidm_unix_int/pam_kanidm/src/pam/macros.rs
+++ b/kanidm_unix_int/pam_kanidm/src/pam/macros.rs
@@ -7,10 +7,11 @@
 /// Here is full example of a PAM module that would authenticate and authorize everybody:
 ///
 /// ```
-/// #[macro_use] extern crate pam;
+/// #[macro_use]
+/// extern crate pam;
 ///
-/// use pam::module::{PamHooks, PamHandle};
-/// use pam::constants::{PamResultCode, PamFlag};
+/// use pam::constants::{PamFlag, PamResultCode};
+/// use pam::module::{PamHandle, PamHooks};
 /// use std::ffi::CStr;
 ///
 /// # fn main() {}
@@ -18,15 +19,15 @@
 /// pam_hooks!(MyPamModule);
 ///
 /// impl PamHooks for MyPamModule {
-///    fn sm_authenticate(pamh: &PamHandle, args: Vec<&CStr>, flags: PamFlag) -> PamResultCode {
-///        println!("Everybody is authenticated!");
-///        PamResultCode::PAM_SUCCESS
-///    }
+///     fn sm_authenticate(pamh: &PamHandle, args: Vec<&CStr>, flags: PamFlag) -> PamResultCode {
+///         println!("Everybody is authenticated!");
+///         PamResultCode::PAM_SUCCESS
+///     }
 ///
-///    fn acct_mgmt(pamh: &PamHandle, args: Vec<&CStr>, flags: PamFlag) -> PamResultCode {
-///        println!("Everybody is authorized!");
-///        PamResultCode::PAM_SUCCESS
-///    }
+///     fn acct_mgmt(pamh: &PamHandle, args: Vec<&CStr>, flags: PamFlag) -> PamResultCode {
+///         println!("Everybody is authorized!");
+///         PamResultCode::PAM_SUCCESS
+///     }
 /// }
 /// ```
 #[macro_export]
@@ -36,6 +37,7 @@ macro_rules! pam_hooks {
         mod pam_hooks_scope {
             use std::ffi::CStr;
             use std::os::raw::{c_char, c_int};
+
             use $crate::pam::constants::{PamFlag, PamResultCode};
             use $crate::pam::module::{PamHandle, PamHooks};
 
diff --git a/kanidm_unix_int/pam_kanidm/src/pam/module.rs b/kanidm_unix_int/pam_kanidm/src/pam/module.rs
index 994768ff5..4760b75ea 100755
--- a/kanidm_unix_int/pam_kanidm/src/pam/module.rs
+++ b/kanidm_unix_int/pam_kanidm/src/pam/module.rs
@@ -1,9 +1,10 @@
 //! Functions for use in pam modules.
 
-use libc::c_char;
 use std::ffi::{CStr, CString};
 use std::{mem, ptr};
 
+use libc::c_char;
+
 use crate::pam::constants::{PamFlag, PamItemType, PamResultCode, PAM_AUTHTOK};
 
 /// Opaque type, used as a pointer when making pam API calls.
diff --git a/kanidm_unix_int/pam_tester/Cargo.toml b/kanidm_unix_int/pam_tester/Cargo.toml
index b8634f588..bf0a279d8 100644
--- a/kanidm_unix_int/pam_tester/Cargo.toml
+++ b/kanidm_unix_int/pam_tester/Cargo.toml
@@ -1,11 +1,14 @@
 [package]
 name = "pam_tester"
-version = "0.1.2"
-authors = ["William Brown <william@blackhats.net.au>"]
-rust-version = "1.59"
-edition = "2021"
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
+version.workspace = true
+authors.workspace = true
+rust-version.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
 
 [dependencies]
 pam = "^0.7.0"
+
diff --git a/kanidm_unix_int/src/cache.rs b/kanidm_unix_int/src/cache.rs
index 9b54b67b1..744568e5f 100644
--- a/kanidm_unix_int/src/cache.rs
+++ b/kanidm_unix_int/src/cache.rs
@@ -1,19 +1,20 @@
-use crate::db::Db;
-use crate::unix_config::{HomeAttr, UidAttr};
-use crate::unix_proto::{HomeDirectoryInfo, NssGroup, NssUser};
-use kanidm_client::ClientError;
-use kanidm_client::KanidmClient;
-use kanidm_proto::v1::{OperationError, UnixGroupToken, UnixUserToken};
-use lru::LruCache;
-use reqwest::StatusCode;
 use std::collections::BTreeSet;
 use std::num::NonZeroUsize;
 use std::ops::{Add, Sub};
 use std::path::Path;
 use std::string::ToString;
 use std::time::{Duration, SystemTime};
+
+use kanidm_client::{ClientError, KanidmClient};
+use kanidm_proto::v1::{OperationError, UnixGroupToken, UnixUserToken};
+use lru::LruCache;
+use reqwest::StatusCode;
 use tokio::sync::{Mutex, RwLock};
 
+use crate::db::Db;
+use crate::unix_config::{HomeAttr, UidAttr};
+use crate::unix_proto::{HomeDirectoryInfo, NssGroup, NssUser};
+
 const NXCACHE_SIZE: usize = 2048;
 
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
diff --git a/kanidm_unix_int/src/cache_clear.rs b/kanidm_unix_int/src/cache_clear.rs
index 3101a7ea0..7dd3a5eae 100644
--- a/kanidm_unix_int/src/cache_clear.rs
+++ b/kanidm_unix_int/src/cache_clear.rs
@@ -14,9 +14,7 @@ extern crate tracing;
 
 use clap::Parser;
-
 use futures::executor::block_on;
-
 use kanidm_unix_common::client::call_daemon;
 use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
 use kanidm_unix_common::unix_config::KanidmUnixdConfig;
diff --git a/kanidm_unix_int/src/cache_invalidate.rs b/kanidm_unix_int/src/cache_invalidate.rs
index c6edf0200..0a59795aa 100644
--- a/kanidm_unix_int/src/cache_invalidate.rs
+++ b/kanidm_unix_int/src/cache_invalidate.rs
@@ -14,9 +14,7 @@ extern crate tracing;
 
 use clap::Parser;
-
 use futures::executor::block_on;
-
 use kanidm_unix_common::client::call_daemon;
 use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
 use kanidm_unix_common::unix_config::KanidmUnixdConfig;
diff --git a/kanidm_unix_int/src/client.rs b/kanidm_unix_int/src/client.rs
index fd10a8439..84ceec92a 100644
--- a/kanidm_unix_int/src/client.rs
+++ b/kanidm_unix_int/src/client.rs
@@ -1,9 +1,8 @@
-use bytes::{BufMut, BytesMut};
-use futures::SinkExt;
-use futures::StreamExt;
 use std::error::Error;
-use std::io::Error as IoError;
-use std::io::ErrorKind;
+use std::io::{Error as IoError, ErrorKind};
+
+use bytes::{BufMut, BytesMut};
+use futures::{SinkExt, StreamExt};
 use tokio::net::UnixStream;
 // use tokio::runtime::Builder;
 use tokio_util::codec::Framed;
@@ -14,8 +13,8 @@ use crate::unix_proto::{ClientRequest, ClientResponse};
 struct ClientCodec;
 
 impl Decoder for ClientCodec {
-    type Item = ClientResponse;
     type Error = IoError;
+    type Item = ClientResponse;
 
     fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
         match serde_json::from_slice::<ClientResponse>(&src) {
diff --git a/kanidm_unix_int/src/client_sync.rs b/kanidm_unix_int/src/client_sync.rs
index 16e19042f..42d2261e6 100644
--- a/kanidm_unix_int/src/client_sync.rs
+++ b/kanidm_unix_int/src/client_sync.rs
@@ -1,9 +1,6 @@
 use std::error::Error;
-use std::io::Error as IoError;
-use std::io::ErrorKind;
-use std::io::{Read, Write};
+use std::io::{Error as IoError, ErrorKind, Read, Write};
 use std::os::unix::net::UnixStream;
-
 use std::time::{Duration, SystemTime};
 
 use crate::unix_proto::{ClientRequest, ClientResponse};
diff --git a/kanidm_unix_int/src/daemon.rs b/kanidm_unix_int/src/daemon.rs
index 7701b882e..d03a62928 100644
--- a/kanidm_unix_int/src/daemon.rs
+++ b/kanidm_unix_int/src/daemon.rs
@@ -10,10 +10,18 @@
 #![deny(clippy::needless_pass_by_value)]
 #![deny(clippy::trivially_copy_pass_by_ref)]
 
+use std::error::Error;
+use std::fs::metadata;
+use std::io;
+use std::io::{Error as IoError, ErrorKind};
+use std::os::unix::fs::MetadataExt;
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+use std::time::Duration;
+
 use bytes::{BufMut, BytesMut};
 use clap::{Arg, ArgAction, Command};
-use futures::SinkExt;
-use futures::StreamExt;
+use futures::{SinkExt, StreamExt};
 use kanidm::utils::file_permissions_readonly;
 use kanidm_client::KanidmClientBuilder;
 use kanidm_proto::constants::DEFAULT_CLIENT_CONFIG_PATH;
@@ -22,22 +30,14 @@ use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
 use kanidm_unix_common::unix_config::KanidmUnixdConfig;
 use kanidm_unix_common::unix_proto::{ClientRequest, ClientResponse, TaskRequest, TaskResponse};
 use libc::umask;
-use sketching::tracing_forest::{self, traits::*, util::*};
-use std::error::Error;
-use std::fs::metadata;
-use std::io;
-use std::io::Error as IoError;
-use std::io::ErrorKind;
-use std::os::unix::fs::MetadataExt;
-use std::path::{Path, PathBuf};
-use std::sync::Arc;
-use std::time::Duration;
+use sketching::tracing_forest::traits::*;
+use sketching::tracing_forest::util::*;
+use sketching::tracing_forest::{self};
 use tokio::net::{UnixListener, UnixStream};
 use tokio::sync::mpsc::{channel, Receiver, Sender};
 use tokio::sync::oneshot;
 use tokio::time;
-use tokio_util::codec::Framed;
-use tokio_util::codec::{Decoder, Encoder};
+use tokio_util::codec::{Decoder, Encoder, Framed};
 use users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid};
 
 //=== the codec
@@ -47,8 +47,8 @@ type AsyncTaskRequest = (TaskRequest, oneshot::Sender<()>);
 struct ClientCodec;
 
 impl Decoder for ClientCodec {
-    type Item = ClientRequest;
     type Error = io::Error;
+    type Item = ClientRequest;
 
     fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
         match serde_json::from_slice::<ClientRequest>(&src) {
@@ -85,8 +85,8 @@ impl ClientCodec {
 struct TaskCodec;
 
 impl Decoder for TaskCodec {
-    type Item = TaskResponse;
     type Error = io::Error;
+    type Item = TaskResponse;
 
     fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
         match serde_json::from_slice::<TaskResponse>(&src) {
diff --git a/kanidm_unix_int/src/daemon_status.rs b/kanidm_unix_int/src/daemon_status.rs
index afc9cf4bc..b8370b85e 100644
--- a/kanidm_unix_int/src/daemon_status.rs
+++ b/kanidm_unix_int/src/daemon_status.rs
@@ -13,12 +13,10 @@
 #[macro_use]
 extern crate tracing;
 
-use clap::Parser;
-
 use std::path::PathBuf;
 
+use clap::Parser;
 // use futures::executor::block_on;
-
 use kanidm_unix_common::client_sync::call_daemon_blocking;
 use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
 use kanidm_unix_common::unix_config::KanidmUnixdConfig;
diff --git a/kanidm_unix_int/src/db.rs b/kanidm_unix_int/src/db.rs
index 99b763495..68473507a 100644
--- a/kanidm_unix_int/src/db.rs
+++ b/kanidm_unix_int/src/db.rs
@@ -1,17 +1,17 @@
-use kanidm_proto::v1::{UnixGroupToken, UnixUserToken};
-use libc::umask;
-use r2d2::Pool;
-use r2d2_sqlite::SqliteConnectionManager;
 use std::convert::TryFrom;
 use std::fmt;
 use std::time::Duration;
 
-use crate::cache::Id;
-use tokio::sync::{Mutex, MutexGuard};
-
 use kanidm::be::dbvalue::DbPasswordV1;
 use kanidm::credential::policy::CryptoPolicy;
 use kanidm::credential::Password;
+use kanidm_proto::v1::{UnixGroupToken, UnixUserToken};
+use libc::umask;
+use r2d2::Pool;
+use r2d2_sqlite::SqliteConnectionManager;
+use tokio::sync::{Mutex, MutexGuard};
+
+use crate::cache::Id;
 
 pub struct Db {
     pool: Pool<SqliteConnectionManager>,
 }
@@ -732,9 +732,10 @@ impl<'a> Drop for DbTxn<'a> {
 
 #[cfg(test)]
 mod tests {
+    use kanidm_proto::v1::{UnixGroupToken, UnixUserToken};
+
     use super::Db;
     use crate::cache::Id;
-    use kanidm_proto::v1::{UnixGroupToken, UnixUserToken};
 
     const TESTACCOUNT1_PASSWORD_A: &str = "password a for account1 test";
     const TESTACCOUNT1_PASSWORD_B: &str = "password b for account1 test";
diff --git a/kanidm_unix_int/src/ssh_authorizedkeys.rs b/kanidm_unix_int/src/ssh_authorizedkeys.rs
index 7aca8d673..4633b82f8 100644
--- a/kanidm_unix_int/src/ssh_authorizedkeys.rs
+++ b/kanidm_unix_int/src/ssh_authorizedkeys.rs
@@ -13,11 +13,10 @@
 #[macro_use]
 extern crate tracing;
 
-use clap::Parser;
 use std::path::PathBuf;
 
+use clap::Parser;
 use futures::executor::block_on;
-
 use kanidm_unix_common::client::call_daemon;
 use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
 use kanidm_unix_common::unix_config::KanidmUnixdConfig;
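
The repeated type Item / type Error swaps in the codec impls above and below come from sorting associated items within each impl block, i.e. the commented-out nightly option reorder_impl_items = true in the new .rustfmt.toml. A minimal self-contained sketch of the resulting shape, assuming the bytes and tokio-util (codec feature) crates; FrameCodec is a hypothetical type, not one from this tree:

    use bytes::BytesMut;
    use tokio_util::codec::Decoder;

    struct FrameCodec;

    impl Decoder for FrameCodec {
        // Sorted associated items: `type Error` now precedes `type Item`.
        type Error = std::io::Error;
        type Item = Vec<u8>;

        fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
            // Toy framing: hand back everything buffered so far as one frame.
            if src.is_empty() {
                Ok(None)
            } else {
                Ok(Some(src.split().to_vec()))
            }
        }
    }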
diff --git a/kanidm_unix_int/src/tasks_daemon.rs b/kanidm_unix_int/src/tasks_daemon.rs
index c9adeb3a6..ab89644ca 100644
--- a/kanidm_unix_int/src/tasks_daemon.rs
+++ b/kanidm_unix_int/src/tasks_daemon.rs
@@ -10,35 +10,31 @@
 #![deny(clippy::needless_pass_by_value)]
 #![deny(clippy::trivially_copy_pass_by_ref)]
 
-use users::{get_effective_gid, get_effective_uid};
-
-use std::os::unix::fs::symlink;
-
-use libc::{lchown, umask};
 use std::ffi::CString;
-
-use bytes::{BufMut, BytesMut};
-use futures::SinkExt;
-use futures::StreamExt;
-use sketching::tracing_forest::{self, traits::*, util::*};
-use std::fs;
-use std::io;
+use std::os::unix::fs::symlink;
 use std::path::Path;
 use std::time::Duration;
-use tokio::net::UnixStream;
-use tokio::time;
-use tokio_util::codec::Framed;
-use tokio_util::codec::{Decoder, Encoder};
+use std::{fs, io};
 
+use bytes::{BufMut, BytesMut};
+use futures::{SinkExt, StreamExt};
 use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
 use kanidm_unix_common::unix_config::KanidmUnixdConfig;
 use kanidm_unix_common::unix_proto::{HomeDirectoryInfo, TaskRequest, TaskResponse};
+use libc::{lchown, umask};
+use sketching::tracing_forest::traits::*;
+use sketching::tracing_forest::util::*;
+use sketching::tracing_forest::{self};
+use tokio::net::UnixStream;
+use tokio::time;
+use tokio_util::codec::{Decoder, Encoder, Framed};
+use users::{get_effective_gid, get_effective_uid};
 
 struct TaskCodec;
 
 impl Decoder for TaskCodec {
-    type Item = TaskRequest;
     type Error = io::Error;
+    type Item = TaskRequest;
 
     fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
         match serde_json::from_slice::<TaskRequest>(&src) {
diff --git a/kanidm_unix_int/src/test_auth.rs b/kanidm_unix_int/src/test_auth.rs
index 1cb3fa5d3..84dc55976 100644
--- a/kanidm_unix_int/src/test_auth.rs
+++ b/kanidm_unix_int/src/test_auth.rs
@@ -3,9 +3,7 @@ extern crate tracing;
 
 use clap::Parser;
-
 use futures::executor::block_on;
-
 use kanidm_unix_common::client::call_daemon;
 use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
 use kanidm_unix_common::unix_config::KanidmUnixdConfig;
diff --git a/kanidm_unix_int/src/unix_config.rs b/kanidm_unix_int/src/unix_config.rs
index d7c9d0df6..aca10cd3d 100644
--- a/kanidm_unix_int/src/unix_config.rs
+++ b/kanidm_unix_int/src/unix_config.rs
@@ -1,15 +1,17 @@
-use crate::constants::{
-    DEFAULT_CACHE_TIMEOUT, DEFAULT_CONN_TIMEOUT, DEFAULT_DB_PATH, DEFAULT_GID_ATTR_MAP,
-    DEFAULT_HOME_ALIAS, DEFAULT_HOME_ATTR, DEFAULT_HOME_PREFIX, DEFAULT_SHELL, DEFAULT_SOCK_PATH,
-    DEFAULT_TASK_SOCK_PATH, DEFAULT_UID_ATTR_MAP,
-};
-use serde::Deserialize;
 use std::env;
 use std::fmt::{Display, Formatter};
 use std::fs::File;
 use std::io::{ErrorKind, Read};
 use std::path::Path;
 
+use serde::Deserialize;
+
+use crate::constants::{
+    DEFAULT_CACHE_TIMEOUT, DEFAULT_CONN_TIMEOUT, DEFAULT_DB_PATH, DEFAULT_GID_ATTR_MAP,
+    DEFAULT_HOME_ALIAS, DEFAULT_HOME_ATTR, DEFAULT_HOME_PREFIX, DEFAULT_SHELL, DEFAULT_SOCK_PATH,
+    DEFAULT_TASK_SOCK_PATH, DEFAULT_UID_ATTR_MAP,
+};
+
 #[derive(Debug, Deserialize)]
 struct ConfigInt {
     db_path: Option<String>,
diff --git a/kanidm_unix_int/tests/cache_layer_test.rs b/kanidm_unix_int/tests/cache_layer_test.rs
index e42c11140..2ea53d103 100644
--- a/kanidm_unix_int/tests/cache_layer_test.rs
+++ b/kanidm_unix_int/tests/cache_layer_test.rs
@@ -1,23 +1,19 @@
 #![deny(warnings)]
+use std::future::Future;
 use std::net::TcpStream;
+use std::pin::Pin;
 use std::sync::atomic::{AtomicU16, Ordering};
 use std::time::Duration;
 
 use kanidm::audit::LogLevel;
 use kanidm::config::{Configuration, IntegrationTestConfig, ServerRole};
-use score::create_server_core;
-
+use kanidm_client::{KanidmClient, KanidmClientBuilder};
 use kanidm_unix_common::cache::{CacheLayer, Id};
 use kanidm_unix_common::constants::{
DEFAULT_HOME_ALIAS, DEFAULT_HOME_ATTR, DEFAULT_HOME_PREFIX, DEFAULT_SHELL, DEFAULT_UID_ATTR_MAP, }; - -use kanidm_client::{KanidmClient, KanidmClientBuilder}; - -use std::future::Future; -use std::pin::Pin; - +use score::create_server_core; use tokio::task; static PORT_ALLOC: AtomicU16 = AtomicU16::new(28080); diff --git a/kanidmd/daemon/Cargo.toml b/kanidmd/daemon/Cargo.toml index 01bc04dbf..9ba1265a7 100644 --- a/kanidmd/daemon/Cargo.toml +++ b/kanidmd/daemon/Cargo.toml @@ -1,14 +1,15 @@ [package] name = "daemon" -version = "1.1.0-alpha.9" -authors = ["William Brown "] -rust-version = "1.59" -edition = "2021" -license = "MPL-2.0" description = "Kanidm Server Daemon" documentation = "https://docs.rs/kanidm/latest/kanidm/" -homepage = "https://github.com/kanidm/kanidm/" -repository = "https://github.com/kanidm/kanidm/" + +version.workspace = true +authors.workspace = true +rust-version.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -17,25 +18,25 @@ name = "kanidmd" path = "src/main.rs" [dependencies] -kanidm = { path = "../idm" } -kanidm_proto = { path = "../../kanidm_proto" } -score = { path = "../score" } -sketching = { path = "../../sketching" } +kanidm.workspace = true +kanidm_proto.workspace = true +score.workspace = true +sketching.workspace = true -clap = { version = "^3.2", features = ["derive", "env"] } -serde = { version = "^1.0.142", features = ["derive"] } -tokio = { version = "^1.21.1", features = ["rt-multi-thread", "macros", "signal"] } -toml = "0.5.9" +clap = { workspace = true, features = ["env"] } +serde = { workspace = true, features = ["derive"] } +tokio = { workspace = true, features = ["rt-multi-thread", "macros", "signal"] } +toml.workspace = true [target.'cfg(target_family = "windows")'.dependencies] -whoami = "^1.2.3" +whoami.workspace = true [target.'cfg(not(target_family = "windows"))'.dependencies] -users = "^0.11.0" -tikv-jemallocator = "0.5" +users.workspace = true +tikv-jemallocator.workspace = true [build-dependencies] -serde = { version = "1", features = ["derive"] } -clap = { version = "^3.2", features = ["derive"] } -clap_complete = "^3.2.5" -profiles = { path = "../../profiles" } +serde = { workspace = true, features = ["derive"] } +clap = { workspace = true, features = ["derive"] } +clap_complete.workspace = true +profiles.workspace = true diff --git a/kanidmd/daemon/build.rs b/kanidmd/daemon/build.rs index e04885924..73ef51b0e 100644 --- a/kanidmd/daemon/build.rs +++ b/kanidmd/daemon/build.rs @@ -5,7 +5,6 @@ use std::path::PathBuf; use clap::{Args, IntoApp, Parser, Subcommand}; use clap_complete::{generate_to, Shell}; - use serde::{Deserialize, Serialize}; include!("../idm/src/audit_loglevel.rs"); diff --git a/kanidmd/daemon/src/main.rs b/kanidmd/daemon/src/main.rs index 4b08825d1..98cbd4878 100644 --- a/kanidmd/daemon/src/main.rs +++ b/kanidmd/daemon/src/main.rs @@ -14,25 +14,15 @@ #[global_allocator] static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc; -#[cfg(not(target_family = "windows"))] // not needed for windows builds -use users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid}; -#[cfg(target_family = "windows")] // for windows builds -use whoami; - -use serde::Deserialize; use std::fs::{metadata, File, Metadata}; - +use std::io::Read; #[cfg(target_family = "unix")] use std::os::unix::fs::MetadataExt; - -use std::io::Read; 
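The manifest above replaces every inline version with a `workspace = true` reference, which Cargo (1.64 and later) resolves against `[workspace.package]` and `[workspace.dependencies]` tables in the root manifest. The root-side shape these keys assume looks roughly like this; the values are illustrative, not the actual kanidm workspace file:

    # Root Cargo.toml (illustrative sketch only)
    [workspace]
    members = ["kanidmd/daemon", "kanidmd/idm"]

    [workspace.package]
    version = "1.1.0-alpha.9"
    edition = "2021"
    license = "MPL-2.0"

    [workspace.dependencies]
    clap = { version = "3.2", features = ["derive"] }
    serde = "1.0"
    tokio = "1.21"

A member crate can still extend an inherited dependency, as in `clap = { workspace = true, features = ["env"] }` above: the listed features are unioned with whatever the workspace table already enables.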
-use std::path::Path; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::process::exit; use std::str::FromStr; -use sketching::tracing_forest::{self, traits::*, util::*}; - +use clap::{Args, Parser, Subcommand}; use kanidm::audit::LogLevel; use kanidm::config::{Configuration, OnlineBackup, ServerRole}; #[cfg(not(target_family = "windows"))] @@ -43,8 +33,14 @@ use score::{ domain_rename_core, recover_account_core, reindex_server_core, restore_server_core, vacuum_server_core, verify_server_core, }; - -use clap::{Args, Parser, Subcommand}; +use serde::Deserialize; +use sketching::tracing_forest::traits::*; +use sketching::tracing_forest::util::*; +use sketching::tracing_forest::{self}; +#[cfg(not(target_family = "windows"))] // not needed for windows builds +use users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid}; +#[cfg(target_family = "windows")] // for windows builds +use whoami; include!("./opt.rs"); diff --git a/kanidmd/idm/Cargo.toml b/kanidmd/idm/Cargo.toml index 8b994cc9c..ce55858d1 100644 --- a/kanidmd/idm/Cargo.toml +++ b/kanidmd/idm/Cargo.toml @@ -1,98 +1,91 @@ [package] name = "kanidm" -version = "1.1.0-alpha.9" -authors = ["William Brown "] -rust-version = "1.59" -edition = "2021" -license = "MPL-2.0" description = "Kanidm Server Library and Binary" documentation = "https://docs.rs/kanidm/latest/kanidm/" -homepage = "https://github.com/kanidm/kanidm/" -repository = "https://github.com/kanidm/kanidm/" + +version.workspace = true +authors.workspace = true +rust-version.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true [lib] name = "kanidm" path = "src/lib.rs" +[[bench]] +name = "scaling_10k" +harness = false + [dependencies] -async-std = { version = "^1.12.0", features = ["tokio1"] } -async-trait = "^0.1.57" -base64 = "^0.13.0" -base64urlsafedata = "0.1.0" -chrono = "^0.4.20" -compact_jwt = "^0.2.3" -concread = "^0.4.0" -dyn-clone = "^1.0.9" -fernet = { version = "^0.2.0", features = ["fernet_danger_timestamps"] } -filetime = "^0.2.17" -futures = "^0.3.21" -futures-util = "^0.3.21" -hashbrown = { version = "0.12.3", features = ["serde", "inline-more", "ahash"] } -idlset = { version = "^0.2.4" } -kanidm_proto = { path = "../../kanidm_proto" } -lazy_static = "^1.4.0" -ldap3_proto = "^0.2.3" -libc = "^0.2.127" -libsqlite3-sys = "^0.25.0" -num_enum = "^0.5.7" -openssl = "^0.10.41" -r2d2 = "^0.8.9" -r2d2_sqlite = "^0.21.0" -rand = "^0.8.5" -regex = "^1.5.6" -saffron = "^0.1.0" -serde = { version = "^1.0.142", features = ["derive"] } -serde_cbor = "^0.11.2" -serde_json = "^1.0.83" -smartstring = { version = "^1.0.1", features = ["serde"] } -smolset = "^1.3.1" -sshkeys = "^0.3.1" -tide = "^0.16.0" -time = { version = "=0.2.27", features = ["serde", "std"] } -tokio = { version = "^1.21.1", features = ["net", "sync", "time"] } -tokio-util = { version = "^0.7.4", features = ["codec"] } -toml = "^0.5.9" -touch = "^0.0.1" +async-std.workspace = true +async-trait.workspace = true +base64.workspace = true +base64urlsafedata.workspace = true +chrono.workspace = true +compact_jwt.workspace = true +concread.workspace = true +dyn-clone.workspace = true +fernet = { workspace = true, features = ["fernet_danger_timestamps"] } +filetime.workspace = true +futures.workspace = true +futures-util.workspace = true +hashbrown.workspace = true +idlset.workspace = true +kanidm_proto.workspace = true +lazy_static.workspace = true +ldap3_proto.workspace = true +libc.workspace = true 
+libsqlite3-sys.workspace = true +num_enum.workspace = true +openssl.workspace = true +r2d2.workspace = true +r2d2_sqlite.workspace = true +rand.workspace = true +regex.workspace = true +saffron.workspace = true +serde = { workspace = true, features = ["derive"] } +serde_cbor.workspace = true +serde_json.workspace = true +sketching.workspace = true +smartstring = { workspace = true, features = ["serde"] } +smolset.workspace = true +sshkeys.workspace = true +tide.workspace = true +time = { workspace = true, features = ["serde", "std"] } +tokio = { workspace = true, features = ["net", "sync", "time"] } +tokio-util = { workspace = true, features = ["codec"] } +toml.workspace = true +touch.workspace = true -sketching = { path = "../../sketching" } -tracing = { version = "^0.1.35", features = ["attributes"] } +tracing = { workspace = true, features = ["attributes"] } -url = { version = "^2.3.1", features = ["serde"] } -urlencoding = "2.1.2" -uuid = { version = "^1.1.2", features = ["serde", "v4" ] } -validator = { version = "^0.16.0", features = ["phone"] } +url = { workspace = true, features = ["serde"] } +urlencoding.workspace = true +uuid = { workspace = true, features = ["serde", "v4" ] } +validator = { workspace = true, features = ["phone"] } webauthn-rs = { workspace = true, features = ["resident-key-support", "preview-features", "danger-credential-internals"] } webauthn-rs-core.workspace = true -zxcvbn = "^2.2.1" +zxcvbn.workspace = true # because windows really can't build without the bundled one -[target.'cfg(target_family = "windows")'.dependencies.rusqlite] -version = "^0.28.0" -features = ["bundled"] - -[target.'cfg(not(target_family = "windows"))'.dependencies.rusqlite] -version = "^0.28.0" - [target.'cfg(target_family = "windows")'.dependencies] -whoami = "^1.2.3" - +rusqlite = { workspace = true, features = ["bundled"] } +whoami.workspace = true [target.'cfg(not(target_family = "windows"))'.dependencies] -users = "^0.11.0" - +rusqlite.workspace = true +users.workspace = true [features] # default = [ "libsqlite3-sys/bundled", "openssl/vendored" ] [dev-dependencies] -criterion = { version = "^0.4.0", features = ["html_reports"] } -# For testing webauthn +criterion = { workspace = true, features = ["html_reports"] } webauthn-authenticator-rs.workspace = true [build-dependencies] -profiles = { path = "../../profiles" } - -[[bench]] -name = "scaling_10k" -harness = false +profiles.workspace = true diff --git a/kanidmd/idm/benches/scaling_10k.rs b/kanidmd/idm/benches/scaling_10k.rs index f0aa25e3c..cc3a49ad6 100644 --- a/kanidmd/idm/benches/scaling_10k.rs +++ b/kanidmd/idm/benches/scaling_10k.rs @@ -1,7 +1,9 @@ +use std::time::{Duration, Instant}; + +use async_std::task; use criterion::{ criterion_group, criterion_main, BenchmarkId, Criterion, SamplingMode, Throughput, }; - use kanidm; use kanidm::entry::{Entry, EntryInit, EntryNew}; use kanidm::entry_init; @@ -11,9 +13,6 @@ use kanidm::server::QueryServer; use kanidm::utils::duration_from_epoch_now; use kanidm::value::Value; -use async_std::task; -use std::time::{Duration, Instant}; - pub fn scaling_user_create_single(c: &mut Criterion) { let mut group = c.benchmark_group("user_create_single"); group.sample_size(10); diff --git a/kanidmd/idm/src/access.rs b/kanidmd/idm/src/access.rs index 485c77c43..52deafe49 100644 --- a/kanidmd/idm/src/access.rs +++ b/kanidmd/idm/src/access.rs @@ -12,31 +12,28 @@ //! search. //! - the ability to turn an entry into a partial-entry for results send //! requirements (also search). -//! 
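The `[[bench]]` table moved to the top of the idm manifest above declares `harness = false`, which stops cargo from linking the default libtest harness so that criterion's own `main` (via `criterion_main!`) can run. A minimal benchmark matching that setup, with a placeholder workload rather than the scaling_10k logic:

    use criterion::{black_box, criterion_group, criterion_main, Criterion};

    // Placeholder workload; the real bench drives QueryServer entry creation.
    fn bench_noop(c: &mut Criterion) {
        c.bench_function("noop_add", |b| b.iter(|| black_box(1u64) + black_box(1u64)));
    }

    criterion_group!(benches, bench_noop);
    criterion_main!(benches);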
// use concread::collections::bptree::*; -use concread::arcache::{ARCache, ARCacheBuilder, ARCacheReadTxn}; -use concread::cowcell::*; -use kanidm_proto::v1::Filter as ProtoFilter; -use kanidm_proto::v1::OperationError; -use std::collections::BTreeSet; // use hashbrown::HashSet; use std::cell::Cell; +use std::collections::BTreeSet; use std::ops::DerefMut; use std::sync::Arc; + +use concread::arcache::{ARCache, ARCacheBuilder, ARCacheReadTxn}; +use concread::cowcell::*; +use kanidm_proto::v1::{Filter as ProtoFilter, OperationError}; +use tracing::trace; use uuid::Uuid; use crate::entry::{Entry, EntryCommitted, EntryInit, EntryNew, EntryReduced, EntrySealed}; +use crate::event::{CreateEvent, DeleteEvent, ModifyEvent, SearchEvent}; use crate::filter::{Filter, FilterValid, FilterValidResolved}; +use crate::identity::{IdentType, IdentityId}; use crate::modify::Modify; use crate::prelude::*; use crate::value::PartialValue; -use crate::event::{CreateEvent, DeleteEvent, ModifyEvent, SearchEvent}; -use crate::identity::{IdentType, IdentityId}; - -use tracing::trace; - // const ACP_RELATED_SEARCH_CACHE_MAX: usize = 2048; // const ACP_RELATED_SEARCH_CACHE_LOCAL: usize = 16; @@ -408,6 +405,7 @@ pub trait AccessControlsTransaction<'a> { &self, ) -> &mut ARCacheReadTxn<'a, (IdentityId, Filter), Filter, ()>; + #[instrument(level = "debug", name = "access::search_related_acp", skip_all)] fn search_related_acp<'b>( &'b self, rec_entry: &Entry, @@ -443,61 +441,51 @@ pub trait AccessControlsTransaction<'a> { } else { */ // else, we calculate this, and then stash/cache the uuids. - let related_acp: Vec<(&AccessControlSearch, Filter)> = - spanned!("access::search_related_acp", { - search_state - .iter() - .filter_map(|acs| { - // Now resolve the receiver filter - // Okay, so in filter resolution, the primary error case - // is that we have a non-user in the event. We have already - // checked for this above BUT we should still check here - // properly just in case. - // - // In this case, we assume that if the event is internal - // that the receiver can NOT match because it has no selfuuid - // and can as a result, never return true. This leads to this - // acp not being considered in that case ... which should never - // happen because we already bypassed internal ops above! - // - // A possible solution is to change the filter resolve function - // such that it takes an entry, rather than an event, but that - // would create issues in search. - match (&acs.acp.receiver).resolve( - ident, - None, - Some(acp_resolve_filter_cache), - ) { - Ok(f_res) => { - if rec_entry.entry_match_no_index(&f_res) { - // Now, for each of the acp's that apply to our receiver, resolve their - // related target filters. - (&acs.acp.targetscope) - .resolve(ident, None, Some(acp_resolve_filter_cache)) - .map_err(|e| { - admin_error!( - ?e, - "A internal filter/event was passed for resolution!?!?" - ); - e - }) - .ok() - .map(|f_res| (acs, f_res)) - } else { - None - } - } - Err(e) => { - admin_error!( - ?e, - "A internal filter/event was passed for resolution!?!?" - ); - None - } + let related_acp: Vec<(&AccessControlSearch, Filter)> = search_state + .iter() + .filter_map(|acs| { + // Now resolve the receiver filter + // Okay, so in filter resolution, the primary error case + // is that we have a non-user in the event. We have already + // checked for this above BUT we should still check here + // properly just in case. 
+ // + // In this case, we assume that if the event is internal + // that the receiver can NOT match because it has no selfuuid + // and can as a result, never return true. This leads to this + // acp not being considered in that case ... which should never + // happen because we already bypassed internal ops above! + // + // A possible solution is to change the filter resolve function + // such that it takes an entry, rather than an event, but that + // would create issues in search. + match (&acs.acp.receiver).resolve(ident, None, Some(acp_resolve_filter_cache)) { + Ok(f_res) => { + if rec_entry.entry_match_no_index(&f_res) { + // Now, for each of the acp's that apply to our receiver, resolve their + // related target filters. + (&acs.acp.targetscope) + .resolve(ident, None, Some(acp_resolve_filter_cache)) + .map_err(|e| { + admin_error!( + ?e, + "A internal filter/event was passed for resolution!?!?" + ); + e + }) + .ok() + .map(|f_res| (acs, f_res)) + } else { + None } - }) - .collect() - }); + } + Err(e) => { + admin_error!(?e, "A internal filter/event was passed for resolution!?!?"); + None + } + } + }) + .collect(); /* // Stash the uuids into the cache. @@ -511,6 +499,7 @@ pub trait AccessControlsTransaction<'a> { } // Contains all the way to eval acps to entries + #[instrument(level = "debug", name = "access::search_filter_entries", skip_all)] fn search_filter_entries( &self, se: &SearchEvent, @@ -525,27 +514,24 @@ pub trait AccessControlsTransaction<'a> { } IdentType::User(u) => &u.entry, }; - spanned!("access::search_filter_entries", { - trace!(event = %se.ident, "Access check for search (filter) event"); + trace!(event = %se.ident, "Access check for search (filter) event"); - // First get the set of acps that apply to this receiver - let related_acp: Vec<(&AccessControlSearch, _)> = - self.search_related_acp(rec_entry, &se.ident); + // First get the set of acps that apply to this receiver + let related_acp: Vec<(&AccessControlSearch, _)> = + self.search_related_acp(rec_entry, &se.ident); - /* - related_acp.iter().for_each(|racp| { - security_access!(acs = ?racp.acp.name, "Event Origin Related acs"); - }); - */ + /* + related_acp.iter().for_each(|racp| { + security_access!(acs = ?racp.acp.name, "Event Origin Related acs"); + }); + */ - // Get the set of attributes requested by this se filter. This is what we are - // going to access check. - let requested_attrs: BTreeSet<&str> = se.filter_orig.get_attr_set(); + // Get the set of attributes requested by this se filter. This is what we are + // going to access check. 
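The recurring edit in this file swaps an explicit `spanned!("name", { ... })` wrapper for an `#[instrument]` attribute on the function, which is what removes one level of indentation from each body. A sketch of the equivalence, assuming tracing's attribute macro and a hypothetical function:

    use tracing::instrument;

    // Before (sketch): a span opened and entered by hand around the body.
    fn check_manual(x: u32) -> bool {
        let span = tracing::debug_span!("access::check");
        let _entered = span.enter();
        x < 42
    }

    // After (sketch): the attribute creates and enters an equivalent span;
    // `skip_all` keeps the function arguments out of the recorded fields.
    #[instrument(level = "debug", name = "access::check", skip_all)]
    fn check_attr(x: u32) -> bool {
        x < 42
    }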
+ let requested_attrs: BTreeSet<&str> = se.filter_orig.get_attr_set(); - // For each entry - let allowed_entries: Vec> = spanned!( - "access::search_filter_entries", - { + // For each entry + let allowed_entries: Vec> = entries .into_iter() .filter(|e| { @@ -580,20 +566,22 @@ pub trait AccessControlsTransaction<'a> { security_access!(?decision, "search attr decision"); decision }) - .collect() - } - ); + .collect(); - if allowed_entries.is_empty() { - security_access!("denied ❌"); - } else { - security_access!("allowed {} entries ✅", allowed_entries.len()); - } + if allowed_entries.is_empty() { + security_access!("denied ❌"); + } else { + security_access!("allowed {} entries ✅", allowed_entries.len()); + } - Ok(allowed_entries) - }) + Ok(allowed_entries) } + #[instrument( + level = "debug", + name = "access::search_filter_entry_attributes", + skip_all + )] fn search_filter_entry_attributes( &self, se: &SearchEvent, @@ -620,108 +608,100 @@ pub trait AccessControlsTransaction<'a> { IdentType::User(u) => &u.entry, }; - spanned!("access::search_filter_entry_attributes", { - /* - * Super similar to above (could even re-use some parts). Given a set of entries, - * reduce the attribute sets on them to "what is visible". This is ONLY called on - * the server edge, such that clients only see what they can, but internally, - * impersonate and such actually still get the whole entry back as not to break - * modify and co. - */ + /* + * Super similar to above (could even re-use some parts). Given a set of entries, + * reduce the attribute sets on them to "what is visible". This is ONLY called on + * the server edge, such that clients only see what they can, but internally, + * impersonate and such actually still get the whole entry back as not to break + * modify and co. + */ - trace!("Access check for search (reduce) event: {}", se.ident); + trace!("Access check for search (reduce) event: {}", se.ident); - // Get the relevant acps for this receiver. - let related_acp: Vec<(&AccessControlSearch, _)> = - self.search_related_acp(rec_entry, &se.ident); - let related_acp: Vec<(&AccessControlSearch, _)> = - if let Some(r_attrs) = se.attrs.as_ref() { - related_acp - .into_iter() - .filter(|(acs, _)| !acs.attrs.is_disjoint(r_attrs)) - .collect() - } else { - related_acp - }; + // Get the relevant acps for this receiver. + let related_acp: Vec<(&AccessControlSearch, _)> = + self.search_related_acp(rec_entry, &se.ident); + let related_acp: Vec<(&AccessControlSearch, _)> = if let Some(r_attrs) = se.attrs.as_ref() { + related_acp + .into_iter() + .filter(|(acs, _)| !acs.attrs.is_disjoint(r_attrs)) + .collect() + } else { + related_acp + }; - /* - related_acp.iter().for_each(|racp| { - lsecurity_access!( "Related acs -> {:?}", racp.acp.name); - }); - */ + /* + related_acp.iter().for_each(|racp| { + lsecurity_access!( "Related acs -> {:?}", racp.acp.name); + }); + */ - // Build a reference set from the req_attrs. This is what we test against - // to see if the attribute is something we currently want. - let req_attrs: Option> = se - .attrs - .as_ref() - .map(|vs| vs.iter().map(|s| s.as_str()).collect()); + // Build a reference set from the req_attrs. This is what we test against + // to see if the attribute is something we currently want. 
+ let req_attrs: Option> = se + .attrs + .as_ref() + .map(|vs| vs.iter().map(|s| s.as_str()).collect()); - // For each entry - let allowed_entries: Vec> = - spanned!("access::search_filter_entry_attributes", { - entries - .into_iter() - .map(|e| { - // Get the set of attributes you can see for this entry - // this is within your related acp scope. - let allowed_attrs: BTreeSet<&str> = related_acp - .iter() - .filter_map(|(acs, f_res)| { - if e.entry_match_no_index(f_res) { - security_access!( - target = ?e.get_uuid(), - acs = %acs.acp.name, - "target entry matches acs", - ); - // add search_attrs to allowed iterator - Some(acs.attrs.iter().map(|s| s.as_str()).filter(|s| { - req_attrs - .as_ref() - .map(|r_attrs| r_attrs.contains(s)) - .unwrap_or(true) - })) - } else { - trace!( - target = ?e.get_uuid(), - acs = %acs.acp.name, - "target entry DOES NOT match acs", - ); - None - } - }) - .flatten() - .collect(); - - // Remove all others that are present on the entry. + // For each entry + let allowed_entries: Vec> = entries + .into_iter() + .map(|e| { + // Get the set of attributes you can see for this entry + // this is within your related acp scope. + let allowed_attrs: BTreeSet<&str> = related_acp + .iter() + .filter_map(|(acs, f_res)| { + if e.entry_match_no_index(f_res) { security_access!( - requested = ?req_attrs, - allowed = ?allowed_attrs, - "attributes" + target = ?e.get_uuid(), + acs = %acs.acp.name, + "target entry matches acs", ); + // add search_attrs to allowed iterator + Some(acs.attrs.iter().map(|s| s.as_str()).filter(|s| { + req_attrs + .as_ref() + .map(|r_attrs| r_attrs.contains(s)) + .unwrap_or(true) + })) + } else { + trace!( + target = ?e.get_uuid(), + acs = %acs.acp.name, + "target entry DOES NOT match acs", + ); + None + } + }) + .flatten() + .collect(); - // Now purge the attrs that are NOT allowed. - spanned!( - "access::search_filter_entry_attributes", - { e.reduce_attributes(&allowed_attrs) } - ) - }) - .collect() - }); - - if allowed_entries.is_empty() { - security_access!("reduced to empty set on all entries ❌"); - } else { + // Remove all others that are present on the entry. security_access!( - "attribute set reduced on {} entries ✅", - allowed_entries.len() + requested = ?req_attrs, + allowed = ?allowed_attrs, + "attributes" ); - } - Ok(allowed_entries) - }) + // Now purge the attrs that are NOT allowed. + e.reduce_attributes(&allowed_attrs) + }) + .collect(); + + if allowed_entries.is_empty() { + security_access!("reduced to empty set on all entries ❌"); + } else { + security_access!( + "attribute set reduced on {} entries ✅", + allowed_entries.len() + ); + } + + Ok(allowed_entries) } + #[instrument(level = "debug", name = "access::modify_related_acp", skip_all)] fn modify_related_acp<'b>( &'b self, rec_entry: &Entry, @@ -733,9 +713,7 @@ pub trait AccessControlsTransaction<'a> { // Find the acps that relate to the caller, and compile their related // target filters. 
- let related_acp: Vec<(&AccessControlModify, _)> = spanned!( - "access::modify_related_acp", - { + let related_acp: Vec<(&AccessControlModify, _)> = modify_state .iter() .filter_map(|acs| { @@ -766,14 +744,13 @@ pub trait AccessControlsTransaction<'a> { } } }) - .collect() - } - ); + .collect(); related_acp } #[allow(clippy::cognitive_complexity)] + #[instrument(level = "debug", name = "access::modify_allow_operation", skip_all)] fn modify_allow_operation( &self, me: &ModifyEvent, @@ -787,155 +764,154 @@ pub trait AccessControlsTransaction<'a> { } IdentType::User(u) => &u.entry, }; - spanned!("access::modify_allow_operation", { - trace!("Access check for modify event: {}", me.ident); + trace!("Access check for modify event: {}", me.ident); - // Pre-check if the no-no purge class is present - let disallow = me - .modlist - .iter() - .any(|m| matches!(m, Modify::Purged(a) if a == "class")); + // Pre-check if the no-no purge class is present + let disallow = me + .modlist + .iter() + .any(|m| matches!(m, Modify::Purged(a) if a == "class")); - if disallow { - security_access!("Disallowing purge class in modification"); - return Ok(false); - } + if disallow { + security_access!("Disallowing purge class in modification"); + return Ok(false); + } - // Find the acps that relate to the caller, and compile their related - // target filters. - let related_acp: Vec<(&AccessControlModify, _)> = - self.modify_related_acp(rec_entry, &me.ident); + // Find the acps that relate to the caller, and compile their related + // target filters. + let related_acp: Vec<(&AccessControlModify, _)> = + self.modify_related_acp(rec_entry, &me.ident); - related_acp.iter().for_each(|racp| { - trace!("Related acs -> {:?}", racp.0.acp.name); - }); + related_acp.iter().for_each(|racp| { + trace!("Related acs -> {:?}", racp.0.acp.name); + }); - // build two sets of "requested pres" and "requested rem" - let requested_pres: BTreeSet<&str> = me - .modlist - .iter() - .filter_map(|m| match m { - Modify::Present(a, _) => Some(a.as_str()), - _ => None, - }) - .collect(); + // build two sets of "requested pres" and "requested rem" + let requested_pres: BTreeSet<&str> = me + .modlist + .iter() + .filter_map(|m| match m { + Modify::Present(a, _) => Some(a.as_str()), + _ => None, + }) + .collect(); - let requested_rem: BTreeSet<&str> = me - .modlist - .iter() - .filter_map(|m| match m { - Modify::Removed(a, _) => Some(a.as_str()), - Modify::Purged(a) => Some(a.as_str()), - _ => None, - }) - .collect(); + let requested_rem: BTreeSet<&str> = me + .modlist + .iter() + .filter_map(|m| match m { + Modify::Removed(a, _) => Some(a.as_str()), + Modify::Purged(a) => Some(a.as_str()), + _ => None, + }) + .collect(); - // Build the set of classes that we to work on, only in terms of "addition". To remove - // I think we have no limit, but ... william of the future may find a problem with this - // policy. - let requested_classes: BTreeSet<&str> = me - .modlist - .iter() - .filter_map(|m| match m { - Modify::Present(a, v) => { - if a.as_str() == "class" { - // Here we have an option<&str> which could mean there is a risk of - // a malicious entity attempting to trick us by masking class mods - // in non-iutf8 types. However, the server first won't respect their - // existance, and second, we would have failed the mod at schema checking - // earlier in the process as these were not correctly type. As a result - // we can trust these to be correct here and not to be "None". 
- v.to_str() - } else { - None - } + // Build the set of classes that we to work on, only in terms of "addition". To remove + // I think we have no limit, but ... william of the future may find a problem with this + // policy. + let requested_classes: BTreeSet<&str> = me + .modlist + .iter() + .filter_map(|m| match m { + Modify::Present(a, v) => { + if a.as_str() == "class" { + // Here we have an option<&str> which could mean there is a risk of + // a malicious entity attempting to trick us by masking class mods + // in non-iutf8 types. However, the server first won't respect their + // existance, and second, we would have failed the mod at schema checking + // earlier in the process as these were not correctly type. As a result + // we can trust these to be correct here and not to be "None". + v.to_str() + } else { + None } - Modify::Removed(a, v) => { - if a.as_str() == "class" { - v.to_str() - } else { - None - } + } + Modify::Removed(a, v) => { + if a.as_str() == "class" { + v.to_str() + } else { + None + } + } + _ => None, + }) + .collect(); + + security_access!(?requested_pres, "Requested present set"); + security_access!(?requested_rem, "Requested remove set"); + security_access!(?requested_classes, "Requested class set"); + + let r = entries.iter().all(|e| { + // For this entry, find the acp's that apply to it from the + // set that apply to the entry that is performing the operation + let scoped_acp: Vec<&AccessControlModify> = related_acp + .iter() + .filter_map(|(acm, f_res)| { + if e.entry_match_no_index(f_res) { + Some(*acm) + } else { + None } - _ => None, }) .collect(); + // Build the sets of classes, pres and rem we are allowed to modify, extend + // or use based on the set of matched acps. + let allowed_pres: BTreeSet<&str> = scoped_acp + .iter() + .flat_map(|acp| acp.presattrs.iter().map(|v| v.as_str())) + .collect(); - security_access!(?requested_pres, "Requested present set"); - security_access!(?requested_rem, "Requested remove set"); - security_access!(?requested_classes, "Requested class set"); + let allowed_rem: BTreeSet<&str> = scoped_acp + .iter() + .flat_map(|acp| acp.remattrs.iter().map(|v| v.as_str())) + .collect(); - let r = entries.iter().all(|e| { - // For this entry, find the acp's that apply to it from the - // set that apply to the entry that is performing the operation - let scoped_acp: Vec<&AccessControlModify> = related_acp - .iter() - .filter_map(|(acm, f_res)| { - if e.entry_match_no_index(f_res) { - Some(*acm) - } else { - None - } - }) - .collect(); - // Build the sets of classes, pres and rem we are allowed to modify, extend - // or use based on the set of matched acps. - let allowed_pres: BTreeSet<&str> = scoped_acp - .iter() - .flat_map(|acp| acp.presattrs.iter().map(|v| v.as_str())) - .collect(); + let allowed_classes: BTreeSet<&str> = scoped_acp + .iter() + .flat_map(|acp| acp.classes.iter().map(|v| v.as_str())) + .collect(); - let allowed_rem: BTreeSet<&str> = scoped_acp - .iter() - .flat_map(|acp| acp.remattrs.iter().map(|v| v.as_str())) - .collect(); - - let allowed_classes: BTreeSet<&str> = scoped_acp - .iter() - .flat_map(|acp| acp.classes.iter().map(|v| v.as_str())) - .collect(); - - // Now check all the subsets are true. Remember, purge class - // is already checked above. 
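The three `is_subset` tests that follow are ordinary BTreeSet operations over attribute names; reduced to a standalone example with hypothetical attributes:

    use std::collections::BTreeSet;

    fn main() {
        // Requested modifications may only touch attributes an ACP allows.
        let requested: BTreeSet<&str> = ["displayname", "mail"].into_iter().collect();
        let allowed: BTreeSet<&str> =
            ["displayname", "mail", "legalname"].into_iter().collect();
        assert!(requested.is_subset(&allowed));

        // Any attribute outside the allowed set fails the whole operation.
        let denied: BTreeSet<&str> = ["memberof"].into_iter().collect();
        assert!(!denied.is_subset(&allowed));
    }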
- if !requested_pres.is_subset(&allowed_pres) { - security_access!("requested_pres is not a subset of allowed"); - security_access!( - "requested_pres: {:?} !⊆ allowed: {:?}", - requested_pres, - allowed_pres - ); - false - } else if !requested_rem.is_subset(&allowed_rem) { - security_access!("requested_rem is not a subset of allowed"); - security_access!( - "requested_rem: {:?} !⊆ allowed: {:?}", - requested_rem, - allowed_rem - ); - false - } else if !requested_classes.is_subset(&allowed_classes) { - security_access!("requested_classes is not a subset of allowed"); - security_access!( - "requested_classes: {:?} !⊆ allowed: {:?}", - requested_classes, - allowed_classes - ); - false - } else { - security_access!("passed pres, rem, classes check."); - true - } // if acc == false - }); - if r { - security_access!("allowed ✅"); + // Now check all the subsets are true. Remember, purge class + // is already checked above. + if !requested_pres.is_subset(&allowed_pres) { + security_access!("requested_pres is not a subset of allowed"); + security_access!( + "requested_pres: {:?} !⊆ allowed: {:?}", + requested_pres, + allowed_pres + ); + false + } else if !requested_rem.is_subset(&allowed_rem) { + security_access!("requested_rem is not a subset of allowed"); + security_access!( + "requested_rem: {:?} !⊆ allowed: {:?}", + requested_rem, + allowed_rem + ); + false + } else if !requested_classes.is_subset(&allowed_classes) { + security_access!("requested_classes is not a subset of allowed"); + security_access!( + "requested_classes: {:?} !⊆ allowed: {:?}", + requested_classes, + allowed_classes + ); + false } else { - security_access!("denied ❌"); - } - Ok(r) - }) + security_access!("passed pres, rem, classes check."); + true + } // if acc == false + }); + if r { + security_access!("allowed ✅"); + } else { + security_access!("denied ❌"); + } + Ok(r) } #[allow(clippy::cognitive_complexity)] + #[instrument(level = "debug", name = "access::create_allow_operation", skip_all)] fn create_allow_operation( &self, ce: &CreateEvent, @@ -949,125 +925,124 @@ pub trait AccessControlsTransaction<'a> { } IdentType::User(u) => &u.entry, }; - spanned!("access::create_allow_operation", { - trace!("Access check for create event: {}", ce.ident); + trace!("Access check for create event: {}", ce.ident); - // Some useful references we'll use for the remainder of the operation - let create_state = self.get_create(); - let acp_resolve_filter_cache = self.get_acp_resolve_filter_cache(); + // Some useful references we'll use for the remainder of the operation + let create_state = self.get_create(); + let acp_resolve_filter_cache = self.get_acp_resolve_filter_cache(); - // Find the acps that relate to the caller. - let related_acp: Vec<(&AccessControlCreate, _)> = create_state - .iter() - .filter_map(|acs| { - match acs + // Find the acps that relate to the caller. + let related_acp: Vec<(&AccessControlCreate, _)> = create_state + .iter() + .filter_map(|acs| { + match acs + .acp + .receiver + .resolve(&ce.ident, None, Some(acp_resolve_filter_cache)) + { + Ok(f_res) if rec_entry.entry_match_no_index(&f_res) => acs .acp - .receiver + .targetscope .resolve(&ce.ident, None, Some(acp_resolve_filter_cache)) - { - Ok(f_res) if rec_entry.entry_match_no_index(&f_res) => acs - .acp - .targetscope - .resolve(&ce.ident, None, Some(acp_resolve_filter_cache)) - .map_err(|e| { - admin_error!( - "A internal filter/event was passed for resolution!?!? 
{:?}", - e - ); - e - }) - .ok() - .map(|f_res| (acs, f_res)), - Ok(_) => None, - Err(e) => { + .map_err(|e| { admin_error!( "A internal filter/event was passed for resolution!?!? {:?}", e ); - None - } + e + }) + .ok() + .map(|f_res| (acs, f_res)), + Ok(_) => None, + Err(e) => { + admin_error!( + "A internal filter/event was passed for resolution!?!? {:?}", + e + ); + None } - }) - .collect(); + } + }) + .collect(); - // lsecurity_access!( "Related acc -> {:?}", related_acp); + // lsecurity_access!( "Related acc -> {:?}", related_acp); - // For each entry - let r = entries.iter().all(|e| { - // Build the set of requested classes and attrs here. - let create_attrs: BTreeSet<&str> = e.get_ava_names().collect(); - // If this is empty, we make an empty set, which is fine because - // the empty class set despite matching is_subset, will have the - // following effect: - // * there is no class on entry, so schema will fail - // * plugin-base will add object to give a class, but excess - // attrs will cause fail (could this be a weakness?) - // * class is a "may", so this could be empty in the rules, so - // if the accr is empty this would not be a true subset, - // so this would "fail", but any content in the accr would - // have to be validated. - // - // I still think if this is None, we should just fail here ... - // because it shouldn't be possible to match. + // For each entry + let r = entries.iter().all(|e| { + // Build the set of requested classes and attrs here. + let create_attrs: BTreeSet<&str> = e.get_ava_names().collect(); + // If this is empty, we make an empty set, which is fine because + // the empty class set despite matching is_subset, will have the + // following effect: + // * there is no class on entry, so schema will fail + // * plugin-base will add object to give a class, but excess + // attrs will cause fail (could this be a weakness?) + // * class is a "may", so this could be empty in the rules, so + // if the accr is empty this would not be a true subset, + // so this would "fail", but any content in the accr would + // have to be validated. + // + // I still think if this is None, we should just fail here ... + // because it shouldn't be possible to match. - let create_classes: BTreeSet<&str> = match e.get_ava_iter_iutf8("class") { - Some(s) => s.collect(), - None => { - admin_error!("Class set failed to build - corrupted entry?"); + let create_classes: BTreeSet<&str> = match e.get_ava_iter_iutf8("class") { + Some(s) => s.collect(), + None => { + admin_error!("Class set failed to build - corrupted entry?"); + return false; + } + }; + + related_acp.iter().any(|(accr, f_res)| { + // Check to see if allowed. + if e.entry_match_no_index(f_res) { + security_access!(?e, acs = ?accr, "entry matches acs"); + // It matches, so now we have to check attrs and classes. + // Remember, we have to match ALL requested attrs + // and classes to pass! + let allowed_attrs: BTreeSet<&str> = + accr.attrs.iter().map(|s| s.as_str()).collect(); + let allowed_classes: BTreeSet<&str> = + accr.classes.iter().map(|s| s.as_str()).collect(); + + if !create_attrs.is_subset(&allowed_attrs) { + security_access!("create_attrs is not a subset of allowed"); + security_access!("{:?} !⊆ {:?}", create_attrs, allowed_attrs); return false; } - }; - - related_acp.iter().any(|(accr, f_res)| { - // Check to see if allowed. - if e.entry_match_no_index(f_res) { - security_access!(?e, acs = ?accr, "entry matches acs"); - // It matches, so now we have to check attrs and classes. 
- // Remember, we have to match ALL requested attrs - // and classes to pass! - let allowed_attrs: BTreeSet<&str> = - accr.attrs.iter().map(|s| s.as_str()).collect(); - let allowed_classes: BTreeSet<&str> = - accr.classes.iter().map(|s| s.as_str()).collect(); - - if !create_attrs.is_subset(&allowed_attrs) { - security_access!("create_attrs is not a subset of allowed"); - security_access!("{:?} !⊆ {:?}", create_attrs, allowed_attrs); - return false; - } - if !create_classes.is_subset(&allowed_classes) { - security_access!("create_classes is not a subset of allowed"); - security_access!("{:?} !⊆ {:?}", create_classes, allowed_classes); - return false; - } - security_access!("passed"); - - true - } else { - trace!(?e, acs = %accr.acp.name, "entry DOES NOT match acs"); - // Does not match, fail this rule. - false + if !create_classes.is_subset(&allowed_classes) { + security_access!("create_classes is not a subset of allowed"); + security_access!("{:?} !⊆ {:?}", create_classes, allowed_classes); + return false; } - }) - // Find the set of related acps for this entry. - // - // For each "created" entry. - // If the created entry is 100% allowed by this acp - // IE: all attrs to be created AND classes match classes - // allow - // if no acp allows, fail operation. - }); + security_access!("passed"); - if r { - security_access!("allowed ✅"); - } else { - security_access!("denied ❌"); - } + true + } else { + trace!(?e, acs = %accr.acp.name, "entry DOES NOT match acs"); + // Does not match, fail this rule. + false + } + }) + // Find the set of related acps for this entry. + // + // For each "created" entry. + // If the created entry is 100% allowed by this acp + // IE: all attrs to be created AND classes match classes + // allow + // if no acp allows, fail operation. + }); - Ok(r) - }) + if r { + security_access!("allowed ✅"); + } else { + security_access!("denied ❌"); + } + + Ok(r) } + #[instrument(level = "debug", name = "access::delete_allow_operation", skip_all)] fn delete_allow_operation( &self, de: &DeleteEvent, @@ -1081,15 +1056,14 @@ pub trait AccessControlsTransaction<'a> { } IdentType::User(u) => &u.entry, }; - spanned!("access::delete_allow_operation", { - trace!("Access check for delete event: {}", de.ident); + trace!("Access check for delete event: {}", de.ident); - // Some useful references we'll use for the remainder of the operation - let delete_state = self.get_delete(); - let acp_resolve_filter_cache = self.get_acp_resolve_filter_cache(); + // Some useful references we'll use for the remainder of the operation + let delete_state = self.get_delete(); + let acp_resolve_filter_cache = self.get_acp_resolve_filter_cache(); - // Find the acps that relate to the caller. - let related_acp: Vec<(&AccessControlDelete, _)> = delete_state + // Find the acps that relate to the caller. 
+ let related_acp: Vec<(&AccessControlDelete, _)> = delete_state .iter() .filter_map(|acs| { match acs.acp.receiver.resolve(&de.ident, None, Some(acp_resolve_filter_cache)) { @@ -1123,44 +1097,44 @@ pub trait AccessControlsTransaction<'a> { }) .collect(); - /* - related_acp.iter().for_each(|racp| { - lsecurity_access!( "Related acs -> {:?}", racp.acp.name); - }); - */ + /* + related_acp.iter().for_each(|racp| { + lsecurity_access!( "Related acs -> {:?}", racp.acp.name); + }); + */ - // For each entry - let r = entries.iter().all(|e| { - related_acp.iter().any(|(acd, f_res)| { - if e.entry_match_no_index(f_res) { - security_access!( - entry_uuid = ?e.get_uuid(), - acs = %acd.acp.name, - "entry matches acs" - ); - // It matches, so we can delete this! - security_access!("passed"); - true - } else { - trace!( - "entry {:?} DOES NOT match acs {}", - e.get_uuid(), - acd.acp.name - ); - // Does not match, fail. - false - } // else - }) // any related_acp - }); - if r { - security_access!("allowed ✅"); - } else { - security_access!("denied ❌"); - } - Ok(r) - }) + // For each entry + let r = entries.iter().all(|e| { + related_acp.iter().any(|(acd, f_res)| { + if e.entry_match_no_index(f_res) { + security_access!( + entry_uuid = ?e.get_uuid(), + acs = %acd.acp.name, + "entry matches acs" + ); + // It matches, so we can delete this! + security_access!("passed"); + true + } else { + trace!( + "entry {:?} DOES NOT match acs {}", + e.get_uuid(), + acd.acp.name + ); + // Does not match, fail. + false + } // else + }) // any related_acp + }); + if r { + security_access!("allowed ✅"); + } else { + security_access!("denied ❌"); + } + Ok(r) } + #[instrument(level = "debug", name = "access::effective_permission_check", skip_all)] fn effective_permission_check( &self, ident: &Identity, @@ -1186,116 +1160,114 @@ pub trait AccessControlsTransaction<'a> { IdentType::User(u) => &u.entry, }; - spanned!("access::effective_permission_check", { - trace!(ident = %ident, "Effective permission check"); - // I think we seperate this to multiple checks ...? + trace!(ident = %ident, "Effective permission check"); + // I think we seperate this to multiple checks ...? - // == search == - // Get the relevant acps for this receiver. - let search_related_acp: Vec<(&AccessControlSearch, _)> = - self.search_related_acp(rec_entry, ident); - let search_related_acp: Vec<(&AccessControlSearch, _)> = - if let Some(r_attrs) = attrs.as_ref() { - search_related_acp - .into_iter() - .filter(|(acs, _)| !acs.attrs.is_disjoint(r_attrs)) - .collect() + // == search == + // Get the relevant acps for this receiver. 
+ let search_related_acp: Vec<(&AccessControlSearch, _)> = + self.search_related_acp(rec_entry, ident); + let search_related_acp: Vec<(&AccessControlSearch, _)> = + if let Some(r_attrs) = attrs.as_ref() { + search_related_acp + .into_iter() + .filter(|(acs, _)| !acs.attrs.is_disjoint(r_attrs)) + .collect() + } else { + search_related_acp + }; + + /* + search_related_acp.iter().for_each(|(racp, _)| { + trace!("Related acs -> {:?}", racp.acp.name); + }); + */ + + // == modify == + + let modify_related_acp: Vec<(&AccessControlModify, _)> = + self.modify_related_acp(rec_entry, ident); + + /* + modify_related_acp.iter().for_each(|(racp, _)| { + trace!("Related acm -> {:?}", racp.acp.name); + }); + */ + + let effective_permissions: Vec<_> = entries + .iter() + .map(|e| { + // == search == + let allowed_attrs: BTreeSet = search_related_acp + .iter() + .filter_map(|(acs, f_res)| { + // if it applies + if e.entry_match_no_index(f_res) { + // security_access!(entry = ?e.get_uuid(), acs = %acs.acp.name, "entry matches acs"); + Some(acs.attrs.iter().cloned()) + } else { + trace!(entry = ?e.get_uuid(), acs = %acs.acp.name, "entry DOES NOT match acs"); // should this be `security_access`? + None + } + }) + .flatten() + .collect(); + + security_access!( + requested = ?attrs, + allows = ?allowed_attrs, + "attributes", + ); + + // intersect? + let search_effective = if let Some(r_attrs) = attrs.as_ref() { + r_attrs & &allowed_attrs } else { - search_related_acp + allowed_attrs }; - /* - search_related_acp.iter().for_each(|(racp, _)| { - trace!("Related acs -> {:?}", racp.acp.name); - }); - */ + // == modify == + let modify_scoped_acp: Vec<&AccessControlModify> = modify_related_acp + .iter() + .filter_map(|(acm, f_res)| { + if e.entry_match_no_index(f_res) { + Some(*acm) + } else { + None + } + }) + .collect(); - // == modify == + let modify_pres: BTreeSet = modify_scoped_acp + .iter() + .flat_map(|acp| acp.presattrs.iter().cloned()) + .collect(); - let modify_related_acp: Vec<(&AccessControlModify, _)> = - self.modify_related_acp(rec_entry, ident); + let modify_rem: BTreeSet = modify_scoped_acp + .iter() + .flat_map(|acp| acp.remattrs.iter().cloned()) + .collect(); - /* - modify_related_acp.iter().for_each(|(racp, _)| { - trace!("Related acm -> {:?}", racp.acp.name); - }); - */ + let modify_class: BTreeSet = modify_scoped_acp + .iter() + .flat_map(|acp| acp.classes.iter().cloned()) + .collect(); - let effective_permissions: Vec<_> = entries - .iter() - .map(|e| { - // == search == - let allowed_attrs: BTreeSet = search_related_acp - .iter() - .filter_map(|(acs, f_res)| { - // if it applies - if e.entry_match_no_index(f_res) { - // security_access!(entry = ?e.get_uuid(), acs = %acs.acp.name, "entry matches acs"); - Some(acs.attrs.iter().cloned()) - } else { - trace!(entry = ?e.get_uuid(), acs = %acs.acp.name, "entry DOES NOT match acs"); // should this be `security_access`? - None - } - }) - .flatten() - .collect(); + AccessEffectivePermission { + target: e.get_uuid(), + search: search_effective, + modify_pres, + modify_rem, + modify_class, + } + }) + .collect(); - security_access!( - requested = ?attrs, - allows = ?allowed_attrs, - "attributes", - ); + effective_permissions.iter().for_each(|ep| { + trace!(?ep); + }); - // intersect? 
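The `// intersect?` question answered just below uses the BitAnd impl on BTreeSet references, which allocates a new set holding the intersection of the requested and allowed attributes. A tiny standalone illustration with hypothetical values:

    use std::collections::BTreeSet;

    fn main() {
        let requested: BTreeSet<String> =
            ["name", "mail"].map(String::from).into_iter().collect();
        let allowed: BTreeSet<String> =
            ["mail", "class"].map(String::from).into_iter().collect();

        // `&a & &b` mirrors `r_attrs & &allowed_attrs` in the code below.
        let effective: BTreeSet<String> = &requested & &allowed;
        assert_eq!(effective.into_iter().collect::<Vec<_>>(), vec!["mail".to_string()]);
    }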
- let search_effective = if let Some(r_attrs) = attrs.as_ref() { - r_attrs & &allowed_attrs - } else { - allowed_attrs - }; - - // == modify == - let modify_scoped_acp: Vec<&AccessControlModify> = modify_related_acp - .iter() - .filter_map(|(acm, f_res)| { - if e.entry_match_no_index(f_res) { - Some(*acm) - } else { - None - } - }) - .collect(); - - let modify_pres: BTreeSet = modify_scoped_acp - .iter() - .flat_map(|acp| acp.presattrs.iter().cloned()) - .collect(); - - let modify_rem: BTreeSet = modify_scoped_acp - .iter() - .flat_map(|acp| acp.remattrs.iter().cloned()) - .collect(); - - let modify_class: BTreeSet = modify_scoped_acp - .iter() - .flat_map(|acp| acp.classes.iter().cloned()) - .collect(); - - AccessEffectivePermission { - target: e.get_uuid(), - search: search_effective, - modify_pres, - modify_rem, - modify_class, - } - }) - .collect(); - - effective_permissions.iter().for_each(|ep| { - trace!(?ep); - }); - - Ok(effective_permissions) - }) + Ok(effective_permissions) } } @@ -1522,15 +1494,17 @@ impl AccessControls { #[cfg(test)] mod tests { + use std::collections::BTreeSet; + use std::sync::Arc; + + use uuid::uuid; + use crate::access::{ AccessControlCreate, AccessControlDelete, AccessControlModify, AccessControlProfile, AccessControlSearch, AccessControls, AccessControlsTransaction, AccessEffectivePermission, }; use crate::event::{CreateEvent, DeleteEvent, ModifyEvent, SearchEvent}; use crate::prelude::*; - use std::collections::BTreeSet; - use std::sync::Arc; - use uuid::uuid; macro_rules! acp_from_entry_err { ( diff --git a/kanidmd/idm/src/actors/v1_read.rs b/kanidmd/idm/src/actors/v1_read.rs index 38d1cba8b..65db4166d 100644 --- a/kanidmd/idm/src/actors/v1_read.rs +++ b/kanidmd/idm/src/actors/v1_read.rs @@ -1,23 +1,28 @@ -use tracing::{error, info, instrument, trace}; - -use chrono::{DateTime, SecondsFormat, Utc}; +use std::convert::TryFrom; +use std::fs; +use std::path::{Path, PathBuf}; use std::sync::Arc; -use crate::prelude::*; +use kanidm_proto::v1::{ + ApiToken, AuthRequest, BackupCodesView, CURequest, CUSessionToken, CUStatus, CredentialStatus, + Entry as ProtoEntry, OperationError, RadiusAuthToken, SearchRequest, SearchResponse, + UnixGroupToken, UnixUserToken, WhoamiResponse, +}; +use ldap3_proto::simple::*; +use regex::Regex; +use tracing::{error, info, instrument, trace}; +use uuid::Uuid; use crate::be::BackendTransaction; - use crate::event::{ AuthEvent, AuthResult, OnlineBackupEvent, SearchEvent, SearchResult, WhoamiResult, }; +use crate::filter::{Filter, FilterInvalid}; use crate::idm::credupdatesession::CredentialUpdateSessionToken; use crate::idm::event::{ CredentialStatusEvent, RadiusAuthTokenEvent, ReadBackupCodeEvent, UnixGroupTokenEvent, UnixUserAuthEvent, UnixUserTokenEvent, }; -use kanidm_proto::v1::{BackupCodesView, OperationError, RadiusAuthToken}; - -use crate::filter::{Filter, FilterInvalid}; use crate::idm::oauth2::{ AccessTokenIntrospectRequest, AccessTokenIntrospectResponse, AccessTokenRequest, AccessTokenResponse, AuthorisationRequest, AuthorisePermitSuccess, AuthoriseResponse, @@ -26,20 +31,7 @@ use crate::idm::oauth2::{ use crate::idm::server::{IdmServer, IdmServerTransaction}; use crate::idm::serviceaccount::ListApiTokenEvent; use crate::ldap::{LdapBoundToken, LdapResponseState, LdapServer}; - -use kanidm_proto::v1::Entry as ProtoEntry; -use kanidm_proto::v1::{ - ApiToken, AuthRequest, CURequest, CUSessionToken, CUStatus, CredentialStatus, SearchRequest, - SearchResponse, UnixGroupToken, UnixUserToken, WhoamiResponse, -}; - -use 
regex::Regex; -use std::fs; -use std::path::{Path, PathBuf}; -use uuid::Uuid; - -use ldap3_proto::simple::*; -use std::convert::TryFrom; +use crate::prelude::*; // =========================================================== @@ -94,35 +86,25 @@ impl QueryServerReadV1 { // Begin a read let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - // ! NOTICE: The inner function contains a short-circuiting `return`, which is only exits the closure. - // ! If we removed the `lperf_op_segment` and kept the inside, this would short circuit before logging `audit`. - // ! However, since we immediately return `res` after logging `audit`, and we should be removing the lperf stuff - // ! and the logging of `audit` at the same time, it is ok if the inner code short circuits the whole function because - // ! there is no work to be done afterwards. - // ! However, if we want to do work after `res` is calculated, we need to pass `spanned` a closure instead of a block - // ! in order to not short-circuit the entire function. - let res = spanned!("actors::v1_read::handle", { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(?e, "Invalid identity"); - e - })?; + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(?e, "Invalid identity"); + e + })?; - // Make an event from the request - let search = - SearchEvent::from_message(ident, &req, &idms_prox_read.qs_read).map_err(|e| { - admin_error!(?e, "Failed to begin search"); - e - })?; + // Make an event from the request + let search = + SearchEvent::from_message(ident, &req, &idms_prox_read.qs_read).map_err(|e| { + admin_error!(?e, "Failed to begin search"); + e + })?; - trace!(?search, "Begin event"); + trace!(?search, "Begin event"); - let entries = idms_prox_read.qs_read.search_ext(&search)?; + let entries = idms_prox_read.qs_read.search_ext(&search)?; - SearchResult::new(&idms_prox_read.qs_read, &entries).map(SearchResult::response) - }); - res + SearchResult::new(&idms_prox_read.qs_read, &entries).map(SearchResult::response) } #[instrument( @@ -184,34 +166,33 @@ impl QueryServerReadV1 { ) -> Result<(), OperationError> { trace!(eventid = ?msg.eventid, "Begin online backup event"); - let now: DateTime = Utc::now(); - let timestamp = now.to_rfc3339_opts(SecondsFormat::Secs, true); + #[allow(deprecated)] + let now = time::OffsetDateTime::now_local(); + let timestamp = now.format(time::Format::Rfc3339); let dest_file = format!("{}/backup-{}.json", outpath, timestamp); - match Path::new(&dest_file).exists() { - true => { - error!( - "Online backup file {} already exists, will not owerwrite it.", - dest_file - ); - return Err(OperationError::InvalidState); - } - false => { - let idms_prox_read = self.idms.proxy_read_async().await; - spanned!("actors::v1_read::handle", { - idms_prox_read - .qs_read - .get_be_txn() - .backup(&dest_file) - .map(|()| { - info!("Online backup created {} successfully", dest_file); - }) - .map_err(|e| { - error!("Online backup failed to create {}: {:?}", dest_file, e); - OperationError::InvalidState - })?; - }); - } + if Path::new(&dest_file).exists() { + error!( + "Online backup file {} already exists, will not overwrite it.", + dest_file + ); + return Err(OperationError::InvalidState); + } + + // Scope to limit the read txn. 
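Just above, the rewritten backup handler guards the destination with a plain `Path::exists` check before any write happens. In isolation, with a hypothetical destination name (the real one embeds an RFC 3339 timestamp from the time crate):

    use std::path::Path;

    fn main() {
        let dest_file = "/tmp/backup-2022-01-01T00:00:00Z.json"; // hypothetical path
        if Path::new(dest_file).exists() {
            // Mirrors the handler: refuse to clobber an existing backup.
            eprintln!(
                "Online backup file {} already exists, will not overwrite it.",
                dest_file
            );
            return;
        }
        println!("would write backup to {}", dest_file);
    }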
+ { + let idms_prox_read = self.idms.proxy_read_async().await; + idms_prox_read + .qs_read + .get_be_txn() + .backup(&dest_file) + .map(|()| { + info!("Online backup created {} successfully", dest_file); + }) + .map_err(|e| { + error!("Online backup failed to create {}: {:?}", dest_file, e); + OperationError::InvalidState + })?; } // pattern to find automatically generated backup files @@ -314,53 +295,42 @@ impl QueryServerReadV1 { // Begin a read let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - // ! NOTICE: The inner function contains a short-circuiting `return`, which is only exits the closure. - // ! If we removed the `lperf_op_segment` and kept the inside, this would short circuit before logging `audit`. - // ! However, since we immediately return `res` after logging `audit`, and we should be removing the lperf stuff - // ! and the logging of `audit` at the same time, it is ok if the inner code short circuits the whole function because - // ! there is no work to be done afterwards. - // ! However, if we want to do work after `res` is calculated, we need to pass `spanned` a closure instead of a block - // ! in order to not short-circuit the entire function. - let res = spanned!("actors::v1_read::handle", { - // Make an event from the whoami request. This will process the event and - // generate a selfuuid search. - // - // This current handles the unauthenticated check, and will - // trigger the failure, but if we can manage to work out async - // then move this to core.rs, and don't allow Option to get - // this far. - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(?e, "Invalid identity"); - e - })?; + // Make an event from the whoami request. This will process the event and + // generate a selfuuid search. + // + // This current handles the unauthenticated check, and will + // trigger the failure, but if we can manage to work out async + // then move this to core.rs, and don't allow Option to get + // this far. + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(?e, "Invalid identity"); + e + })?; - let srch = - SearchEvent::from_whoami_request(ident, &idms_prox_read.qs_read).map_err(|e| { - admin_error!(?e, "Failed to begin whoami"); - e - })?; + let srch = + SearchEvent::from_whoami_request(ident, &idms_prox_read.qs_read).map_err(|e| { + admin_error!(?e, "Failed to begin whoami"); + e + })?; - trace!(search = ?srch, "Begin event"); + trace!(search = ?srch, "Begin event"); - let mut entries = idms_prox_read.qs_read.search_ext(&srch)?; + let mut entries = idms_prox_read.qs_read.search_ext(&srch)?; - match entries.pop() { - Some(e) if entries.is_empty() => { - WhoamiResult::new(&idms_prox_read.qs_read, &e).map(WhoamiResult::response) - } - Some(_) => Err(OperationError::InvalidState), // Somehow matched multiple entries... - _ => Err(OperationError::NoMatchingEntries), + match entries.pop() { + Some(e) if entries.is_empty() => { + WhoamiResult::new(&idms_prox_read.qs_read, &e).map(WhoamiResult::response) } - }); - res + Some(_) => Err(OperationError::InvalidState), /* Somehow matched multiple entries... 
*/ + _ => Err(OperationError::NoMatchingEntries), + } } #[instrument( level = "info", - name = "search2", - skip(self, uat, filter, attrs, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_internalsearch( @@ -372,42 +342,38 @@ impl QueryServerReadV1 { ) -> Result, OperationError> { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; - // Make an event from the request - let srch = match SearchEvent::from_internal_message( - ident, - &filter, - attrs.as_deref(), - &idms_prox_read.qs_read, - ) { - Ok(s) => s, - Err(e) => { - admin_error!("Failed to begin internal api search: {:?}", e); - return Err(e); - } - }; - - trace!(?srch, "Begin event"); - - match idms_prox_read.qs_read.search_ext(&srch) { - Ok(entries) => SearchResult::new(&idms_prox_read.qs_read, &entries) - .map(|ok_sr| ok_sr.into_proto_array()), - Err(e) => Err(e), + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; + // Make an event from the request + let srch = match SearchEvent::from_internal_message( + ident, + &filter, + attrs.as_deref(), + &idms_prox_read.qs_read, + ) { + Ok(s) => s, + Err(e) => { + admin_error!("Failed to begin internal api search: {:?}", e); + return Err(e); } - }); - res + }; + + trace!(?srch, "Begin event"); + + match idms_prox_read.qs_read.search_ext(&srch) { + Ok(entries) => SearchResult::new(&idms_prox_read.qs_read, &entries) + .map(|ok_sr| ok_sr.into_proto_array()), + Err(e) => Err(e), + } } #[instrument( level = "info", - name = "search_recycled", - skip(self, uat, filter, attrs, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_internalsearchrecycled( @@ -420,42 +386,38 @@ impl QueryServerReadV1 { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; - // Make an event from the request - let srch = match SearchEvent::from_internal_recycle_message( - ident, - &filter, - attrs.as_deref(), - &idms_prox_read.qs_read, - ) { - Ok(s) => s, - Err(e) => { - admin_error!("Failed to begin recycled search: {:?}", e); - return Err(e); - } - }; - - trace!(?srch, "Begin event"); - - match idms_prox_read.qs_read.search_ext(&srch) { - Ok(entries) => SearchResult::new(&idms_prox_read.qs_read, &entries) - .map(|ok_sr| ok_sr.into_proto_array()), - Err(e) => Err(e), + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; + // Make an event from the request + let srch = match SearchEvent::from_internal_recycle_message( + ident, + &filter, + attrs.as_deref(), + &idms_prox_read.qs_read, + ) { + Ok(s) => s, + Err(e) => { + admin_error!("Failed to begin recycled search: {:?}", e); + return Err(e); } - }); - res + }; + + trace!(?srch, "Begin event"); + + match idms_prox_read.qs_read.search_ext(&srch) { + Ok(entries) => SearchResult::new(&idms_prox_read.qs_read, &entries) + .map(|ok_sr| ok_sr.into_proto_array()), + Err(e) => Err(e), + } } #[instrument( level = "info", - name = "radius_read", - 
skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_internalradiusread( @@ -466,60 +428,56 @@ impl QueryServerReadV1 { ) -> Result, OperationError> { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; - let target_uuid = idms_prox_read - .qs_read - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!("Error resolving id to target"); - e - })?; + let target_uuid = idms_prox_read + .qs_read + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!("Error resolving id to target"); + e + })?; - // Make an event from the request - let srch = match SearchEvent::from_target_uuid_request( - ident, - target_uuid, - &idms_prox_read.qs_read, - ) { - Ok(s) => s, - Err(e) => { - admin_error!("Failed to begin radius read: {:?}", e); - return Err(e); - } - }; - - trace!(?srch, "Begin event"); - - // We have to use search_ext to guarantee acs was applied. - match idms_prox_read.qs_read.search_ext(&srch) { - Ok(mut entries) => { - let r = entries - .pop() - // From the entry, turn it into the value - .and_then(|entry| { - entry - .get_ava_single("radius_secret") - .and_then(|v| v.get_secret_str().map(str::to_string)) - }); - Ok(r) - } - Err(e) => Err(e), + // Make an event from the request + let srch = match SearchEvent::from_target_uuid_request( + ident, + target_uuid, + &idms_prox_read.qs_read, + ) { + Ok(s) => s, + Err(e) => { + admin_error!("Failed to begin radius read: {:?}", e); + return Err(e); } - }); - res + }; + + trace!(?srch, "Begin event"); + + // We have to use search_ext to guarantee acs was applied. 
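+        // (search_ext, unlike a raw search, returns entries reduced by the
+        // access control profiles, so the secret is only readable when policy
+        // permits it.)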
+ match idms_prox_read.qs_read.search_ext(&srch) { + Ok(mut entries) => { + let r = entries + .pop() + // From the entry, turn it into the value + .and_then(|entry| { + entry + .get_ava_single("radius_secret") + .and_then(|v| v.get_secret_str().map(str::to_string)) + }); + Ok(r) + } + Err(e) => Err(e), + } } #[instrument( level = "info", - name = "radius_token_read", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_internalradiustokenread( @@ -531,46 +489,42 @@ impl QueryServerReadV1 { let ct = duration_from_epoch_now(); let mut idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; - let target_uuid = idms_prox_read - .qs_read - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!("Error resolving id to target"); - e - })?; + let target_uuid = idms_prox_read + .qs_read + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!("Error resolving id to target"); + e + })?; - // Make an event from the request - let rate = match RadiusAuthTokenEvent::from_parts( - // &idms_prox_read.qs_read, - ident, - target_uuid, - ) { - Ok(s) => s, - Err(e) => { - admin_error!("Failed to begin radius token read: {:?}", e); - return Err(e); - } - }; + // Make an event from the request + let rate = match RadiusAuthTokenEvent::from_parts( + // &idms_prox_read.qs_read, + ident, + target_uuid, + ) { + Ok(s) => s, + Err(e) => { + admin_error!("Failed to begin radius token read: {:?}", e); + return Err(e); + } + }; - trace!(?rate, "Begin event"); + trace!(?rate, "Begin event"); - idms_prox_read.get_radiusauthtoken(&rate, ct) - }); - res + idms_prox_read.get_radiusauthtoken(&rate, ct) } #[instrument( level = "info", - name = "unix_user_token_read", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_internalunixusertokenread( @@ -582,49 +536,42 @@ impl QueryServerReadV1 { let ct = duration_from_epoch_now(); let mut idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!( - "actors::v1_read::handle", - { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; - let target_uuid = idms_prox_read - .qs_read - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_info!( - err = ?e, - "Error resolving {} as gidnumber continuing ...", - uuid_or_name - ); - e - })?; + let target_uuid = idms_prox_read + .qs_read + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_info!( + err = ?e, + "Error resolving {} as gidnumber continuing ...", + uuid_or_name + ); + e + })?; - // Make an event from the request - let rate = match UnixUserTokenEvent::from_parts(ident, target_uuid) { - Ok(s) => s, - Err(e) => { - admin_error!("Failed to begin unix token read: {:?}", e); - return Err(e); - } - }; - - trace!(?rate, "Begin event"); - - idms_prox_read.get_unixusertoken(&rate, ct) + // Make an event from the request + let rate = match 
UnixUserTokenEvent::from_parts(ident, target_uuid) { + Ok(s) => s, + Err(e) => { + admin_error!("Failed to begin unix token read: {:?}", e); + return Err(e); } - ); - res + }; + + trace!(?rate, "Begin event"); + + idms_prox_read.get_unixusertoken(&rate, ct) } #[instrument( level = "info", - name = "unix_group_token_read", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_internalunixgrouptokenread( @@ -635,49 +582,42 @@ impl QueryServerReadV1 { ) -> Result { let ct = duration_from_epoch_now(); let mut idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!( - "actors::v1_read::handle", - { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; - let target_uuid = idms_prox_read - .qs_read - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_info!(err = ?e, "Error resolving as gidnumber continuing"); - e - })?; + let target_uuid = idms_prox_read + .qs_read + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_info!(err = ?e, "Error resolving as gidnumber continuing"); + e + })?; - // Make an event from the request - let rate = match UnixGroupTokenEvent::from_parts( - // &idms_prox_read.qs_read, - ident, - target_uuid, - ) { - Ok(s) => s, - Err(e) => { - admin_error!("Failed to begin unix group token read: {:?}", e); - return Err(e); - } - }; - - trace!(?rate, "Begin event"); - - idms_prox_read.get_unixgrouptoken(&rate) + // Make an event from the request + let rate = match UnixGroupTokenEvent::from_parts( + // &idms_prox_read.qs_read, + ident, + target_uuid, + ) { + Ok(s) => s, + Err(e) => { + admin_error!("Failed to begin unix group token read: {:?}", e); + return Err(e); } - ); - res + }; + + trace!(?rate, "Begin event"); + + idms_prox_read.get_unixgrouptoken(&rate) } #[instrument( level = "info", - name = "ssh_key_read", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_internalsshkeyread( @@ -688,62 +628,58 @@ impl QueryServerReadV1 { ) -> Result, OperationError> { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; - let target_uuid = idms_prox_read - .qs_read - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!("Error resolving id to target"); - e - })?; + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; + let target_uuid = idms_prox_read + .qs_read + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!("Error resolving id to target"); + e + })?; - // Make an event from the request - let srch = match SearchEvent::from_target_uuid_request( - ident, - target_uuid, - &idms_prox_read.qs_read, - ) { - Ok(s) => s, - Err(e) => { - admin_error!("Failed to begin ssh key read: {:?}", e); - return Err(e); - } - }; - - trace!(?srch, "Begin event"); - - match idms_prox_read.qs_read.search_ext(&srch) { - Ok(mut entries) => { - let r = entries - .pop() - // get the first entry - .and_then(|e| { - // From the entry, turn 
it into the value - e.get_ava_iter_sshpubkeys("ssh_publickey") - .map(|i| i.map(|s| s.to_string()).collect()) - }) - .unwrap_or_else(|| { - // No matching entry? Return none. - Vec::new() - }); - Ok(r) - } - Err(e) => Err(e), + // Make an event from the request + let srch = match SearchEvent::from_target_uuid_request( + ident, + target_uuid, + &idms_prox_read.qs_read, + ) { + Ok(s) => s, + Err(e) => { + admin_error!("Failed to begin ssh key read: {:?}", e); + return Err(e); } - }); - res + }; + + trace!(?srch, "Begin event"); + + match idms_prox_read.qs_read.search_ext(&srch) { + Ok(mut entries) => { + let r = entries + .pop() + // get the first entry + .and_then(|e| { + // From the entry, turn it into the value + e.get_ava_iter_sshpubkeys("ssh_publickey") + .map(|i| i.map(|s| s.to_string()).collect()) + }) + .unwrap_or_else(|| { + // No matching entry? Return none. + Vec::new() + }); + Ok(r) + } + Err(e) => Err(e), + } } #[instrument( level = "info", - name = "ssh_key_tag_read", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_internalsshkeytagread( @@ -755,64 +691,60 @@ impl QueryServerReadV1 { ) -> Result, OperationError> { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; - let target_uuid = idms_prox_read - .qs_read - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_info!("Error resolving id to target"); - e - })?; + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; + let target_uuid = idms_prox_read + .qs_read + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_info!("Error resolving id to target"); + e + })?; - // Make an event from the request - let srch = match SearchEvent::from_target_uuid_request( - ident, - target_uuid, - &idms_prox_read.qs_read, - ) { - Ok(s) => s, - Err(e) => { - admin_error!("Failed to begin sshkey tag read: {:?}", e); - return Err(e); - } - }; - - trace!(?srch, "Begin event"); - - match idms_prox_read.qs_read.search_ext(&srch) { - Ok(mut entries) => { - let r = entries - .pop() - // get the first entry - .map(|e| { - // From the entry, turn it into the value - e.get_ava_set("ssh_publickey").and_then(|vs| { - // Get the one tagged value - vs.get_ssh_tag(&tag).map(str::to_string) - }) - }) - .unwrap_or_else(|| { - // No matching entry? Return none. - None - }); - Ok(r) - } - Err(e) => Err(e), + // Make an event from the request + let srch = match SearchEvent::from_target_uuid_request( + ident, + target_uuid, + &idms_prox_read.qs_read, + ) { + Ok(s) => s, + Err(e) => { + admin_error!("Failed to begin sshkey tag read: {:?}", e); + return Err(e); } - }); - res + }; + + trace!(?srch, "Begin event"); + + match idms_prox_read.qs_read.search_ext(&srch) { + Ok(mut entries) => { + let r = entries + .pop() + // get the first entry + .map(|e| { + // From the entry, turn it into the value + e.get_ava_set("ssh_publickey").and_then(|vs| { + // Get the one tagged value + vs.get_ssh_tag(&tag).map(str::to_string) + }) + }) + .unwrap_or_else(|| { + // No matching entry? Return none. 
+ None + }); + Ok(r) + } + Err(e) => Err(e), + } } #[instrument( level = "info", - name = "service_account_api_token_get", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_service_account_api_token_get( @@ -844,8 +776,7 @@ impl QueryServerReadV1 { #[instrument( level = "info", - name = "idm_account_unix_auth", - skip(self, uat, uuid_or_name, cred, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmaccountunixauth( @@ -857,7 +788,6 @@ impl QueryServerReadV1 { ) -> Result, OperationError> { let ct = duration_from_epoch_now(); let mut idm_auth = self.idms.auth_async().await; - // let res = spanned!("actors::v1_read::handle", { // resolve the id let ident = idm_auth .validate_and_parse_token_to_ident(uat.as_deref(), ct) @@ -891,14 +821,12 @@ impl QueryServerReadV1 { security_info!(?res, "Sending result"); - // res }); res } #[instrument( level = "info", - name = "idm_credential_status", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmcredentialstatus( @@ -910,45 +838,41 @@ impl QueryServerReadV1 { let ct = duration_from_epoch_now(); let mut idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; - let target_uuid = idms_prox_read - .qs_read - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving id to target"); - e - })?; + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; + let target_uuid = idms_prox_read + .qs_read + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!(err = ?e, "Error resolving id to target"); + e + })?; - // Make an event from the request - let cse = match CredentialStatusEvent::from_parts( - // &idms_prox_read.qs_read, - ident, - target_uuid, - ) { - Ok(s) => s, - Err(e) => { - admin_error!(err = ?e, "Failed to begin credential status read"); - return Err(e); - } - }; + // Make an event from the request + let cse = match CredentialStatusEvent::from_parts( + // &idms_prox_read.qs_read, + ident, + target_uuid, + ) { + Ok(s) => s, + Err(e) => { + admin_error!(err = ?e, "Failed to begin credential status read"); + return Err(e); + } + }; - trace!(?cse, "Begin event"); + trace!(?cse, "Begin event"); - idms_prox_read.get_credentialstatus(&cse) - }); - res + idms_prox_read.get_credentialstatus(&cse) } #[instrument( level = "info", - name = "idm_backup_code_view", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmbackupcodeview( @@ -960,45 +884,41 @@ impl QueryServerReadV1 { let ct = duration_from_epoch_now(); let mut idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - let ident = idms_prox_read - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; - let target_uuid = idms_prox_read - .qs_read - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!("Error resolving id to target"); - e - })?; + let ident = idms_prox_read + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; + let target_uuid = idms_prox_read + .qs_read + 
.name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!("Error resolving id to target"); + e + })?; - // Make an event from the request - let rbce = match ReadBackupCodeEvent::from_parts( - // &idms_prox_read.qs_read, - ident, - target_uuid, - ) { - Ok(s) => s, - Err(e) => { - admin_error!("Failed to begin backup code read: {:?}", e); - return Err(e); - } - }; + // Make an event from the request + let rbce = match ReadBackupCodeEvent::from_parts( + // &idms_prox_read.qs_read, + ident, + target_uuid, + ) { + Ok(s) => s, + Err(e) => { + admin_error!("Failed to begin backup code read: {:?}", e); + return Err(e); + } + }; - trace!(?rbce, "Begin event"); + trace!(?rbce, "Begin event"); - idms_prox_read.get_backup_codes(&rbce) - }); - res + idms_prox_read.get_backup_codes(&rbce) } #[instrument( level = "info", - name = "idm_credential_update_status", - skip(self, session_token, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmcredentialupdatestatus( @@ -1008,29 +928,25 @@ impl QueryServerReadV1 { ) -> Result { let ct = duration_from_epoch_now(); let idms_cred_update = self.idms.cred_update_transaction_async().await; - let res = spanned!("actors::v1_read::handle", { - let session_token = CredentialUpdateSessionToken { - token_enc: session_token.token, - }; + let session_token = CredentialUpdateSessionToken { + token_enc: session_token.token, + }; - idms_cred_update - .credential_update_status(&session_token, ct) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_update_status", - ); - e - }) - .map(|sta| sta.into()) - }); - res + idms_cred_update + .credential_update_status(&session_token, ct) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin credential_update_status", + ); + e + }) + .map(|sta| sta.into()) } #[instrument( level = "info", - name = "idm_credential_update", - skip(self, session_token, scr, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmcredentialupdate( @@ -1041,132 +957,128 @@ impl QueryServerReadV1 { ) -> Result { let ct = duration_from_epoch_now(); let idms_cred_update = self.idms.cred_update_transaction_async().await; - let res = spanned!("actors::v1_read::handle", { - let session_token = CredentialUpdateSessionToken { - token_enc: session_token.token, - }; + let session_token = CredentialUpdateSessionToken { + token_enc: session_token.token, + }; - debug!(?scr); + debug!(?scr); - match scr { - CURequest::PrimaryRemove => idms_cred_update - .credential_primary_delete(&session_token, ct) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_primary_delete", - ); - e - }), - CURequest::Password(pw) => idms_cred_update - .credential_primary_set_password(&session_token, ct, &pw) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_primary_set_password", - ); - e - }), - CURequest::CancelMFAReg => idms_cred_update - .credential_update_cancel_mfareg(&session_token, ct) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_update_cancel_mfareg", - ); - e - }), - CURequest::TotpGenerate => idms_cred_update - .credential_primary_init_totp(&session_token, ct) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_primary_init_totp", - ); - e - }), - CURequest::TotpVerify(totp_chal) => idms_cred_update - .credential_primary_check_totp(&session_token, ct, totp_chal) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_primary_check_totp", - ); - e - }), - CURequest::TotpAcceptSha1 => 
idms_cred_update - .credential_primary_accept_sha1_totp(&session_token, ct) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_primary_accept_sha1_totp", - ); - e - }), - CURequest::TotpRemove => idms_cred_update - .credential_primary_remove_totp(&session_token, ct) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_primary_remove_totp", - ); - e - }), - CURequest::BackupCodeGenerate => idms_cred_update - .credential_primary_init_backup_codes(&session_token, ct) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_primary_init_backup_codes", - ); - e - }), - CURequest::BackupCodeRemove => idms_cred_update - .credential_primary_remove_backup_codes(&session_token, ct) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_primary_remove_backup_codes", - ); - e - }), - CURequest::PasskeyInit => idms_cred_update - .credential_passkey_init(&session_token, ct) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_passkey_init", - ); - e - }), - CURequest::PasskeyFinish(label, rpkc) => idms_cred_update - .credential_passkey_finish(&session_token, ct, label, &rpkc) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_passkey_init", - ); - e - }), - CURequest::PasskeyRemove(uuid) => idms_cred_update - .credential_passkey_remove(&session_token, ct, uuid) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin credential_passkey_init", - ); - e - }), - } - .map(|sta| sta.into()) - }); - res + match scr { + CURequest::PrimaryRemove => idms_cred_update + .credential_primary_delete(&session_token, ct) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin credential_primary_delete", + ); + e + }), + CURequest::Password(pw) => idms_cred_update + .credential_primary_set_password(&session_token, ct, &pw) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin credential_primary_set_password", + ); + e + }), + CURequest::CancelMFAReg => idms_cred_update + .credential_update_cancel_mfareg(&session_token, ct) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin credential_update_cancel_mfareg", + ); + e + }), + CURequest::TotpGenerate => idms_cred_update + .credential_primary_init_totp(&session_token, ct) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin credential_primary_init_totp", + ); + e + }), + CURequest::TotpVerify(totp_chal) => idms_cred_update + .credential_primary_check_totp(&session_token, ct, totp_chal) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin credential_primary_check_totp", + ); + e + }), + CURequest::TotpAcceptSha1 => idms_cred_update + .credential_primary_accept_sha1_totp(&session_token, ct) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin credential_primary_accept_sha1_totp", + ); + e + }), + CURequest::TotpRemove => idms_cred_update + .credential_primary_remove_totp(&session_token, ct) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin credential_primary_remove_totp", + ); + e + }), + CURequest::BackupCodeGenerate => idms_cred_update + .credential_primary_init_backup_codes(&session_token, ct) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin credential_primary_init_backup_codes", + ); + e + }), + CURequest::BackupCodeRemove => idms_cred_update + .credential_primary_remove_backup_codes(&session_token, ct) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin credential_primary_remove_backup_codes", + ); + e + }), + CURequest::PasskeyInit 
=> idms_cred_update
+                .credential_passkey_init(&session_token, ct)
+                .map_err(|e| {
+                    admin_error!(
+                        err = ?e,
+                        "Failed to begin credential_passkey_init",
+                    );
+                    e
+                }),
+            CURequest::PasskeyFinish(label, rpkc) => idms_cred_update
+                .credential_passkey_finish(&session_token, ct, label, &rpkc)
+                .map_err(|e| {
+                    admin_error!(
+                        err = ?e,
+                        "Failed to begin credential_passkey_finish",
+                    );
+                    e
+                }),
+            CURequest::PasskeyRemove(uuid) => idms_cred_update
+                .credential_passkey_remove(&session_token, ct, uuid)
+                .map_err(|e| {
+                    admin_error!(
+                        err = ?e,
+                        "Failed to begin credential_passkey_remove",
+                    );
+                    e
+                }),
+        }
+        .map(|sta| sta.into())
     }

     #[instrument(
         level = "info",
-        name = "oauth2_authorise",
-        skip(self, uat, auth_req, eventid)
+        skip_all,
+        fields(uuid = ?eventid)
     )]
     pub async fn handle_oauth2_authorise(
@@ -1177,29 +1089,25 @@
     ) -> Result<AuthoriseResponse, Oauth2Error> {
         let ct = duration_from_epoch_now();
         let idms_prox_read = self.idms.proxy_read_async().await;
-        let res = spanned!("actors::v1_read::handle", {
-            let (ident, uat) = idms_prox_read
-                .validate_and_parse_uat(uat.as_deref(), ct)
-                .and_then(|uat| {
-                    idms_prox_read
-                        .process_uat_to_identity(&uat, ct)
-                        .map(|ident| (ident, uat))
-                })
-                .map_err(|e| {
-                    admin_error!("Invalid identity: {:?}", e);
-                    Oauth2Error::AuthenticationRequired
-                })?;
+        let (ident, uat) = idms_prox_read
+            .validate_and_parse_uat(uat.as_deref(), ct)
+            .and_then(|uat| {
+                idms_prox_read
+                    .process_uat_to_identity(&uat, ct)
+                    .map(|ident| (ident, uat))
+            })
+            .map_err(|e| {
+                admin_error!("Invalid identity: {:?}", e);
+                Oauth2Error::AuthenticationRequired
+            })?;

-            // Now we can send to the idm server for authorisation checking.
-            idms_prox_read.check_oauth2_authorisation(&ident, &uat, &auth_req, ct)
-        });
-        res
+        // Now we can send to the idm server for authorisation checking.
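+        // (this checks the request against the authenticated identity; the
+        // result may be an immediate permit or a consent request, which the
+        // permit/reject handlers below then resolve.)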
+ idms_prox_read.check_oauth2_authorisation(&ident, &uat, &auth_req, ct) } #[instrument( level = "info", - name = "oauth2_authorise_permit", - skip(self, uat, consent_req, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_oauth2_authorise_permit( @@ -1210,28 +1118,24 @@ impl QueryServerReadV1 { ) -> Result { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - let (ident, uat) = idms_prox_read - .validate_and_parse_uat(uat.as_deref(), ct) - .and_then(|uat| { - idms_prox_read - .process_uat_to_identity(&uat, ct) - .map(|ident| (ident, uat)) - }) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; + let (ident, uat) = idms_prox_read + .validate_and_parse_uat(uat.as_deref(), ct) + .and_then(|uat| { + idms_prox_read + .process_uat_to_identity(&uat, ct) + .map(|ident| (ident, uat)) + }) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; - idms_prox_read.check_oauth2_authorise_permit(&ident, &uat, &consent_req, ct) - }); - res + idms_prox_read.check_oauth2_authorise_permit(&ident, &uat, &consent_req, ct) } #[instrument( level = "info", - name = "oauth2_authorise_reject", - skip(self, uat, consent_req, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_oauth2_authorise_reject( @@ -1242,28 +1146,24 @@ impl QueryServerReadV1 { ) -> Result { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - let (ident, uat) = idms_prox_read - .validate_and_parse_uat(uat.as_deref(), ct) - .and_then(|uat| { - idms_prox_read - .process_uat_to_identity(&uat, ct) - .map(|ident| (ident, uat)) - }) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; + let (ident, uat) = idms_prox_read + .validate_and_parse_uat(uat.as_deref(), ct) + .and_then(|uat| { + idms_prox_read + .process_uat_to_identity(&uat, ct) + .map(|ident| (ident, uat)) + }) + .map_err(|e| { + admin_error!("Invalid identity: {:?}", e); + e + })?; - idms_prox_read.check_oauth2_authorise_reject(&ident, &uat, &consent_req, ct) - }); - res + idms_prox_read.check_oauth2_authorise_reject(&ident, &uat, &consent_req, ct) } #[instrument( level = "info", - name = "oauth2_token_exchange", - skip(self, client_authz, token_req, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_oauth2_token_exchange( @@ -1274,17 +1174,13 @@ impl QueryServerReadV1 { ) -> Result { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - // Now we can send to the idm server for authorisation checking. - idms_prox_read.check_oauth2_token_exchange(client_authz.as_deref(), &token_req, ct) - }); - res + // Now we can send to the idm server for authorisation checking. + idms_prox_read.check_oauth2_token_exchange(client_authz.as_deref(), &token_req, ct) } #[instrument( level = "info", - name = "oauth2_token_introspect", - skip(self, client_authz, intr_req, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_oauth2_token_introspect( @@ -1295,17 +1191,13 @@ impl QueryServerReadV1 { ) -> Result { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - // Now we can send to the idm server for introspection checking. 
- idms_prox_read.check_oauth2_token_introspect(&client_authz, &intr_req, ct) - }); - res + // Now we can send to the idm server for introspection checking. + idms_prox_read.check_oauth2_token_introspect(&client_authz, &intr_req, ct) } #[instrument( level = "info", - name = "oauth2_openid_userinfo", - skip(self, client_id, client_authz, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_oauth2_openid_userinfo( @@ -1316,16 +1208,12 @@ impl QueryServerReadV1 { ) -> Result { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - idms_prox_read.oauth2_openid_userinfo(&client_id, &client_authz, ct) - }); - res + idms_prox_read.oauth2_openid_userinfo(&client_id, &client_authz, ct) } #[instrument( level = "info", - name = "oauth2_openid_discovery", - skip(self, client_id, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_oauth2_openid_discovery( @@ -1334,16 +1222,12 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - idms_prox_read.oauth2_openid_discovery(&client_id) - }); - res + idms_prox_read.oauth2_openid_discovery(&client_id) } #[instrument( level = "info", - name = "oauth2_openid_publickey", - skip(self, client_id, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_oauth2_openid_publickey( @@ -1352,30 +1236,22 @@ impl QueryServerReadV1 { eventid: Uuid, ) -> Result { let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - idms_prox_read.oauth2_openid_publickey(&client_id) - }); - res + idms_prox_read.oauth2_openid_publickey(&client_id) } #[instrument( level = "info", - name = "get_domain_display_name", - skip(self, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn get_domain_display_name(&self, eventid: Uuid) -> String { let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - idms_prox_read.qs_read.get_domain_display_name().to_string() - }); - res + idms_prox_read.qs_read.get_domain_display_name().to_string() } #[instrument( level = "info", - name = "auth_valid", - skip(self, uat, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_auth_valid( @@ -1386,22 +1262,18 @@ impl QueryServerReadV1 { let ct = duration_from_epoch_now(); let idms_prox_read = self.idms.proxy_read_async().await; - let res = spanned!("actors::v1_read::handle", { - idms_prox_read - .validate_and_parse_uat(uat.as_deref(), ct) - .map(|_| ()) - .map_err(|e| { - admin_error!("Invalid token: {:?}", e); - e - }) - }); - res + idms_prox_read + .validate_and_parse_uat(uat.as_deref(), ct) + .map(|_| ()) + .map_err(|e| { + admin_error!("Invalid token: {:?}", e); + e + }) } #[instrument( level = "info", - name = "ldap_request", - skip(self, eventid, protomsg, uat) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_ldaprequest( @@ -1410,7 +1282,6 @@ impl QueryServerReadV1 { protomsg: LdapMsg, uat: Option, ) -> Option { - // let res = spanned!( "actors::v1_read::handle", { let res = match ServerOps::try_from(protomsg) { Ok(server_op) => self .ldap @@ -1428,7 +1299,6 @@ impl QueryServerReadV1 { format!("Invalid Request {:?}", &eventid).as_str(), )), }; - // }); Some(res) } } diff --git a/kanidmd/idm/src/actors/v1_write.rs b/kanidmd/idm/src/actors/v1_write.rs index 7a17cc096..ce17fedef 100644 --- a/kanidmd/idm/src/actors/v1_write.rs +++ 
b/kanidmd/idm/src/actors/v1_write.rs @@ -1,43 +1,35 @@ use std::iter; use std::sync::Arc; use std::time::Duration; -use tracing::{info, instrument, span, trace, Level}; -use crate::prelude::*; - -use crate::idm::credupdatesession::{ - CredentialUpdateIntentToken, CredentialUpdateSessionToken, InitCredentialUpdateEvent, - InitCredentialUpdateIntentEvent, +use kanidm_proto::v1::{ + AccountUnixExtend, CUIntentToken, CUSessionToken, CUStatus, CreateRequest, DeleteRequest, + Entry as ProtoEntry, GroupUnixExtend, Modify as ProtoModify, ModifyList as ProtoModifyList, + ModifyRequest, OperationError, }; +use time::OffsetDateTime; +use tracing::{info, instrument, span, trace, Level}; +use uuid::Uuid; use crate::event::{ CreateEvent, DeleteEvent, ModifyEvent, PurgeRecycledEvent, PurgeTombstoneEvent, ReviveRecycledEvent, }; +use crate::filter::{Filter, FilterInvalid}; +use crate::idm::credupdatesession::{ + CredentialUpdateIntentToken, CredentialUpdateSessionToken, InitCredentialUpdateEvent, + InitCredentialUpdateIntentEvent, +}; +use crate::idm::delayed::DelayedAction; use crate::idm::event::{ GeneratePasswordEvent, RegenerateRadiusSecretEvent, UnixPasswordChangeEvent, }; -use crate::modify::{Modify, ModifyInvalid, ModifyList}; -use crate::value::{PartialValue, Value}; -use kanidm_proto::v1::OperationError; - -use crate::filter::{Filter, FilterInvalid}; -use crate::idm::delayed::DelayedAction; use crate::idm::server::{IdmServer, IdmServerTransaction}; use crate::idm::serviceaccount::{DestroyApiTokenEvent, GenerateApiTokenEvent}; +use crate::modify::{Modify, ModifyInvalid, ModifyList}; +use crate::prelude::*; use crate::utils::duration_from_epoch_now; - -use kanidm_proto::v1::Entry as ProtoEntry; -use kanidm_proto::v1::Modify as ProtoModify; -use kanidm_proto::v1::ModifyList as ProtoModifyList; -use kanidm_proto::v1::{ - AccountUnixExtend, CUIntentToken, CUSessionToken, CUStatus, CreateRequest, DeleteRequest, - GroupUnixExtend, ModifyRequest, -}; - -use time::OffsetDateTime; - -use uuid::Uuid; +use crate::value::{PartialValue, Value}; pub struct QueryServerWriteV1 { _log_level: Option, @@ -63,6 +55,7 @@ impl QueryServerWriteV1 { &(*x_ptr) } + #[instrument(level = "debug", skip_all)] async fn modify_from_parts( &self, uat: Option, @@ -71,47 +64,46 @@ impl QueryServerWriteV1 { filter: Filter, ) -> Result<(), OperationError> { let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - spanned!("modify_from_parts", { - let ct = duration_from_epoch_now(); + let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; - let target_uuid = idms_prox_write - .qs_write - .name_to_uuid(uuid_or_name) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving id to target"); - e - })?; + let target_uuid = idms_prox_write + .qs_write + .name_to_uuid(uuid_or_name) + .map_err(|e| { + admin_error!(err = ?e, "Error resolving id to target"); + e + })?; - let mdf = match ModifyEvent::from_parts( - ident, - target_uuid, - proto_ml, - filter, - &idms_prox_write.qs_write, - ) { - Ok(m) => m, - Err(e) => { - admin_error!(err=?e, "Failed to begin modify"); - return Err(e); - } - }; + let mdf = match ModifyEvent::from_parts( + ident, + target_uuid, + proto_ml, + filter, + &idms_prox_write.qs_write, + ) { + 
Ok(m) => m, + Err(e) => { + admin_error!(err=?e, "Failed to begin modify"); + return Err(e); + } + }; - trace!(?mdf, "Begin modify event"); + trace!(?mdf, "Begin modify event"); - idms_prox_write - .qs_write - .modify(&mdf) - .and_then(|_| idms_prox_write.commit().map(|_| ())) - }) + idms_prox_write + .qs_write + .modify(&mdf) + .and_then(|_| idms_prox_write.commit().map(|_| ())) } + #[instrument(level = "debug", skip_all)] async fn modify_from_internal_parts( &self, uat: Option, @@ -120,54 +112,51 @@ impl QueryServerWriteV1 { filter: Filter, ) -> Result<(), OperationError> { let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - spanned!("modify_from_internal_parts", { - let ct = duration_from_epoch_now(); + let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; - let target_uuid = idms_prox_write - .qs_write - .name_to_uuid(uuid_or_name) - .map_err(|e| { - admin_error!("Error resolving id to target"); - e - })?; + let target_uuid = idms_prox_write + .qs_write + .name_to_uuid(uuid_or_name) + .map_err(|e| { + admin_error!("Error resolving id to target"); + e + })?; - let f_uuid = filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid))); - // Add any supplemental conditions we have. - let joined_filter = Filter::join_parts_and(f_uuid, filter); + let f_uuid = filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid))); + // Add any supplemental conditions we have. + let joined_filter = Filter::join_parts_and(f_uuid, filter); - let mdf = match ModifyEvent::from_internal_parts( - ident, - ml, - &joined_filter, - &idms_prox_write.qs_write, - ) { - Ok(m) => m, - Err(e) => { - admin_error!(err = ?e, "Failed to begin modify"); - return Err(e); - } - }; + let mdf = match ModifyEvent::from_internal_parts( + ident, + ml, + &joined_filter, + &idms_prox_write.qs_write, + ) { + Ok(m) => m, + Err(e) => { + admin_error!(err = ?e, "Failed to begin modify"); + return Err(e); + } + }; - trace!(?mdf, "Begin modify event"); + trace!(?mdf, "Begin modify event"); - idms_prox_write - .qs_write - .modify(&mdf) - .and_then(|_| idms_prox_write.commit().map(|_| ())) - }) + idms_prox_write + .qs_write + .modify(&mdf) + .and_then(|_| idms_prox_write.commit().map(|_| ())) } #[instrument( level = "info", - name = "create", - skip(self, uat, req, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_create( @@ -177,39 +166,34 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - let res = spanned!("actors::v1_write::handle", { - let ct = duration_from_epoch_now(); + let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; - let crt = match CreateEvent::from_message(ident, &req, &idms_prox_write.qs_write) { - Ok(c) => c, - Err(e) => { - admin_warn!(err = ?e, "Failed to begin create"); - return Err(e); - } - }; + let crt = match CreateEvent::from_message(ident, &req, 
&idms_prox_write.qs_write) { + Ok(c) => c, + Err(e) => { + admin_warn!(err = ?e, "Failed to begin create"); + return Err(e); + } + }; - trace!(?crt, "Begin create event"); + trace!(?crt, "Begin create event"); - idms_prox_write - .qs_write - .create(&crt) - .and_then(|_| idms_prox_write.commit()) - }); - // At the end of the event we send it for logging. - res + idms_prox_write + .qs_write + .create(&crt) + .and_then(|_| idms_prox_write.commit()) } #[instrument( level = "info", - name = "modify", - skip(self, uat, req, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_modify( @@ -219,37 +203,33 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - let res = spanned!("actors::v1_write::handle", { - let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; + let ct = duration_from_epoch_now(); + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; - let mdf = match ModifyEvent::from_message(ident, &req, &idms_prox_write.qs_write) { - Ok(m) => m, - Err(e) => { - admin_error!(err = ?e, "Failed to begin modify"); - return Err(e); - } - }; + let mdf = match ModifyEvent::from_message(ident, &req, &idms_prox_write.qs_write) { + Ok(m) => m, + Err(e) => { + admin_error!(err = ?e, "Failed to begin modify"); + return Err(e); + } + }; - trace!(?mdf, "Begin modify event"); + trace!(?mdf, "Begin modify event"); - idms_prox_write - .qs_write - .modify(&mdf) - .and_then(|_| idms_prox_write.commit()) - }); - res + idms_prox_write + .qs_write + .modify(&mdf) + .and_then(|_| idms_prox_write.commit()) } #[instrument( level = "info", - name = "delete", - skip(self, uat, req, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_delete( @@ -259,36 +239,32 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - let res = spanned!("actors::v1_write::handle", { - let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; - let del = match DeleteEvent::from_message(ident, &req, &idms_prox_write.qs_write) { - Ok(d) => d, - Err(e) => { - admin_error!(err = ?e, "Failed to begin delete"); - return Err(e); - } - }; + let ct = duration_from_epoch_now(); + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; + let del = match DeleteEvent::from_message(ident, &req, &idms_prox_write.qs_write) { + Ok(d) => d, + Err(e) => { + admin_error!(err = ?e, "Failed to begin delete"); + return Err(e); + } + }; - trace!(?del, "Begin delete event"); + trace!(?del, "Begin delete event"); - idms_prox_write - .qs_write - .delete(&del) - .and_then(|_| idms_prox_write.commit()) - }); - res + idms_prox_write + .qs_write + .delete(&del) + .and_then(|_| idms_prox_write.commit()) } #[instrument( level = "info", - name = "patch", - skip(self, uat, filter, update, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_internalpatch( @@ -300,47 +276,38 @@ impl QueryServerWriteV1 { ) -> Result<(), OperationError> 
{ // Given a protoEntry, turn this into a modification set. let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - let res = spanned!("actors::v1_write::handle", { - let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; - - // Transform the ProtoEntry to a Modlist - let modlist = - ModifyList::from_patch(&update, &idms_prox_write.qs_write).map_err(|e| { - admin_error!(err = ?e, "Invalid Patch Request"); - e - })?; - - let mdf = ModifyEvent::from_internal_parts( - ident, - &modlist, - &filter, - &idms_prox_write.qs_write, - ) + let ct = duration_from_epoch_now(); + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; + + // Transform the ProtoEntry to a Modlist + let modlist = ModifyList::from_patch(&update, &idms_prox_write.qs_write).map_err(|e| { + admin_error!(err = ?e, "Invalid Patch Request"); + e + })?; + + let mdf = + ModifyEvent::from_internal_parts(ident, &modlist, &filter, &idms_prox_write.qs_write) + .map_err(|e| { admin_error!(err = ?e, "Failed to begin modify"); e })?; - trace!(?mdf, "Begin modify event"); + trace!(?mdf, "Begin modify event"); - idms_prox_write - .qs_write - .modify(&mdf) - .and_then(|_| idms_prox_write.commit()) - }); - res.map(|_| ()) + idms_prox_write + .qs_write + .modify(&mdf) + .and_then(|_| idms_prox_write.commit()) } #[instrument( level = "info", - name = "delete2", - skip(self, uat, filter, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_internaldelete( @@ -350,36 +317,32 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - let res = spanned!("actors::v1_write::handle", { - let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; - let del = match DeleteEvent::from_parts(ident, &filter, &idms_prox_write.qs_write) { - Ok(d) => d, - Err(e) => { - admin_error!(err = ?e, "Failed to begin delete"); - return Err(e); - } - }; + let ct = duration_from_epoch_now(); + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; + let del = match DeleteEvent::from_parts(ident, &filter, &idms_prox_write.qs_write) { + Ok(d) => d, + Err(e) => { + admin_error!(err = ?e, "Failed to begin delete"); + return Err(e); + } + }; - trace!(?del, "Begin delete event"); + trace!(?del, "Begin delete event"); - idms_prox_write - .qs_write - .delete(&del) - .and_then(|_| idms_prox_write.commit().map(|_| ())) - }); - res + idms_prox_write + .qs_write + .delete(&del) + .and_then(|_| idms_prox_write.commit().map(|_| ())) } #[instrument( level = "info", - name = "revive_recycled", - skip(self, uat, filter, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_reviverecycled( @@ -389,37 +352,32 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - let res = spanned!("actors::v1_write::handle", { - let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - 
.map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; - let rev = - match ReviveRecycledEvent::from_parts(ident, &filter, &idms_prox_write.qs_write) { - Ok(r) => r, - Err(e) => { - admin_error!(err = ?e, "Failed to begin revive"); - return Err(e); - } - }; + let ct = duration_from_epoch_now(); + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; + let rev = match ReviveRecycledEvent::from_parts(ident, &filter, &idms_prox_write.qs_write) { + Ok(r) => r, + Err(e) => { + admin_error!(err = ?e, "Failed to begin revive"); + return Err(e); + } + }; - trace!(?rev, "Begin revive event"); + trace!(?rev, "Begin revive event"); - idms_prox_write - .qs_write - .revive_recycled(&rev) - .and_then(|_| idms_prox_write.commit().map(|_| ())) - }); - res + idms_prox_write + .qs_write + .revive_recycled(&rev) + .and_then(|_| idms_prox_write.commit().map(|_| ())) } #[instrument( level = "info", - name = "service_account_credential_generate", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_service_account_credential_generate( @@ -430,45 +388,41 @@ impl QueryServerWriteV1 { ) -> Result { let ct = duration_from_epoch_now(); let mut idms_prox_write = self.idms.proxy_write_async(ct).await; - let res = spanned!("actors::v1_write::handle", { - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; - - // given the uuid_or_name, determine the target uuid. - // We can either do this by trying to parse the name or by creating a filter - // to find the entry - there are risks to both TBH ... especially when the uuid - // is also an entries name, but that they aren't the same entry. - - let target_uuid = idms_prox_write - .qs_write - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving id to target"); - e - })?; - - let gpe = GeneratePasswordEvent::from_parts(ident, target_uuid).map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin handle_service_account_credential_generate", - ); + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); e })?; - idms_prox_write - .generate_account_password(&gpe) - .and_then(|r| idms_prox_write.commit().map(|_| r)) - }); - res + + // given the uuid_or_name, determine the target uuid. + // We can either do this by trying to parse the name or by creating a filter + // to find the entry - there are risks to both TBH ... especially when the uuid + // is also an entries name, but that they aren't the same entry. 
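+        // For now we resolve with name_to_uuid below and accept those caveats.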
+ + let target_uuid = idms_prox_write + .qs_write + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!(err = ?e, "Error resolving id to target"); + e + })?; + + let gpe = GeneratePasswordEvent::from_parts(ident, target_uuid).map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin handle_service_account_credential_generate", + ); + e + })?; + idms_prox_write + .generate_account_password(&gpe) + .and_then(|r| idms_prox_write.commit().map(|_| r)) } #[instrument( level = "info", - name = "service_account_credential_generate", - skip(self, uat, uuid_or_name, label, expiry, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_service_account_api_token_generate( @@ -510,7 +464,6 @@ impl QueryServerWriteV1 { #[instrument( level = "info", - name = "service_account_credential_generate", skip_all, fields(uuid = ?eventid) )] @@ -551,8 +504,7 @@ impl QueryServerWriteV1 { #[instrument( level = "info", - name = "idm_credential_update", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmcredentialupdate( @@ -563,48 +515,44 @@ impl QueryServerWriteV1 { ) -> Result<(CUSessionToken, CUStatus), OperationError> { let ct = duration_from_epoch_now(); let mut idms_prox_write = self.idms.proxy_write_async(ct).await; - let res = spanned!("actors::v1_write::handle", { - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; - let target_uuid = idms_prox_write - .qs_write - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving id to target"); - e - })?; + let target_uuid = idms_prox_write + .qs_write + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!(err = ?e, "Error resolving id to target"); + e + })?; - idms_prox_write - .init_credential_update(&InitCredentialUpdateEvent::new(ident, target_uuid), ct) - .and_then(|tok| idms_prox_write.commit().map(|_| tok)) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin init_credential_update", - ); - e - }) - .map(|(tok, sta)| { - ( - CUSessionToken { - token: tok.token_enc, - }, - sta.into(), - ) - }) - }); - res + idms_prox_write + .init_credential_update(&InitCredentialUpdateEvent::new(ident, target_uuid), ct) + .and_then(|tok| idms_prox_write.commit().map(|_| tok)) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin init_credential_update", + ); + e + }) + .map(|(tok, sta)| { + ( + CUSessionToken { + token: tok.token_enc, + }, + sta.into(), + ) + }) } #[instrument( level = "info", - name = "idm_credential_update_intent", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmcredentialupdateintent( @@ -616,46 +564,42 @@ impl QueryServerWriteV1 { ) -> Result { let ct = duration_from_epoch_now(); let mut idms_prox_write = self.idms.proxy_write_async(ct).await; - let res = spanned!("actors::v1_write::handle", { - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; - let target_uuid = idms_prox_write - .qs_write - 
.name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving id to target"); - e - })?; + let target_uuid = idms_prox_write + .qs_write + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!(err = ?e, "Error resolving id to target"); + e + })?; - idms_prox_write - .init_credential_update_intent( - &InitCredentialUpdateIntentEvent::new(ident, target_uuid, ttl), - ct, - ) - .and_then(|tok| idms_prox_write.commit().map(|_| tok)) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin init_credential_update_intent", - ); - e - }) - .map(|tok| CUIntentToken { - token: tok.intent_id, - }) - }); - res + idms_prox_write + .init_credential_update_intent( + &InitCredentialUpdateIntentEvent::new(ident, target_uuid, ttl), + ct, + ) + .and_then(|tok| idms_prox_write.commit().map(|_| tok)) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin init_credential_update_intent", + ); + e + }) + .map(|tok| CUIntentToken { + token: tok.intent_id, + }) } #[instrument( level = "info", - name = "idm_credential_exchange_intent", - skip(self, intent_token, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmcredentialexchangeintent( @@ -665,37 +609,33 @@ impl QueryServerWriteV1 { ) -> Result<(CUSessionToken, CUStatus), OperationError> { let ct = duration_from_epoch_now(); let mut idms_prox_write = self.idms.proxy_write_async(ct).await; - let res = spanned!("actors::v1_write::handle", { - let intent_token = CredentialUpdateIntentToken { - intent_id: intent_token.token, - }; - // TODO: this is throwing a 500 error when a session is already in use, that seems bad? - idms_prox_write - .exchange_intent_credential_update(intent_token, ct) - .and_then(|tok| idms_prox_write.commit().map(|_| tok)) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin exchange_intent_credential_update", - ); - e - }) - .map(|(tok, sta)| { - ( - CUSessionToken { - token: tok.token_enc, - }, - sta.into(), - ) - }) - }); - res + let intent_token = CredentialUpdateIntentToken { + intent_id: intent_token.token, + }; + // TODO: this is throwing a 500 error when a session is already in use, that seems bad? 
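+        // (the exchange fails if the intent token already has an active session,
+        // which is what currently surfaces as the 500 noted above.)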
+ idms_prox_write + .exchange_intent_credential_update(intent_token, ct) + .and_then(|tok| idms_prox_write.commit().map(|_| tok)) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin exchange_intent_credential_update", + ); + e + }) + .map(|(tok, sta)| { + ( + CUSessionToken { + token: tok.token_enc, + }, + sta.into(), + ) + }) } #[instrument( level = "info", - name = "idm_credential_update_commit", - skip(self, session_token, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmcredentialupdatecommit( @@ -705,29 +645,25 @@ impl QueryServerWriteV1 { ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); let mut idms_prox_write = self.idms.proxy_write_async(ct).await; - let res = spanned!("actors::v1_write::handle", { - let session_token = CredentialUpdateSessionToken { - token_enc: session_token.token, - }; + let session_token = CredentialUpdateSessionToken { + token_enc: session_token.token, + }; - idms_prox_write - .commit_credential_update(&session_token, ct) - .and_then(|tok| idms_prox_write.commit().map(|_| tok)) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin commit_credential_update", - ); - e - }) - }); - res + idms_prox_write + .commit_credential_update(&session_token, ct) + .and_then(|tok| idms_prox_write.commit().map(|_| tok)) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin commit_credential_update", + ); + e + }) } #[instrument( level = "info", - name = "idm_credential_update_cancel", - skip(self, session_token, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmcredentialupdatecancel( @@ -737,29 +673,25 @@ impl QueryServerWriteV1 { ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); let mut idms_prox_write = self.idms.proxy_write_async(ct).await; - let res = spanned!("actors::v1_write::handle", { - let session_token = CredentialUpdateSessionToken { - token_enc: session_token.token, - }; + let session_token = CredentialUpdateSessionToken { + token_enc: session_token.token, + }; - idms_prox_write - .cancel_credential_update(&session_token, ct) - .and_then(|tok| idms_prox_write.commit().map(|_| tok)) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin commit_credential_cancel", - ); - e - }) - }); - res + idms_prox_write + .cancel_credential_update(&session_token, ct) + .and_then(|tok| idms_prox_write.commit().map(|_| tok)) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin commit_credential_cancel", + ); + e + }) } #[instrument( level = "info", - name = "handle_service_account_into_person", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_service_account_into_person( @@ -770,32 +702,28 @@ impl QueryServerWriteV1 { ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); let idms_prox_write = self.idms.proxy_write_async(ct).await; - let res = spanned!("actors::v1_write::handle", { - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; - let target_uuid = idms_prox_write - .qs_write - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving id to target"); - e - })?; + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; + let target_uuid = idms_prox_write + .qs_write + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + 
admin_error!(err = ?e, "Error resolving id to target"); + e + })?; - idms_prox_write - .service_account_into_person(&ident, target_uuid) - .and_then(|_| idms_prox_write.commit()) - }); - res + idms_prox_write + .service_account_into_person(&ident, target_uuid) + .and_then(|_| idms_prox_write.commit()) } #[instrument( level = "info", - name = "regenerate_radius_secret", - skip(self, uat, uuid_or_name, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_regenerateradius( @@ -806,49 +734,42 @@ impl QueryServerWriteV1 { ) -> Result { let ct = duration_from_epoch_now(); let mut idms_prox_write = self.idms.proxy_write_async(ct).await; - let res = spanned!( - "actors::v1_write::handle", - { - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; - let target_uuid = idms_prox_write - .qs_write - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving id to target"); - e - })?; + let target_uuid = idms_prox_write + .qs_write + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!(err = ?e, "Error resolving id to target"); + e + })?; - let rrse = RegenerateRadiusSecretEvent::from_parts( - // &idms_prox_write.qs_write, - ident, - target_uuid, - ) - .map_err(|e| { - admin_error!( - err = ?e, - "Failed to begin idm_account_regenerate_radius", - ); - e - })?; + let rrse = RegenerateRadiusSecretEvent::from_parts( + // &idms_prox_write.qs_write, + ident, + target_uuid, + ) + .map_err(|e| { + admin_error!( + err = ?e, + "Failed to begin idm_account_regenerate_radius", + ); + e + })?; - idms_prox_write - .regenerate_radius_secret(&rrse) - .and_then(|r| idms_prox_write.commit().map(|_| r)) - } - ); - res + idms_prox_write + .regenerate_radius_secret(&rrse) + .and_then(|r| idms_prox_write.commit().map(|_| r)) } #[instrument( level = "info", - name = "purge_attribute", - skip(self, uat, uuid_or_name, attr, filter, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_purgeattribute( @@ -861,48 +782,45 @@ impl QueryServerWriteV1 { ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); let idms_prox_write = self.idms.proxy_write_async(ct).await; - spanned!("actors::v1_write::handle", { - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; - let target_uuid = idms_prox_write - .qs_write - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving id to target"); - e - })?; + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; + let target_uuid = idms_prox_write + .qs_write + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!(err = ?e, "Error resolving id to target"); + e + })?; - let mdf = match ModifyEvent::from_target_uuid_attr_purge( - ident, - target_uuid, - &attr, - filter, - &idms_prox_write.qs_write, - ) { - Ok(m) => m, - Err(e) => { - admin_error!(err = ?e, "Failed to begin modify"); - return Err(e); - } - }; + let mdf = match ModifyEvent::from_target_uuid_attr_purge( + ident, + target_uuid, + &attr, + filter, + &idms_prox_write.qs_write, + ) { + Ok(m) => m, + Err(e) => { + 
admin_error!(err = ?e, "Failed to begin modify"); + return Err(e); + } + }; - trace!(?mdf, "Begin modify event"); + trace!(?mdf, "Begin modify event"); - idms_prox_write - .qs_write - .modify(&mdf) - .and_then(|_| idms_prox_write.commit().map(|_| ())) - }) + idms_prox_write + .qs_write + .modify(&mdf) + .and_then(|_| idms_prox_write.commit().map(|_| ())) } #[instrument( level = "info", - name = "remove_attribute_values", - skip(self, uat, uuid_or_name, attr, values, filter, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_removeattributevalues( @@ -915,50 +833,48 @@ impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - spanned!("actors::v1_write::handle", { - let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; - let target_uuid = idms_prox_write - .qs_write - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving id to target"); - e - })?; + let ct = duration_from_epoch_now(); + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; + let target_uuid = idms_prox_write + .qs_write + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_error!(err = ?e, "Error resolving id to target"); + e + })?; - let proto_ml = ProtoModifyList::new_list( - values - .into_iter() - .map(|v| ProtoModify::Removed(attr.clone(), v)) - .collect(), - ); + let proto_ml = ProtoModifyList::new_list( + values + .into_iter() + .map(|v| ProtoModify::Removed(attr.clone(), v)) + .collect(), + ); - let mdf = match ModifyEvent::from_parts( - ident, - target_uuid, - &proto_ml, - filter, - &idms_prox_write.qs_write, - ) { - Ok(m) => m, - Err(e) => { - admin_error!(err = ?e, "Failed to begin modify"); - return Err(e); - } - }; + let mdf = match ModifyEvent::from_parts( + ident, + target_uuid, + &proto_ml, + filter, + &idms_prox_write.qs_write, + ) { + Ok(m) => m, + Err(e) => { + admin_error!(err = ?e, "Failed to begin modify"); + return Err(e); + } + }; - trace!(?mdf, "Begin modify event"); + trace!(?mdf, "Begin modify event"); - idms_prox_write - .qs_write - .modify(&mdf) - .and_then(|_| idms_prox_write.commit().map(|_| ())) - }) + idms_prox_write + .qs_write + .modify(&mdf) + .and_then(|_| idms_prox_write.commit().map(|_| ())) } #[instrument( @@ -1121,8 +1037,7 @@ impl QueryServerWriteV1 { #[instrument( level = "info", - name = "idm_account_unix_set_cred", - skip(self, uat, uuid_or_name, cred, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_idmaccountunixsetcred( @@ -1134,46 +1049,42 @@ impl QueryServerWriteV1 { ) -> Result<(), OperationError> { let ct = duration_from_epoch_now(); let mut idms_prox_write = self.idms.proxy_write_async(ct).await; - let res = spanned!("actors::v1_write::handle", { - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; - - let target_uuid = Uuid::parse_str(uuid_or_name.as_str()).or_else(|_| { - idms_prox_write - .qs_write - .name_to_uuid(uuid_or_name.as_str()) - .map_err(|e| { - admin_info!("Error resolving as gidnumber continuing ..."); - e - }) - })?; - - let upce = UnixPasswordChangeEvent::from_parts( - // &idms_prox_write.qs_write, - ident, - 
target_uuid, - cred, - ) + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) .map_err(|e| { - admin_error!(err = ?e, "Failed to begin UnixPasswordChangeEvent"); + admin_error!(err = ?e, "Invalid identity"); e })?; + + let target_uuid = Uuid::parse_str(uuid_or_name.as_str()).or_else(|_| { idms_prox_write - .set_unix_account_password(&upce) - .and_then(|_| idms_prox_write.commit()) - .map(|_| ()) - }); - res + .qs_write + .name_to_uuid(uuid_or_name.as_str()) + .map_err(|e| { + admin_info!("Error resolving as gidnumber continuing ..."); + e + }) + })?; + + let upce = UnixPasswordChangeEvent::from_parts( + // &idms_prox_write.qs_write, + ident, + target_uuid, + cred, + ) + .map_err(|e| { + admin_error!(err = ?e, "Failed to begin UnixPasswordChangeEvent"); + e + })?; + idms_prox_write + .set_unix_account_password(&upce) + .and_then(|_| idms_prox_write.commit()) + .map(|_| ()) } #[instrument( level = "info", - name = "oauth2_scopemap_create", - skip(self, uat, filter, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_oauth2_scopemap_create( @@ -1187,61 +1098,54 @@ impl QueryServerWriteV1 { // Because this is from internal, we can generate a real modlist, rather // than relying on the proto ones. let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - spanned!("handle_oauth2_scopemap_create", { - let ct = duration_from_epoch_now(); + let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; - let group_uuid = idms_prox_write - .qs_write - .name_to_uuid(group.as_str()) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving group name to target"); - e - })?; + let group_uuid = idms_prox_write + .qs_write + .name_to_uuid(group.as_str()) + .map_err(|e| { + admin_error!(err = ?e, "Error resolving group name to target"); + e + })?; - let ml = ModifyList::new_append( - "oauth2_rs_scope_map", - Value::new_oauthscopemap(group_uuid, scopes.into_iter().collect()).ok_or_else( - || { - OperationError::InvalidAttribute( - "Invalid Oauth Scope Map syntax".to_string(), - ) - }, - )?, - ); + let ml = ModifyList::new_append( + "oauth2_rs_scope_map", + Value::new_oauthscopemap(group_uuid, scopes.into_iter().collect()).ok_or_else( + || OperationError::InvalidAttribute("Invalid Oauth Scope Map syntax".to_string()), + )?, + ); - let mdf = match ModifyEvent::from_internal_parts( - ident, - &ml, - &filter, - &idms_prox_write.qs_write, - ) { - Ok(m) => m, - Err(e) => { - admin_error!(err = ?e, "Failed to begin modify"); - return Err(e); - } - }; + let mdf = match ModifyEvent::from_internal_parts( + ident, + &ml, + &filter, + &idms_prox_write.qs_write, + ) { + Ok(m) => m, + Err(e) => { + admin_error!(err = ?e, "Failed to begin modify"); + return Err(e); + } + }; - trace!(?mdf, "Begin modify event"); + trace!(?mdf, "Begin modify event"); - idms_prox_write - .qs_write - .modify(&mdf) - .and_then(|_| idms_prox_write.commit().map(|_| ())) - }) + idms_prox_write + .qs_write + .modify(&mdf) + .and_then(|_| idms_prox_write.commit().map(|_| ())) } #[instrument( level = "info", - name = "oauth2_scopemap_delete", - skip(self, uat, filter, eventid) + skip_all, fields(uuid = ?eventid) )] pub async fn handle_oauth2_scopemap_delete( @@ -1252,88 +1156,80 @@ 
impl QueryServerWriteV1 { eventid: Uuid, ) -> Result<(), OperationError> { let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - spanned!("handle_oauth2_scopemap_create", { - let ct = duration_from_epoch_now(); + let ct = duration_from_epoch_now(); - let ident = idms_prox_write - .validate_and_parse_token_to_ident(uat.as_deref(), ct) - .map_err(|e| { - admin_error!(err = ?e, "Invalid identity"); - e - })?; + let ident = idms_prox_write + .validate_and_parse_token_to_ident(uat.as_deref(), ct) + .map_err(|e| { + admin_error!(err = ?e, "Invalid identity"); + e + })?; - let group_uuid = idms_prox_write - .qs_write - .name_to_uuid(group.as_str()) - .map_err(|e| { - admin_error!(err = ?e, "Error resolving group name to target"); - e - })?; + let group_uuid = idms_prox_write + .qs_write + .name_to_uuid(group.as_str()) + .map_err(|e| { + admin_error!(err = ?e, "Error resolving group name to target"); + e + })?; - let ml = ModifyList::new_remove("oauth2_rs_scope_map", PartialValue::Refer(group_uuid)); + let ml = ModifyList::new_remove("oauth2_rs_scope_map", PartialValue::Refer(group_uuid)); - let mdf = match ModifyEvent::from_internal_parts( - ident, - &ml, - &filter, - &idms_prox_write.qs_write, - ) { - Ok(m) => m, - Err(e) => { - admin_error!(err = ?e, "Failed to begin modify"); - return Err(e); - } - }; + let mdf = match ModifyEvent::from_internal_parts( + ident, + &ml, + &filter, + &idms_prox_write.qs_write, + ) { + Ok(m) => m, + Err(e) => { + admin_error!(err = ?e, "Failed to begin modify"); + return Err(e); + } + }; - trace!(?mdf, "Begin modify event"); + trace!(?mdf, "Begin modify event"); - idms_prox_write - .qs_write - .modify(&mdf) - .and_then(|_| idms_prox_write.commit().map(|_| ())) - }) + idms_prox_write + .qs_write + .modify(&mdf) + .and_then(|_| idms_prox_write.commit().map(|_| ())) } // ===== These below are internal only event types. 
===== #[instrument( level = "info", - name = "purge_tombstone_event", - skip(self, msg) + skip_all, fields(uuid = ?msg.eventid) )] pub(crate) async fn handle_purgetombstoneevent(&self, msg: PurgeTombstoneEvent) { trace!(?msg, "Begin purge tombstone event"); let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - spanned!("actors::v1_write::handle", { - let res = idms_prox_write - .qs_write - .purge_tombstones() - .and_then(|_| idms_prox_write.commit()); - admin_info!(?res, "Purge tombstones result"); - #[allow(clippy::expect_used)] - res.expect("Invalid Server State"); - }); + let res = idms_prox_write + .qs_write + .purge_tombstones() + .and_then(|_| idms_prox_write.commit()); + admin_info!(?res, "Purge tombstones result"); + #[allow(clippy::expect_used)] + res.expect("Invalid Server State"); } #[instrument( level = "info", - name = "purge_recycled_event", - skip(self, msg) + skip_all, fields(uuid = ?msg.eventid) )] pub(crate) async fn handle_purgerecycledevent(&self, msg: PurgeRecycledEvent) { trace!(?msg, "Begin purge recycled event"); let idms_prox_write = self.idms.proxy_write_async(duration_from_epoch_now()).await; - spanned!("actors::v1_write::handle", { - let res = idms_prox_write - .qs_write - .purge_recycled() - .and_then(|_| idms_prox_write.commit()); - admin_info!(?res, "Purge recycled result"); - #[allow(clippy::expect_used)] - res.expect("Invalid Server State"); - }); + let res = idms_prox_write + .qs_write + .purge_recycled() + .and_then(|_| idms_prox_write.commit()); + admin_info!(?res, "Purge recycled result"); + #[allow(clippy::expect_used)] + res.expect("Invalid Server State"); } pub(crate) async fn handle_delayedaction(&self, da: DelayedAction) { @@ -1344,13 +1240,11 @@ impl QueryServerWriteV1 { trace!("Begin delayed action ..."); let ct = duration_from_epoch_now(); let mut idms_prox_write = self.idms.proxy_write_async(ct).await; - spanned!("actors::v1_write::handle", { - if let Err(res) = idms_prox_write - .process_delayedaction(da) - .and_then(|_| idms_prox_write.commit()) - { - admin_info!(?res, "delayed action error"); - } - }); + if let Err(res) = idms_prox_write + .process_delayedaction(da) + .and_then(|_| idms_prox_write.commit()) + { + admin_info!(?res, "delayed action error"); + } } } diff --git a/kanidmd/idm/src/audit.rs b/kanidmd/idm/src/audit.rs index 89435428e..ee7f1661b 100644 --- a/kanidmd/idm/src/audit.rs +++ b/kanidmd/idm/src/audit.rs @@ -1,6 +1,7 @@ -use serde::{Deserialize, Serialize}; use std::fmt; +use serde::{Deserialize, Serialize}; + include!("./audit_loglevel.rs"); pub const AUDIT_LINE_SIZE: usize = 512; diff --git a/kanidmd/idm/src/be/dbentry.rs b/kanidmd/idm/src/be/dbentry.rs index ec438bd50..f29c3440c 100644 --- a/kanidmd/idm/src/be/dbentry.rs +++ b/kanidmd/idm/src/be/dbentry.rs @@ -1,11 +1,13 @@ -use crate::be::dbvalue::{DbValueEmailAddressV1, DbValuePhoneNumberV1, DbValueSetV2, DbValueV1}; -use crate::prelude::OperationError; -use serde::{Deserialize, Serialize}; -use smartstring::alias::String as AttrString; use std::collections::BTreeMap; use std::time::Duration; + +use serde::{Deserialize, Serialize}; +use smartstring::alias::String as AttrString; use uuid::Uuid; +use crate::be::dbvalue::{DbValueEmailAddressV1, DbValuePhoneNumberV1, DbValueSetV2, DbValueV1}; +use crate::prelude::OperationError; + #[derive(Serialize, Deserialize, Debug)] pub struct DbEntryV1 { pub attrs: BTreeMap>, diff --git a/kanidmd/idm/src/be/dbvalue.rs b/kanidmd/idm/src/be/dbvalue.rs index 76f8eba9d..c2c681750 100644 --- 
a/kanidmd/idm/src/be/dbvalue.rs +++ b/kanidmd/idm/src/be/dbvalue.rs @@ -1,15 +1,15 @@ -use hashbrown::HashSet; -use serde::{Deserialize, Serialize}; use std::fmt; use std::time::Duration; + +use hashbrown::HashSet; +use serde::{Deserialize, Serialize}; use url::Url; use uuid::Uuid; +use webauthn_rs::prelude::{ + DeviceKey as DeviceKeyV4, Passkey as PasskeyV4, SecurityKey as SecurityKeyV4, +}; use webauthn_rs_core::proto::{COSEKey, UserVerificationPolicy}; -use webauthn_rs::prelude::DeviceKey as DeviceKeyV4; -use webauthn_rs::prelude::Passkey as PasskeyV4; -use webauthn_rs::prelude::SecurityKey as SecurityKeyV4; - #[derive(Serialize, Deserialize, Debug)] pub struct DbCidV1 { #[serde(rename = "d")] @@ -556,11 +556,11 @@ impl DbValueSetV2 { #[cfg(test)] mod tests { - use super::DbCred; - use super::{DbBackupCodeV1, DbPasswordV1, DbTotpV1, DbWebauthnV1}; use serde::{Deserialize, Serialize}; use uuid::Uuid; + use super::{DbBackupCodeV1, DbCred, DbPasswordV1, DbTotpV1, DbWebauthnV1}; + fn dbcred_type_default_pw() -> DbCredTypeV1 { DbCredTypeV1::Pw } diff --git a/kanidmd/idm/src/be/idl_arc_sqlite.rs b/kanidmd/idm/src/be/idl_arc_sqlite.rs index 6d71049d1..1483c3683 100644 --- a/kanidmd/idm/src/be/idl_arc_sqlite.rs +++ b/kanidmd/idm/src/be/idl_arc_sqlite.rs @@ -1,3 +1,18 @@ +use std::collections::BTreeSet; +use std::convert::TryInto; +use std::ops::DerefMut; +use std::sync::Arc; +use std::time::Duration; + +use concread::arcache::{ARCache, ARCacheBuilder, ARCacheReadTxn, ARCacheWriteTxn}; +use concread::cowcell::*; +use hashbrown::HashMap; +use idlset::v2::IDLBitRange; +use idlset::AndNot; +use kanidm_proto::v1::{ConsistencyError, OperationError}; +use tracing::trace; +use uuid::Uuid; + use crate::be::idl_sqlite::{ IdlSqlite, IdlSqliteReadTransaction, IdlSqliteTransaction, IdlSqliteWriteTransaction, }; @@ -6,24 +21,8 @@ use crate::be::idxkey::{ }; use crate::be::{BackendConfig, IdList, IdRawEntry}; use crate::entry::{Entry, EntryCommitted, EntrySealed}; -use crate::value::IndexType; -use crate::value::Value; - -use concread::arcache::{ARCache, ARCacheBuilder, ARCacheReadTxn, ARCacheWriteTxn}; -use concread::cowcell::*; -use idlset::{v2::IDLBitRange, AndNot}; -use kanidm_proto::v1::{ConsistencyError, OperationError}; - -use hashbrown::HashMap; -use std::collections::BTreeSet; -use std::convert::TryInto; -use std::ops::DerefMut; -use std::sync::Arc; -use std::time::Duration; -use uuid::Uuid; - use crate::prelude::*; -use tracing::trace; +use crate::value::{IndexType, Value}; // use std::borrow::Borrow; @@ -82,58 +81,56 @@ macro_rules! get_identry { $idl:expr, $is_read_op:expr ) => {{ - spanned!("be::idl_arc_sqlite::get_identry", { - let mut result: Vec> = Vec::new(); - match $idl { - IdList::Partial(idli) | IdList::PartialThreshold(idli) | IdList::Indexed(idli) => { - let mut nidl = IDLBitRange::new(); + let mut result: Vec> = Vec::new(); + match $idl { + IdList::Partial(idli) | IdList::PartialThreshold(idli) | IdList::Indexed(idli) => { + let mut nidl = IDLBitRange::new(); - idli.into_iter().for_each(|i| { - // For all the id's in idl. - // is it in the cache? - match $self.entry_cache.get(&i) { - Some(eref) => result.push(eref.clone()), - None => unsafe { nidl.push_id(i) }, - } + idli.into_iter().for_each(|i| { + // For all the id's in idl. + // is it in the cache? + match $self.entry_cache.get(&i) { + Some(eref) => result.push(eref.clone()), + None => unsafe { nidl.push_id(i) }, + } + }); + + if !nidl.is_empty() { + // Now, get anything from nidl that is needed. 
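
// Aside: a standalone sketch of the read-through pattern that the
// `get_identry!` macro implements here, with a plain HashMap standing in for
// the ARCache and strings for entries (all names and types are simplified):

use std::collections::HashMap;

fn read_through(
    cache: &mut HashMap<u64, String>,
    db_fetch: impl Fn(&[u64]) -> Vec<(u64, String)>,
    ids: &[u64],
    is_read_op: bool,
) -> Vec<String> {
    let mut result = Vec::new();
    let mut misses = Vec::new();
    for &id in ids {
        match cache.get(&id) {
            Some(e) => result.push(e.clone()), // cache hit
            None => misses.push(id),           // defer to the database
        }
    }
    for (id, e) in db_fetch(&misses) {
        // Only read operations warm the cache; the AllIds branch below skips
        // this step so a full scan cannot evict the hot working set.
        if is_read_op {
            cache.insert(id, e.clone());
        }
        result.push(e);
    }
    result
}
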
+ let mut db_result = $self.db.get_identry(&IdList::Partial(nidl))?; + // Clone everything from db_result into the cache. + if $is_read_op { + db_result.iter().for_each(|e| { + $self.entry_cache.insert(e.get_id(), e.clone()); + }); + } + // Merge the two vecs + result.append(&mut db_result); + } + } + IdList::AllIds => { + // VERY similar to above, but we skip adding the entries to the cache + // on miss to prevent scan/invalidation attacks. + let idli = (*$self.allids).clone(); + let mut nidl = IDLBitRange::new(); + + (&idli) + .into_iter() + .for_each(|i| match $self.entry_cache.get(&i) { + Some(eref) => result.push(eref.clone()), + None => unsafe { nidl.push_id(i) }, }); - if !nidl.is_empty() { - // Now, get anything from nidl that is needed. - let mut db_result = $self.db.get_identry(&IdList::Partial(nidl))?; - // Clone everything from db_result into the cache. - if $is_read_op { - db_result.iter().for_each(|e| { - $self.entry_cache.insert(e.get_id(), e.clone()); - }); - } - // Merge the two vecs - result.append(&mut db_result); - } + if !nidl.is_empty() { + // Now, get anything from nidl that is needed. + let mut db_result = $self.db.get_identry(&IdList::Partial(nidl))?; + // Merge the two vecs + result.append(&mut db_result); } - IdList::AllIds => { - // VERY similar to above, but we skip adding the entries to the cache - // on miss to prevent scan/invalidation attacks. - let idli = (*$self.allids).clone(); - let mut nidl = IDLBitRange::new(); - - (&idli) - .into_iter() - .for_each(|i| match $self.entry_cache.get(&i) { - Some(eref) => result.push(eref.clone()), - None => unsafe { nidl.push_id(i) }, - }); - - if !nidl.is_empty() { - // Now, get anything from nidl that is needed. - let mut db_result = $self.db.get_identry(&IdList::Partial(nidl))?; - // Merge the two vecs - result.append(&mut db_result); - } - } - }; - // Return - Ok(result) - }) + } + }; + // Return + Ok(result) }}; } @@ -165,7 +162,6 @@ macro_rules! get_idl { $itype:expr, $idx_key:expr ) => {{ - spanned!("be::idl_arc_sqlite::get_idl", { // SEE ALSO #259: Find a way to implement borrow for this properly. // I don't think this is possible. When we make this dyn, the arc // needs the dyn trait to be sized so that it *could* claim a clone @@ -188,10 +184,9 @@ macro_rules! get_idl { // If hit, continue. if let Some(ref data) = cache_r { trace!( - %data, - "Got cached idl for index {:?} {:?}", - $itype, - $attr, + cached_index = ?$itype, + attr = ?$attr, + idl = %data, ); return Ok(Some(data.as_ref().clone())); } @@ -206,7 +201,6 @@ macro_rules! get_idl { $self.idl_cache.insert(ncache_key, Box::new(idl.clone())) } Ok(db_r) - }) }}; } @@ -215,24 +209,22 @@ macro_rules! 
name2uuid { $self:expr, $name:expr ) => {{ - spanned!("be::idl_arc_sqlite::name2uuid", { - let cache_key = NameCacheKey::Name2Uuid($name.to_string()); - let cache_r = $self.name_cache.get(&cache_key); - if let Some(NameCacheValue::U(uuid)) = cache_r { - trace!(?uuid, "Got cached name2uuid"); - return Ok(Some(uuid.clone())); - } else { - trace!("Cache miss uuid for name2uuid"); - } + let cache_key = NameCacheKey::Name2Uuid($name.to_string()); + let cache_r = $self.name_cache.get(&cache_key); + if let Some(NameCacheValue::U(uuid)) = cache_r { + trace!(?uuid, "Got cached name2uuid"); + return Ok(Some(uuid.clone())); + } else { + trace!("Cache miss uuid for name2uuid"); + } - let db_r = $self.db.name2uuid($name)?; - if let Some(uuid) = db_r { - $self - .name_cache - .insert(cache_key, NameCacheValue::U(uuid.clone())) - } - Ok(db_r) - }) + let db_r = $self.db.name2uuid($name)?; + if let Some(uuid) = db_r { + $self + .name_cache + .insert(cache_key, NameCacheValue::U(uuid.clone())) + } + Ok(db_r) }}; } @@ -241,24 +233,22 @@ macro_rules! uuid2spn { $self:expr, $uuid:expr ) => {{ - spanned!("be::idl_arc_sqlite::uuid2spn", { - let cache_key = NameCacheKey::Uuid2Spn($uuid); - let cache_r = $self.name_cache.get(&cache_key); - if let Some(NameCacheValue::S(ref spn)) = cache_r { - trace!(?spn, "Got cached uuid2spn"); - return Ok(Some(spn.as_ref().clone())); - } else { - trace!("Cache miss spn for uuid2spn"); - } + let cache_key = NameCacheKey::Uuid2Spn($uuid); + let cache_r = $self.name_cache.get(&cache_key); + if let Some(NameCacheValue::S(ref spn)) = cache_r { + trace!(?spn, "Got cached uuid2spn"); + return Ok(Some(spn.as_ref().clone())); + } else { + trace!("Cache miss spn for uuid2spn"); + } - let db_r = $self.db.uuid2spn($uuid)?; - if let Some(ref data) = db_r { - $self - .name_cache - .insert(cache_key, NameCacheValue::S(Box::new(data.clone()))) - } - Ok(db_r) - }) + let db_r = $self.db.uuid2spn($uuid)?; + if let Some(ref data) = db_r { + $self + .name_cache + .insert(cache_key, NameCacheValue::S(Box::new(data.clone()))) + } + Ok(db_r) }}; } @@ -267,23 +257,21 @@ macro_rules! 
uuid2rdn { $self:expr, $uuid:expr ) => {{ - spanned!("be::idl_arc_sqlite::uuid2rdn", { - let cache_key = NameCacheKey::Uuid2Rdn($uuid); - let cache_r = $self.name_cache.get(&cache_key); - if let Some(NameCacheValue::R(ref rdn)) = cache_r { - return Ok(Some(rdn.clone())); - } else { - trace!("Cache miss rdn for uuid2rdn"); - } + let cache_key = NameCacheKey::Uuid2Rdn($uuid); + let cache_r = $self.name_cache.get(&cache_key); + if let Some(NameCacheValue::R(ref rdn)) = cache_r { + return Ok(Some(rdn.clone())); + } else { + trace!("Cache miss rdn for uuid2rdn"); + } - let db_r = $self.db.uuid2rdn($uuid)?; - if let Some(ref data) = db_r { - $self - .name_cache - .insert(cache_key, NameCacheValue::R(data.clone())) - } - Ok(db_r) - }) + let db_r = $self.db.uuid2rdn($uuid)?; + if let Some(ref data) = db_r { + $self + .name_cache + .insert(cache_key, NameCacheValue::R(data.clone())) + } + Ok(db_r) }}; } @@ -528,88 +516,83 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> { } impl<'a> IdlArcSqliteWriteTransaction<'a> { + #[instrument(level = "debug", name = "idl_arc_sqlite::commit", skip_all)] pub fn commit(self) -> Result<(), OperationError> { - spanned!("be::idl_arc_sqlite::commit", { - let IdlArcSqliteWriteTransaction { - db, - mut entry_cache, - mut idl_cache, - mut name_cache, - op_ts_max, - allids, - maxid, - } = self; + let IdlArcSqliteWriteTransaction { + db, + mut entry_cache, + mut idl_cache, + mut name_cache, + op_ts_max, + allids, + maxid, + } = self; - // Write any dirty items to the disk. - spanned!("be::idl_arc_sqlite::commit", { - entry_cache - .iter_mut_mark_clean() - .try_for_each(|(k, v)| match v { - Some(e) => db.write_identry(e), - None => db.delete_identry(*k), - }) + // Write any dirty items to the disk. + entry_cache + .iter_mut_mark_clean() + .try_for_each(|(k, v)| match v { + Some(e) => db.write_identry(e), + None => db.delete_identry(*k), }) .map_err(|e| { admin_error!(?e, "Failed to sync entry cache to sqlite"); e })?; - spanned!("be::idl_arc_sqlite::commit", { - idl_cache.iter_mut_mark_clean().try_for_each(|(k, v)| { - match v { - Some(idl) => db.write_idl(k.a.as_str(), k.i, k.k.as_str(), idl), - #[allow(clippy::unreachable)] - None => { - // Due to how we remove items, we always write an empty idl - // to the cache, so this should never be none. - // - // If it is none, this means we have memory corruption so we MUST - // panic. - // Why is `v` the `Option` type then? - unreachable!(); - } + idl_cache + .iter_mut_mark_clean() + .try_for_each(|(k, v)| { + match v { + Some(idl) => db.write_idl(k.a.as_str(), k.i, k.k.as_str(), idl), + #[allow(clippy::unreachable)] + None => { + // Due to how we remove items, we always write an empty idl + // to the cache, so this should never be none. + // + // If it is none, this means we have memory corruption so we MUST + // panic. + // Why is `v` the `Option` type then? 
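
// (A likely answer to the question above: `iter_mut_mark_clean` is shared with
//  the entry cache, where `None` encodes a removed key, as in the entry_cache
//  arm earlier in this commit path. Idl removals are instead written back as
//  empty IdLists, so `None` should never occur for the idl cache.)
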
+ unreachable!(); } - }) + } }) .map_err(|e| { admin_error!(?e, "Failed to sync idl cache to sqlite"); e })?; - spanned!("be::idl_arc_sqlite::commit", { - name_cache - .iter_mut_mark_clean() - .try_for_each(|(k, v)| match (k, v) { - (NameCacheKey::Name2Uuid(k), Some(NameCacheValue::U(v))) => { - db.write_name2uuid_add(k, *v) - } - (NameCacheKey::Name2Uuid(k), None) => db.write_name2uuid_rem(k), - (NameCacheKey::Uuid2Spn(uuid), Some(NameCacheValue::S(v))) => { - db.write_uuid2spn(*uuid, Some(v)) - } - (NameCacheKey::Uuid2Spn(uuid), None) => db.write_uuid2spn(*uuid, None), - (NameCacheKey::Uuid2Rdn(uuid), Some(NameCacheValue::R(v))) => { - db.write_uuid2rdn(*uuid, Some(v)) - } - (NameCacheKey::Uuid2Rdn(uuid), None) => db.write_uuid2rdn(*uuid, None), + name_cache + .iter_mut_mark_clean() + .try_for_each(|(k, v)| match (k, v) { + (NameCacheKey::Name2Uuid(k), Some(NameCacheValue::U(v))) => { + db.write_name2uuid_add(k, *v) + } + (NameCacheKey::Name2Uuid(k), None) => db.write_name2uuid_rem(k), + (NameCacheKey::Uuid2Spn(uuid), Some(NameCacheValue::S(v))) => { + db.write_uuid2spn(*uuid, Some(v)) + } + (NameCacheKey::Uuid2Spn(uuid), None) => db.write_uuid2spn(*uuid, None), + (NameCacheKey::Uuid2Rdn(uuid), Some(NameCacheValue::R(v))) => { + db.write_uuid2rdn(*uuid, Some(v)) + } + (NameCacheKey::Uuid2Rdn(uuid), None) => db.write_uuid2rdn(*uuid, None), - _ => Err(OperationError::InvalidCacheState), - }) + _ => Err(OperationError::InvalidCacheState), }) .map_err(|e| { admin_error!(?e, "Failed to sync name cache to sqlite"); e })?; - // Undo the caches in the reverse order. - db.commit().map(|()| { - op_ts_max.commit(); - name_cache.commit(); - idl_cache.commit(); - entry_cache.commit(); - allids.commit(); - maxid.commit(); - }) + // Undo the caches in the reverse order. 
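
// Aside: the ordering here is the core of the write-back design. Dirty cache
// slots are flushed into the sqlite transaction first, the sqlite commit below
// is the durable point, and only after it succeeds are the in-memory cache
// generations committed, so an earlier failure leaves the disk untouched and
// the caches still marked dirty.
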
+ db.commit().map(|()| { + op_ts_max.commit(); + name_cache.commit(); + idl_cache.commit(); + entry_cache.commit(); + allids.commit(); + maxid.commit(); }) } @@ -626,18 +609,16 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { where I: Iterator>, { - spanned!("be::idl_arc_sqlite::write_identries", { - entries.try_for_each(|e| { - trace!("Inserting {:?} to cache", e.get_id()); - if e.get_id() == 0 { - Err(OperationError::InvalidEntryId) - } else { - (*self.allids).insert_id(e.get_id()); - self.entry_cache - .insert_dirty(e.get_id(), Arc::new(e.clone())); - Ok(()) - } - }) + entries.try_for_each(|e| { + trace!("Inserting {:?} to cache", e.get_id()); + if e.get_id() == 0 { + Err(OperationError::InvalidEntryId) + } else { + (*self.allids).insert_id(e.get_id()); + self.entry_cache + .insert_dirty(e.get_id(), Arc::new(e.clone())); + Ok(()) + } }) } @@ -661,17 +642,15 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { where I: Iterator, { - spanned!("be::idl_arc_sqlite::delete_identry", { - idl.try_for_each(|i| { - trace!("Removing {:?} from cache", i); - if i == 0 { - Err(OperationError::InvalidEntryId) - } else { - (*self.allids).remove_id(i); - self.entry_cache.remove_dirty(i); - Ok(()) - } - }) + idl.try_for_each(|i| { + trace!("Removing {:?} from cache", i); + if i == 0 { + Err(OperationError::InvalidEntryId) + } else { + (*self.allids).remove_id(i); + self.entry_cache.remove_dirty(i); + Ok(()) + } }) } @@ -682,32 +661,30 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { idx_key: &str, idl: &IDLBitRange, ) -> Result<(), OperationError> { - spanned!("be::idl_arc_sqlite::write_idl", { - let cache_key = IdlCacheKey { - a: attr.into(), - i: itype, - k: idx_key.into(), - }; - // On idl == 0 the db will remove this, and synthesise an empty IdList on a miss - // but we can cache this as a new empty IdList instead, so that we can avoid the - // db lookup on this idl. - if idl.is_empty() { - self.idl_cache - .insert_dirty(cache_key, Box::new(IDLBitRange::new())); - } else { - self.idl_cache - .insert_dirty(cache_key, Box::new(idl.clone())); - } - // self.db.write_idl(audit, attr, itype, idx_key, idl) - Ok(()) - }) + let cache_key = IdlCacheKey { + a: attr.into(), + i: itype, + k: idx_key.into(), + }; + // On idl == 0 the db will remove this, and synthesise an empty IdList on a miss + // but we can cache this as a new empty IdList instead, so that we can avoid the + // db lookup on this idl. 
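
// Aside: this is a negative-cache trick. On "delete", an explicit empty
// IdList is pinned in the cache rather than dropping the slot, so a later
// read hits the cached empty set instead of missing and falling through to
// sqlite.
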
+ if idl.is_empty() { + self.idl_cache + .insert_dirty(cache_key, Box::new(IDLBitRange::new())); + } else { + self.idl_cache + .insert_dirty(cache_key, Box::new(idl.clone())); + } + // self.db.write_idl(audit, attr, itype, idx_key, idl) + Ok(()) } pub fn optimise_dirty_idls(&mut self) { self.idl_cache.iter_mut_dirty().for_each(|(k, maybe_idl)| { if let Some(idl) = maybe_idl { if idl.maybe_compress() { - filter_trace!(?k, "Compressed idl"); + trace!(?k, "Compressed idl"); } } }) @@ -971,27 +948,21 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { uuid: Uuid, add: BTreeSet, ) -> Result<(), OperationError> { - spanned!("be::idl_arc_sqlite::write_name2uuid_add", { - add.into_iter().for_each(|k| { - let cache_key = NameCacheKey::Name2Uuid(k); - let cache_value = NameCacheValue::U(uuid); - self.name_cache.insert_dirty(cache_key, cache_value) - }); - Ok(()) - }) + add.into_iter().for_each(|k| { + let cache_key = NameCacheKey::Name2Uuid(k); + let cache_value = NameCacheValue::U(uuid); + self.name_cache.insert_dirty(cache_key, cache_value) + }); + Ok(()) } pub fn write_name2uuid_rem(&mut self, rem: BTreeSet) -> Result<(), OperationError> { - spanned!("be::idl_arc_sqlite::write_name2uuid_rem", { - // self.db.write_name2uuid_rem(audit, &rem).and_then(|_| { - rem.into_iter().for_each(|k| { - // why not just a for loop here... - let cache_key = NameCacheKey::Name2Uuid(k); - self.name_cache.remove_dirty(cache_key) - }); - Ok(()) - // }) - }) + rem.into_iter().for_each(|k| { + // why not just a for loop here... + let cache_key = NameCacheKey::Name2Uuid(k); + self.name_cache.remove_dirty(cache_key) + }); + Ok(()) } pub fn create_uuid2spn(&self) -> Result<(), OperationError> { @@ -999,16 +970,14 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { } pub fn write_uuid2spn(&mut self, uuid: Uuid, k: Option) -> Result<(), OperationError> { - spanned!("be::idl_arc_sqlite::write_uuid2spn", { - let cache_key = NameCacheKey::Uuid2Spn(uuid); - match k { - Some(v) => self - .name_cache - .insert_dirty(cache_key, NameCacheValue::S(Box::new(v))), - None => self.name_cache.remove_dirty(cache_key), - } - Ok(()) - }) + let cache_key = NameCacheKey::Uuid2Spn(uuid); + match k { + Some(v) => self + .name_cache + .insert_dirty(cache_key, NameCacheValue::S(Box::new(v))), + None => self.name_cache.remove_dirty(cache_key), + } + Ok(()) } pub fn create_uuid2rdn(&self) -> Result<(), OperationError> { @@ -1016,16 +985,14 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { } pub fn write_uuid2rdn(&mut self, uuid: Uuid, k: Option) -> Result<(), OperationError> { - spanned!("be::idl_arc_sqlite::write_uuid2rdn", { - let cache_key = NameCacheKey::Uuid2Rdn(uuid); - match k { - Some(s) => self - .name_cache - .insert_dirty(cache_key, NameCacheValue::R(s)), - None => self.name_cache.remove_dirty(cache_key), - } - Ok(()) - }) + let cache_key = NameCacheKey::Uuid2Rdn(uuid); + match k { + Some(s) => self + .name_cache + .insert_dirty(cache_key, NameCacheValue::R(s)), + None => self.name_cache.remove_dirty(cache_key), + } + Ok(()) } pub fn create_idx(&self, attr: &str, itype: IndexType) -> Result<(), OperationError> { diff --git a/kanidmd/idm/src/be/idl_sqlite.rs b/kanidmd/idm/src/be/idl_sqlite.rs index bc264cb5a..ecceb9f17 100644 --- a/kanidmd/idm/src/be/idl_sqlite.rs +++ b/kanidmd/idm/src/be/idl_sqlite.rs @@ -1,24 +1,22 @@ -use crate::be::dbentry::DbEntry; -use crate::be::dbentry::DbIdentSpn; -use crate::be::{BackendConfig, IdList, IdRawEntry, IdxKey, IdxSlope}; -use crate::entry::{Entry, EntryCommitted, EntrySealed}; -use crate::prelude::*; -use 
crate::value::{IndexType, Value}; +use std::convert::{TryFrom, TryInto}; +use std::sync::Arc; +use std::time::Duration; + // use crate::valueset; use hashbrown::HashMap; use idlset::v2::IDLBitRange; use kanidm_proto::v1::{ConsistencyError, OperationError}; use r2d2::Pool; use r2d2_sqlite::SqliteConnectionManager; -use rusqlite::Connection; -use rusqlite::OpenFlags; -use rusqlite::OptionalExtension; -use std::convert::{TryFrom, TryInto}; -use std::sync::Arc; -use std::time::Duration; -use tracing::trace; +use rusqlite::{Connection, OpenFlags, OptionalExtension}; use uuid::Uuid; +use crate::be::dbentry::{DbEntry, DbIdentSpn}; +use crate::be::{BackendConfig, IdList, IdRawEntry, IdxKey, IdxSlope}; +use crate::entry::{Entry, EntryCommitted, EntrySealed}; +use crate::prelude::*; +use crate::value::{IndexType, Value}; + // use uuid::Uuid; const DBV_ID2ENTRY: &str = "id2entry"; @@ -117,12 +115,10 @@ pub trait IdlSqliteTransaction { fn get_conn(&self) -> &r2d2::PooledConnection; fn get_identry(&self, idl: &IdList) -> Result>, OperationError> { - spanned!("be::idl_sqlite::get_identry", { - self.get_identry_raw(idl)? - .into_iter() - .map(|ide| ide.into_entry().map(Arc::new)) - .collect() - }) + self.get_identry_raw(idl)? + .into_iter() + .map(|ide| ide.into_entry().map(Arc::new)) + .collect() } fn get_identry_raw(&self, idl: &IdList) -> Result, OperationError> { @@ -220,112 +216,104 @@ pub trait IdlSqliteTransaction { itype: IndexType, idx_key: &str, ) -> Result, OperationError> { - spanned!("be::idl_sqlite::get_idl", { - if !(self.exists_idx(attr, itype)?) { - filter_error!( - "IdlSqliteTransaction: Index {:?} {:?} not found", - itype, - attr - ); - return Ok(None); - } - // The table exists - lets now get the actual index itself. - - let query = format!( - "SELECT idl FROM idx_{}_{} WHERE key = :idx_key", - itype.as_idx_str(), + if !(self.exists_idx(attr, itype)?) { + filter_error!( + "IdlSqliteTransaction: Index {:?} {:?} not found", + itype, attr ); - let mut stmt = self - .get_conn() - .prepare(query.as_str()) - .map_err(sqlite_error)?; - let idl_raw: Option> = stmt - .query_row(&[(":idx_key", &idx_key)], |row| row.get(0)) - // We don't mind if it doesn't exist - .optional() - .map_err(sqlite_error)?; + return Ok(None); + } + // The table exists - lets now get the actual index itself. - let idl = match idl_raw { - Some(d) => serde_json::from_slice(d.as_slice()).map_err(serde_json_error)?, - // We don't have this value, it must be empty (or we - // have a corrupted index ..... - None => IDLBitRange::new(), - }; - trace!(%idl, "Got idl for index {:?} {:?}", itype, attr); + let query = format!( + "SELECT idl FROM idx_{}_{} WHERE key = :idx_key", + itype.as_idx_str(), + attr + ); + let mut stmt = self + .get_conn() + .prepare(query.as_str()) + .map_err(sqlite_error)?; + let idl_raw: Option> = stmt + .query_row(&[(":idx_key", &idx_key)], |row| row.get(0)) + // We don't mind if it doesn't exist + .optional() + .map_err(sqlite_error)?; - Ok(Some(idl)) - }) + let idl = match idl_raw { + Some(d) => serde_json::from_slice(d.as_slice()).map_err(serde_json_error)?, + // We don't have this value, it must be empty (or we + // have a corrupted index ..... + None => IDLBitRange::new(), + }; + + trace!( + miss_index = ?itype, + attr = ?attr, + idl = %idl, + ); + + Ok(Some(idl)) } fn name2uuid(&mut self, name: &str) -> Result, OperationError> { - spanned!("be::idl_sqlite::name2uuid", { - // The table exists - lets now get the actual index itself. 
- let mut stmt = self - .get_conn() - .prepare("SELECT uuid FROM idx_name2uuid WHERE name = :name") - .map_err(sqlite_error)?; - let uuid_raw: Option = stmt - .query_row(&[(":name", &name)], |row| row.get(0)) - // We don't mind if it doesn't exist - .optional() - .map_err(sqlite_error)?; + // The table exists - lets now get the actual index itself. + let mut stmt = self + .get_conn() + .prepare("SELECT uuid FROM idx_name2uuid WHERE name = :name") + .map_err(sqlite_error)?; + let uuid_raw: Option = stmt + .query_row(&[(":name", &name)], |row| row.get(0)) + // We don't mind if it doesn't exist + .optional() + .map_err(sqlite_error)?; - let uuid = uuid_raw.as_ref().and_then(|u| Uuid::parse_str(u).ok()); - trace!(%name, ?uuid, "Got uuid for index"); + let uuid = uuid_raw.as_ref().and_then(|u| Uuid::parse_str(u).ok()); - Ok(uuid) - }) + Ok(uuid) } fn uuid2spn(&mut self, uuid: Uuid) -> Result, OperationError> { - spanned!("be::idl_sqlite::uuid2spn", { - let uuids = uuid.as_hyphenated().to_string(); - // The table exists - lets now get the actual index itself. - let mut stmt = self - .get_conn() - .prepare("SELECT spn FROM idx_uuid2spn WHERE uuid = :uuid") - .map_err(sqlite_error)?; - let spn_raw: Option> = stmt - .query_row(&[(":uuid", &uuids)], |row| row.get(0)) - // We don't mind if it doesn't exist - .optional() - .map_err(sqlite_error)?; + let uuids = uuid.as_hyphenated().to_string(); + // The table exists - lets now get the actual index itself. + let mut stmt = self + .get_conn() + .prepare("SELECT spn FROM idx_uuid2spn WHERE uuid = :uuid") + .map_err(sqlite_error)?; + let spn_raw: Option> = stmt + .query_row(&[(":uuid", &uuids)], |row| row.get(0)) + // We don't mind if it doesn't exist + .optional() + .map_err(sqlite_error)?; - let spn: Option = match spn_raw { - Some(d) => { - let dbv: DbIdentSpn = - serde_json::from_slice(d.as_slice()).map_err(serde_json_error)?; + let spn: Option = match spn_raw { + Some(d) => { + let dbv: DbIdentSpn = + serde_json::from_slice(d.as_slice()).map_err(serde_json_error)?; - Some(Value::from(dbv)) - } - None => None, - }; + Some(Value::from(dbv)) + } + None => None, + }; - trace!(?uuid, ?spn, "Got spn for uuid"); - - Ok(spn) - }) + Ok(spn) } fn uuid2rdn(&mut self, uuid: Uuid) -> Result, OperationError> { - spanned!("be::idl_sqlite::uuid2rdn", { - let uuids = uuid.as_hyphenated().to_string(); - // The table exists - lets now get the actual index itself. - let mut stmt = self - .get_conn() - .prepare("SELECT rdn FROM idx_uuid2rdn WHERE uuid = :uuid") - .map_err(sqlite_error)?; - let rdn: Option = stmt - .query_row(&[(":uuid", &uuids)], |row| row.get(0)) - // We don't mind if it doesn't exist - .optional() - .map_err(sqlite_error)?; + let uuids = uuid.as_hyphenated().to_string(); + // The table exists - lets now get the actual index itself. 
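
// Aside: the lookup shape shared by name2uuid, uuid2spn and uuid2rdn, sketched
// standalone and mirroring the uuid2rdn query that follows. rusqlite's
// `optional()` converts the "no rows returned" error into Ok(None), so a
// missing key is an ordinary miss rather than a failure:

use rusqlite::{Connection, OptionalExtension};

fn lookup_rdn(conn: &Connection, uuid: &str) -> rusqlite::Result<Option<String>> {
    conn.prepare("SELECT rdn FROM idx_uuid2rdn WHERE uuid = :uuid")?
        .query_row(&[(":uuid", &uuid)], |row| row.get(0))
        .optional()
}
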
+ let mut stmt = self + .get_conn() + .prepare("SELECT rdn FROM idx_uuid2rdn WHERE uuid = :uuid") + .map_err(sqlite_error)?; + let rdn: Option = stmt + .query_row(&[(":uuid", &uuids)], |row| row.get(0)) + // We don't mind if it doesn't exist + .optional() + .map_err(sqlite_error)?; - trace!(?uuid, ?rdn, "Got rdn for uuid"); - - Ok(rdn) - }) + Ok(rdn) } fn get_db_s_uuid(&self) -> Result, OperationError> { @@ -422,8 +410,8 @@ pub trait IdlSqliteTransaction { }) } + #[instrument(level = "debug", name = "idl_sqlite::get_allids", skip_all)] fn get_allids(&self) -> Result { - trace!("Building allids..."); let mut stmt = self .get_conn() .prepare("SELECT id FROM id2entry") @@ -604,20 +592,18 @@ impl IdlSqliteWriteTransaction { } } + #[instrument(level = "debug", name = "idl_sqlite::commit", skip_all)] pub fn commit(mut self) -> Result<(), OperationError> { - spanned!("be::idl_sqlite::commit", { - trace!("Commiting BE WR txn"); - assert!(!self.committed); - self.committed = true; + assert!(!self.committed); + self.committed = true; - self.conn - .execute("COMMIT TRANSACTION", []) - .map(|_| ()) - .map_err(|e| { - admin_error!(?e, "CRITICAL: failed to commit sqlite txn"); - OperationError::BackendEngine - }) - }) + self.conn + .execute("COMMIT TRANSACTION", []) + .map(|_| ()) + .map_err(|e| { + admin_error!(?e, "CRITICAL: failed to commit sqlite txn"); + OperationError::BackendEngine + }) } pub fn get_id2entry_max_id(&self) -> Result { @@ -770,46 +756,42 @@ impl IdlSqliteWriteTransaction { idx_key: &str, idl: &IDLBitRange, ) -> Result<(), OperationError> { - spanned!("be::idl_sqlite::write_idl", { - if idl.is_empty() { - trace!(?idl, "purging idl"); - // delete it - // Delete this idx_key from the table. - let query = format!( - "DELETE FROM idx_{}_{} WHERE key = :key", - itype.as_idx_str(), - attr - ); + if idl.is_empty() { + // delete it + // Delete this idx_key from the table. + let query = format!( + "DELETE FROM idx_{}_{} WHERE key = :key", + itype.as_idx_str(), + attr + ); - self.conn - .prepare(query.as_str()) - .and_then(|mut stmt| stmt.execute(&[(":key", &idx_key)])) - .map_err(sqlite_error) - } else { - trace!(?idl, "writing idl"); - // Serialise the IdList to Vec - let idl_raw = serde_json::to_vec(idl).map_err(serde_json_error)?; + self.conn + .prepare(query.as_str()) + .and_then(|mut stmt| stmt.execute(&[(":key", &idx_key)])) + .map_err(sqlite_error) + } else { + // Serialise the IdList to Vec + let idl_raw = serde_json::to_vec(idl).map_err(serde_json_error)?; - // update or create it. - let query = format!( - "INSERT OR REPLACE INTO idx_{}_{} (key, idl) VALUES(:key, :idl)", - itype.as_idx_str(), - attr - ); + // update or create it. + let query = format!( + "INSERT OR REPLACE INTO idx_{}_{} (key, idl) VALUES(:key, :idl)", + itype.as_idx_str(), + attr + ); - self.conn - .prepare(query.as_str()) - .and_then(|mut stmt| { - stmt.execute(named_params! { - ":key": &idx_key, - ":idl": &idl_raw - }) + self.conn + .prepare(query.as_str()) + .and_then(|mut stmt| { + stmt.execute(named_params! 
{ + ":key": &idx_key, + ":idl": &idl_raw }) - .map_err(sqlite_error) - } - // Get rid of the sqlite rows usize - .map(|_| ()) - }) + }) + .map_err(sqlite_error) + } + // Get rid of the sqlite rows usize + .map(|_| ()) } pub fn create_name2uuid(&self) -> Result<(), OperationError> { @@ -944,7 +926,7 @@ impl IdlSqliteWriteTransaction { itype.as_idx_str(), attr ); - trace!(idx = %idx_stmt, "Creating index"); + trace!(idx = %idx_stmt, "creating index"); self.conn .execute(idx_stmt.as_str(), []) @@ -1034,7 +1016,6 @@ impl IdlSqliteWriteTransaction { } pub unsafe fn purge_id2entry(&self) -> Result<(), OperationError> { - trace!("purge id2entry ..."); self.conn .execute("DELETE FROM id2entry", []) .map(|_| ()) @@ -1175,7 +1156,6 @@ impl IdlSqliteWriteTransaction { // If the table is empty, populate the versions as 0. let mut dbv_id2entry = self.get_db_version_key(DBV_ID2ENTRY); - trace!(initial = %dbv_id2entry, "dbv_id2entry"); // Check db_version here. // * if 0 -> create v1. @@ -1374,13 +1354,12 @@ impl IdlSqlite { } pub(crate) fn get_allids_count(&self) -> Result { - trace!("Counting allids..."); #[allow(clippy::expect_used)] self.pool .try_get() .expect("Unable to get connection from pool!!!") .query_row("select count(id) from id2entry", [], |row| row.get(0)) - .map_err(sqlite_error) // this was initially `ltrace`, but I think that was a mistake so I replaced it anyways. + .map_err(sqlite_error) } pub fn read(&self) -> IdlSqliteReadTransaction { diff --git a/kanidmd/idm/src/be/idxkey.rs b/kanidmd/idm/src/be/idxkey.rs index b27603103..3cdb4a9f0 100644 --- a/kanidmd/idm/src/be/idxkey.rs +++ b/kanidmd/idm/src/be/idxkey.rs @@ -1,9 +1,11 @@ -use crate::value::IndexType; -use smartstring::alias::String as AttrString; use std::borrow::Borrow; use std::cmp::Ordering; use std::hash::{Hash, Hasher}; +use smartstring::alias::String as AttrString; + +use crate::value::IndexType; + pub type IdxSlope = u8; // Huge props to https://github.com/sunshowers/borrow-complex-key-example/blob/master/src/lib.rs diff --git a/kanidmd/idm/src/be/mod.rs b/kanidmd/idm/src/be/mod.rs index 680e0d544..0059697b9 100644 --- a/kanidmd/idm/src/be/mod.rs +++ b/kanidmd/idm/src/be/mod.rs @@ -4,35 +4,32 @@ //! is to persist content safely to disk, load that content, and execute queries //! utilising indexes in the most effective way possible. 
-use std::fs; - -use crate::prelude::*; -use crate::value::IndexType; -use hashbrown::HashMap as Map; -use hashbrown::HashSet; use std::cell::UnsafeCell; +use std::fs; +use std::ops::DerefMut; use std::sync::Arc; +use std::time::Duration; + +use concread::cowcell::*; +use hashbrown::{HashMap as Map, HashSet}; +use idlset::v2::IDLBitRange; +use idlset::AndNot; +use kanidm_proto::v1::{ConsistencyError, OperationError}; +use smartstring::alias::String as AttrString; use tracing::{trace, trace_span}; +use uuid::Uuid; use crate::be::dbentry::{DbBackup, DbEntry}; use crate::entry::{Entry, EntryCommitted, EntryNew, EntrySealed}; use crate::filter::{Filter, FilterPlan, FilterResolved, FilterValidResolved}; use crate::identity::Limits; -use crate::value::Value; -use concread::cowcell::*; -use idlset::v2::IDLBitRange; -use idlset::AndNot; -use kanidm_proto::v1::{ConsistencyError, OperationError}; -use smartstring::alias::String as AttrString; -use std::ops::DerefMut; -use std::time::Duration; -use uuid::Uuid; - +use crate::prelude::*; use crate::repl::cid::Cid; use crate::repl::ruv::{ ReplicationUpdateVector, ReplicationUpdateVectorReadTransaction, ReplicationUpdateVectorTransaction, ReplicationUpdateVectorWriteTransaction, }; +use crate::value::{IndexType, Value}; pub mod dbentry; pub mod dbvalue; @@ -41,12 +38,10 @@ mod idl_sqlite; pub(crate) mod idxkey; pub(crate) use self::idxkey::{IdxKey, IdxKeyRef, IdxKeyToRef, IdxSlope}; - use crate::be::idl_arc_sqlite::{ IdlArcSqlite, IdlArcSqliteReadTransaction, IdlArcSqliteTransaction, IdlArcSqliteWriteTransaction, }; - // Re-export this pub use crate::be::idl_sqlite::FsType; @@ -175,6 +170,7 @@ pub trait BackendTransaction { /// Recursively apply a filter, transforming into IdList's on the way. This builds a query /// execution log, so that it can be examined how an operation proceeded. #[allow(clippy::cognitive_complexity)] + #[instrument(level = "debug", name = "be::filter2idl", skip_all)] fn filter2idl( &self, filt: &FilterResolved, @@ -534,6 +530,7 @@ pub trait BackendTransaction { }) } + #[instrument(level = "debug", name = "be::search", skip_all)] fn search( &self, erl: &Limits, @@ -543,165 +540,150 @@ pub trait BackendTransaction { // Unlike DS, even if we don't get the index back, we can just pass // to the in-memory filter test and be done. 
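
// Aside: the comment above is the key idea of `search`. A sketch of the limit
// checks that follow, reusing the toy `Ids` from the previous aside (the real
// code applies distinct thresholds to Partial, search_max_filter_test, and
// Indexed, search_max_results):

fn search_limits(idl: &Ids, unindexed_allow: bool, max: usize) -> Result<(), &'static str> {
    match idl {
        // Fully unindexed queries are refused unless the session's resource
        // limits explicitly allow them.
        Ids::AllIds if !unindexed_allow => Err("fully unindexed, refused"),
        // Oversized candidate sets are refused before the in-memory
        // entry_match_no_index() test would run over them.
        Ids::Indexed(ids) if ids.len() > max => Err("over resource limit"),
        _ => Ok(()),
    }
}
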
- spanned!("be::search", { - filter_trace!(?filt, "filter optimized"); + debug!(filter_optimised = ?filt); - let (idl, fplan) = trace_span!("be::search -> filter2idl").in_scope(|| { - spanned!("be::search -> filter2idl", { - self.filter2idl(filt.to_inner(), FILTER_SEARCH_TEST_THRESHOLD) - }) - })?; + let (idl, fplan) = trace_span!("be::search -> filter2idl") + .in_scope(|| self.filter2idl(filt.to_inner(), FILTER_SEARCH_TEST_THRESHOLD))?; - filter_trace!(?fplan, "filter executed plan"); + debug!(filter_executed_plan = ?fplan); - match &idl { - IdList::AllIds => { - if !erl.unindexed_allow { - admin_error!( - "filter (search) is fully unindexed, and not allowed by resource limits" - ); - return Err(OperationError::ResourceLimit); - } + match &idl { + IdList::AllIds => { + if !erl.unindexed_allow { + admin_error!( + "filter (search) is fully unindexed, and not allowed by resource limits" + ); + return Err(OperationError::ResourceLimit); } - IdList::Partial(idl_br) => { - // if idl_br.len() > erl.search_max_filter_test { - if !idl_br.below_threshold(erl.search_max_filter_test) { - admin_error!("filter (search) is partial indexed and greater than search_max_filter_test allowed by resource limits"); - return Err(OperationError::ResourceLimit); - } - } - IdList::PartialThreshold(_) => { - // Since we opted for this, this is not the fault - // of the user and we should not penalise them by limiting on partial. - } - IdList::Indexed(idl_br) => { - // We know this is resolved here, so we can attempt the limit - // check. This has to fold the whole index, but you know, class=pres is - // indexed ... - // if idl_br.len() > erl.search_max_results { - if !idl_br.below_threshold(erl.search_max_results) { - admin_error!("filter (search) is indexed and greater than search_max_results allowed by resource limits"); - return Err(OperationError::ResourceLimit); - } - } - }; - - let entries = self.get_idlayer().get_identry(&idl).map_err(|e| { - admin_error!(?e, "get_identry failed"); - e - })?; - - let entries_filtered = match idl { - IdList::AllIds => trace_span!("be::search").in_scope(|| { - spanned!("be::search", { - entries - .into_iter() - .filter(|e| e.entry_match_no_index(filt)) - .collect() - }) - }), - IdList::Partial(_) => { - trace_span!("be::search").in_scope(|| { - entries - .into_iter() - .filter(|e| e.entry_match_no_index(filt)) - .collect() - }) - } - IdList::PartialThreshold(_) => trace_span!("be::search") - .in_scope(|| { - spanned!("be::search", { - entries - .into_iter() - .filter(|e| e.entry_match_no_index(filt)) - .collect() - }) - }), - // Since the index fully resolved, we can shortcut the filter test step here! - IdList::Indexed(_) => { - filter_trace!("filter (search) was fully indexed 👏"); - entries - } - }; - - // If the idl was not indexed, apply the resource limit now. Avoid the needless match since the - // if statement is quick. 
- if entries_filtered.len() > erl.search_max_results { - admin_error!("filter (search) is resolved and greater than search_max_results allowed by resource limits"); - return Err(OperationError::ResourceLimit); } + IdList::Partial(idl_br) => { + // if idl_br.len() > erl.search_max_filter_test { + if !idl_br.below_threshold(erl.search_max_filter_test) { + admin_error!("filter (search) is partial indexed and greater than search_max_filter_test allowed by resource limits"); + return Err(OperationError::ResourceLimit); + } + } + IdList::PartialThreshold(_) => { + // Since we opted for this, this is not the fault + // of the user and we should not penalise them by limiting on partial. + } + IdList::Indexed(idl_br) => { + // We know this is resolved here, so we can attempt the limit + // check. This has to fold the whole index, but you know, class=pres is + // indexed ... + // if idl_br.len() > erl.search_max_results { + if !idl_br.below_threshold(erl.search_max_results) { + admin_error!("filter (search) is indexed and greater than search_max_results allowed by resource limits"); + return Err(OperationError::ResourceLimit); + } + } + }; - Ok(entries_filtered) - }) + let entries = self.get_idlayer().get_identry(&idl).map_err(|e| { + admin_error!(?e, "get_identry failed"); + e + })?; + + let entries_filtered = match idl { + IdList::AllIds => trace_span!("be::search").in_scope(|| { + entries + .into_iter() + .filter(|e| e.entry_match_no_index(filt)) + .collect() + }), + IdList::Partial(_) => trace_span!("be::search").in_scope(|| { + entries + .into_iter() + .filter(|e| e.entry_match_no_index(filt)) + .collect() + }), + IdList::PartialThreshold(_) => trace_span!("be::search") + .in_scope(|| { + entries + .into_iter() + .filter(|e| e.entry_match_no_index(filt)) + .collect() + }), + // Since the index fully resolved, we can shortcut the filter test step here! + IdList::Indexed(_) => { + filter_trace!("filter (search) was fully indexed 👏"); + entries + } + }; + + // If the idl was not indexed, apply the resource limit now. Avoid the needless match since the + // if statement is quick. + if entries_filtered.len() > erl.search_max_results { + admin_error!("filter (search) is resolved and greater than search_max_results allowed by resource limits"); + return Err(OperationError::ResourceLimit); + } + + Ok(entries_filtered) } /// Given a filter, assert some condition exists. /// Basically, this is a specialised case of search, where we don't need to /// load any candidates if they match. This is heavily used in uuid /// refint and attr uniqueness. + #[instrument(level = "debug", name = "be::exists", skip_all)] fn exists( &self, erl: &Limits, filt: &Filter, ) -> Result { - let _entered = trace_span!("be::exists").entered(); - spanned!("be::exists", { - filter_trace!(?filt, "filter optimised"); + debug!(filter_optimised = ?filt); - // Using the indexes, resolve the IdList here, or AllIds. - // Also get if the filter was 100% resolved or not. - let (idl, fplan) = spanned!("be::exists -> filter2idl", { - spanned!("be::exists -> filter2idl", { - self.filter2idl(filt.to_inner(), FILTER_EXISTS_TEST_THRESHOLD) - }) - })?; + // Using the indexes, resolve the IdList here, or AllIds. + // Also get if the filter was 100% resolved or not. + let (idl, fplan) = self.filter2idl(filt.to_inner(), FILTER_EXISTS_TEST_THRESHOLD)?; - filter_trace!(?fplan, "filter executed plan"); + debug!(filter_executed_plan = ?fplan); - // Apply limits to the IdList. 
- match &idl { - IdList::AllIds => { - if !erl.unindexed_allow { - admin_error!("filter (exists) is fully unindexed, and not allowed by resource limits"); - return Err(OperationError::ResourceLimit); - } + // Apply limits to the IdList. + match &idl { + IdList::AllIds => { + if !erl.unindexed_allow { + admin_error!( + "filter (exists) is fully unindexed, and not allowed by resource limits" + ); + return Err(OperationError::ResourceLimit); } - IdList::Partial(idl_br) => { - if !idl_br.below_threshold(erl.search_max_filter_test) { - admin_error!("filter (exists) is partial indexed and greater than search_max_filter_test allowed by resource limits"); - return Err(OperationError::ResourceLimit); - } - } - IdList::PartialThreshold(_) => { - // Since we opted for this, this is not the fault - // of the user and we should not penalise them. - } - IdList::Indexed(_) => {} } - - // Now, check the idl -- if it's fully resolved, we can skip this because the query - // was fully indexed. - match &idl { - IdList::Indexed(idl) => Ok(!idl.is_empty()), - _ => { - let entries = self.get_idlayer().get_identry(&idl).map_err(|e| { - admin_error!(?e, "get_identry failed"); - e - })?; - - // if not 100% resolved query, apply the filter test. - let entries_filtered: Vec<_> = - spanned!("be::exists -> entry_match_no_index", { - entries - .into_iter() - .filter(|e| e.entry_match_no_index(filt)) - .collect() - }); - - Ok(!entries_filtered.is_empty()) + IdList::Partial(idl_br) => { + if !idl_br.below_threshold(erl.search_max_filter_test) { + admin_error!("filter (exists) is partial indexed and greater than search_max_filter_test allowed by resource limits"); + return Err(OperationError::ResourceLimit); } - } // end match idl - }) // end spanned + } + IdList::PartialThreshold(_) => { + // Since we opted for this, this is not the fault + // of the user and we should not penalise them. + } + IdList::Indexed(_) => {} + } + + // Now, check the idl -- if it's fully resolved, we can skip this because the query + // was fully indexed. + match &idl { + IdList::Indexed(idl) => Ok(!idl.is_empty()), + _ => { + let entries = self.get_idlayer().get_identry(&idl).map_err(|e| { + admin_error!(?e, "get_identry failed"); + e + })?; + + // if not 100% resolved query, apply the filter test. 
+ let entries_filtered: Vec<_> = + trace_span!("be::exists").in_scope(|| { + entries + .into_iter() + .filter(|e| e.entry_match_no_index(filt)) + .collect() + }); + + Ok(!entries_filtered.is_empty()) + } + } // end match idl } fn verify(&self) -> Vec> { @@ -878,6 +860,7 @@ pub trait BackendTransaction { impl<'a> BackendTransaction for BackendReadTransaction<'a> { type IdlLayerType = IdlArcSqliteReadTransaction<'a>; + type RuvType = ReplicationUpdateVectorReadTransaction<'a>; #[allow(clippy::mut_from_ref)] fn get_idlayer(&self) -> &mut IdlArcSqliteReadTransaction<'a> { @@ -895,8 +878,6 @@ impl<'a> BackendTransaction for BackendReadTransaction<'a> { unsafe { &mut (*self.idlayer.get()) } } - type RuvType = ReplicationUpdateVectorReadTransaction<'a>; - #[allow(clippy::mut_from_ref)] fn get_ruv(&self) -> &mut ReplicationUpdateVectorReadTransaction<'a> { unsafe { &mut (*self.ruv.get()) } @@ -930,14 +911,13 @@ impl<'a> BackendReadTransaction<'a> { impl<'a> BackendTransaction for BackendWriteTransaction<'a> { type IdlLayerType = IdlArcSqliteWriteTransaction<'a>; + type RuvType = ReplicationUpdateVectorWriteTransaction<'a>; #[allow(clippy::mut_from_ref)] fn get_idlayer(&self) -> &mut IdlArcSqliteWriteTransaction<'a> { unsafe { &mut (*self.idlayer.get()) } } - type RuvType = ReplicationUpdateVectorWriteTransaction<'a>; - #[allow(clippy::mut_from_ref)] fn get_ruv(&self) -> &mut ReplicationUpdateVectorWriteTransaction<'a> { unsafe { &mut (*self.ruv.get()) } @@ -949,181 +929,179 @@ impl<'a> BackendTransaction for BackendWriteTransaction<'a> { } impl<'a> BackendWriteTransaction<'a> { + #[instrument(level = "debug", name = "be::create", skip_all)] pub fn create( &self, cid: &Cid, entries: Vec>, ) -> Result>, OperationError> { - spanned!("be::create", { - if entries.is_empty() { - admin_error!("No entries provided to BE to create, invalid server call!"); - return Err(OperationError::EmptyRequest); + if entries.is_empty() { + admin_error!("No entries provided to BE to create, invalid server call!"); + return Err(OperationError::EmptyRequest); + } + + // Check that every entry has a change associated + // that matches the cid? + entries.iter().try_for_each(|e| { + if e.get_changelog().contains_tail_cid(cid) { + Ok(()) + } else { + admin_error!( + "Entry changelog does not contain a change related to this transaction" + ); + Err(OperationError::ReplEntryNotChanged) } + })?; - // Check that every entry has a change associated - // that matches the cid? - entries.iter().try_for_each(|e| { - if e.get_changelog().contains_tail_cid(cid) { - Ok(()) - } else { - admin_error!( - "Entry changelog does not contain a change related to this transaction" - ); - Err(OperationError::ReplEntryNotChanged) - } - })?; + let idlayer = self.get_idlayer(); + // Now, assign id's to all the new entries. - let idlayer = self.get_idlayer(); - // Now, assign id's to all the new entries. + let mut id_max = idlayer.get_id2entry_max_id()?; + let c_entries: Vec<_> = entries + .into_iter() + .map(|e| { + id_max += 1; + e.into_sealed_committed_id(id_max) + }) + .collect(); - let mut id_max = idlayer.get_id2entry_max_id()?; - let c_entries: Vec<_> = entries - .into_iter() - .map(|e| { - id_max += 1; - e.into_sealed_committed_id(id_max) - }) - .collect(); + // All good, lets update the RUV. + // This auto compresses. + let ruv_idl = IDLBitRange::from_iter(c_entries.iter().map(|e| e.get_id())); - // All good, lets update the RUV. - // This auto compresses. 
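The recurring change in this patch is visible again here: spanned!("be::create", { ... }) blocks become #[instrument] attributes, which open an equivalent tracing span around the whole function body and remove one level of nesting. A standalone example of the attribute form (the function itself is invented for illustration):

    use tracing::{debug, instrument};

    // Opens a span named "be::create" at debug level on every call.
    // skip_all records no arguments as span fields, which also avoids
    // requiring the arguments to implement tracing's field traits.
    #[instrument(level = "debug", name = "be::create", skip_all)]
    fn create(entries: Vec<String>) -> Result<usize, &'static str> {
        if entries.is_empty() {
            return Err("no entries provided");
        }
        debug!(count = entries.len(), "creating entries");
        Ok(entries.len())
    }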
- let ruv_idl = IDLBitRange::from_iter(c_entries.iter().map(|e| e.get_id())); + self.get_ruv().insert_change(cid, ruv_idl)?; - self.get_ruv().insert_change(cid, ruv_idl)?; + idlayer.write_identries(c_entries.iter())?; - idlayer.write_identries(c_entries.iter())?; + idlayer.set_id2entry_max_id(id_max); - idlayer.set_id2entry_max_id(id_max); + // Now update the indexes as required. + for e in c_entries.iter() { + self.entry_index(None, Some(e))? + } - // Now update the indexes as required. - for e in c_entries.iter() { - self.entry_index(None, Some(e))? - } - - Ok(c_entries) - }) + Ok(c_entries) } + #[instrument(level = "debug", name = "be::modify", skip_all)] pub fn modify( &self, cid: &Cid, pre_entries: &[Arc], post_entries: &[EntrySealedCommitted], ) -> Result<(), OperationError> { - spanned!("be::modify", { - if post_entries.is_empty() || pre_entries.is_empty() { - admin_error!("No entries provided to BE to modify, invalid server call!"); - return Err(OperationError::EmptyRequest); + if post_entries.is_empty() || pre_entries.is_empty() { + admin_error!("No entries provided to BE to modify, invalid server call!"); + return Err(OperationError::EmptyRequest); + } + + assert!(post_entries.len() == pre_entries.len()); + + post_entries.iter().try_for_each(|e| { + if e.get_changelog().contains_tail_cid(cid) { + Ok(()) + } else { + admin_error!( + "Entry changelog does not contain a change related to this transaction" + ); + Err(OperationError::ReplEntryNotChanged) } + })?; - assert!(post_entries.len() == pre_entries.len()); + // All good, lets update the RUV. + // This auto compresses. + let ruv_idl = IDLBitRange::from_iter(post_entries.iter().map(|e| e.get_id())); + self.get_ruv().insert_change(cid, ruv_idl)?; - post_entries.iter().try_for_each(|e| { - if e.get_changelog().contains_tail_cid(cid) { - Ok(()) - } else { - admin_error!( - "Entry changelog does not contain a change related to this transaction" - ); - Err(OperationError::ReplEntryNotChanged) - } - })?; + // Now, given the list of id's, update them + self.get_idlayer().write_identries(post_entries.iter())?; - // All good, lets update the RUV. - // This auto compresses. - let ruv_idl = IDLBitRange::from_iter(post_entries.iter().map(|e| e.get_id())); - self.get_ruv().insert_change(cid, ruv_idl)?; - - // Now, given the list of id's, update them - self.get_idlayer().write_identries(post_entries.iter())?; - - // Finally, we now reindex all the changed entries. We do this by iterating and zipping - // over the set, because we know the list is in the same order. - pre_entries - .iter() - .zip(post_entries.iter()) - .try_for_each(|(pre, post)| self.entry_index(Some(pre.as_ref()), Some(post))) - }) + // Finally, we now reindex all the changed entries. We do this by iterating and zipping + // over the set, because we know the list is in the same order. + pre_entries + .iter() + .zip(post_entries.iter()) + .try_for_each(|(pre, post)| self.entry_index(Some(pre.as_ref()), Some(post))) } + #[instrument(level = "debug", name = "be::reap_tombstones", skip_all)] pub fn reap_tombstones(&self, cid: &Cid) -> Result { - spanned!("be::reap_tombstones", { - // We plan to clear the RUV up to this cid. So we need to build an IDL - // of all the entries we need to examine. - let idl = self.get_ruv().trim_up_to(cid).map_err(|e| { - admin_error!(?e, "failed to trim RUV to {:?}", cid); + // We plan to clear the RUV up to this cid. So we need to build an IDL + // of all the entries we need to examine. 
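Both create() and modify() open with the same pre-flight assertion: every entry's changelog must already contain a change for this transaction's CID, otherwise the whole batch is rejected before anything is written. A reduced sketch of that pattern (Cid and the changelog check are simplified stand-ins):

    #[derive(PartialEq)]
    struct Cid(u64);

    struct Entry {
        changelog_tail: Cid,
    }

    impl Entry {
        fn contains_tail_cid(&self, cid: &Cid) -> bool {
            self.changelog_tail == *cid
        }
    }

    // try_for_each stops at the first failing entry and propagates its error,
    // so no write can begin while an unchanged entry sits in the batch.
    fn assert_all_changed(entries: &[Entry], cid: &Cid) -> Result<(), &'static str> {
        entries.iter().try_for_each(|e| {
            if e.contains_tail_cid(cid) {
                Ok(())
            } else {
                Err("entry changelog does not contain a change for this transaction")
            }
        })
    }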
+ let idl = self.get_ruv().trim_up_to(cid).map_err(|e| { + admin_error!(?e, "failed to trim RUV to {:?}", cid); + e + })?; + + let entries = self + .get_idlayer() + .get_identry(&IdList::Indexed(idl)) + .map_err(|e| { + admin_error!(?e, "get_identry failed"); e })?; - let entries = self - .get_idlayer() - .get_identry(&IdList::Indexed(idl)) - .map_err(|e| { - admin_error!(?e, "get_identry failed"); - e - })?; + if entries.is_empty() { + admin_info!("No entries affected - reap_tombstones operation success"); + return Ok(0); + } - if entries.is_empty() { - admin_info!("No entries affected - reap_tombstones operation success"); - return Ok(0); - } + // Now that we have a list of entries we need to partition them into + // two sets. The entries that are tombstoned and ready to reap_tombstones, and + // the entries that need to have their change logs trimmed. - // Now that we have a list of entries we need to partition them into - // two sets. The entries that are tombstoned and ready to reap_tombstones, and - // the entries that need to have their change logs trimmed. + // First we trim changelogs. Go through each entry, and trim the CL, and write it back. + let mut entries: Vec<_> = entries.iter().map(|er| er.as_ref().clone()).collect(); - // First we trim changelogs. Go through each entry, and trim the CL, and write it back. - let mut entries: Vec<_> = entries.iter().map(|er| er.as_ref().clone()).collect(); + entries + .iter_mut() + .try_for_each(|e| e.get_changelog_mut().trim_up_to(cid))?; - entries - .iter_mut() - .try_for_each(|e| e.get_changelog_mut().trim_up_to(cid))?; + // Write down the cl trims + self.get_idlayer().write_identries(entries.iter())?; - // Write down the cl trims - self.get_idlayer().write_identries(entries.iter())?; + let (tombstones, leftover): (Vec<_>, Vec<_>) = entries + .into_iter() + .partition(|e| e.get_changelog().can_delete()); - let (tombstones, leftover): (Vec<_>, Vec<_>) = entries - .into_iter() - .partition(|e| e.get_changelog().can_delete()); + // Assert that anything leftover still either is *alive* OR is a tombstone + // and has entries in the RUV! + let ruv_idls = self.get_ruv().ruv_idls(); - // Assert that anything leftover still either is *alive* OR is a tombstone - // and has entries in the RUV! - let ruv_idls = self.get_ruv().ruv_idls(); + if !leftover + .iter() + .all(|e| e.get_changelog().is_live() || ruv_idls.contains(e.get_id())) + { + admin_error!("Left over entries may be orphaned due to missing RUV entries"); + return Err(OperationError::ReplInvalidRUVState); + } - if !leftover - .iter() - .all(|e| e.get_changelog().is_live() || ruv_idls.contains(e.get_id())) - { - admin_error!("Left over entries may be orphaned due to missing RUV entries"); - return Err(OperationError::ReplInvalidRUVState); - } + // Now setup to reap_tombstones the tombstones. Remember, in the post cleanup, it's could + // now have been trimmed to a point we can purge them! - // Now setup to reap_tombstones the tombstones. Remember, in the post cleanup, it's could - // now have been trimmed to a point we can purge them! + // Assert the id's exist on the entry. + let id_list: IDLBitRange = tombstones.iter().map(|e| e.get_id()).collect(); - // Assert the id's exist on the entry. - let id_list: IDLBitRange = tombstones.iter().map(|e| e.get_id()).collect(); + // Ensure nothing here exists in the RUV index, else it means + // we didn't trim properly, or some other state violation has occured. 
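After the changelog trim, the reap splits the entries in one pass with Iterator::partition: tombstones whose changelog says they can be deleted, and leftover entries that must survive. A small sketch of the same split (can_delete is a stand-in for the changelog's real check):

    struct Entry {
        id: u64,
        live: bool,
    }

    impl Entry {
        fn can_delete(&self) -> bool {
            !self.live
        }
    }

    // partition walks the input once and preserves order in both halves.
    fn split(entries: Vec<Entry>) -> (Vec<Entry>, Vec<Entry>) {
        entries.into_iter().partition(|e| e.can_delete())
    }

    fn main() {
        let entries = vec![
            Entry { id: 1, live: true },
            Entry { id: 2, live: false },
            Entry { id: 3, live: false },
        ];
        let (tombstones, leftover) = split(entries);
        assert_eq!(tombstones.iter().map(|e| e.id).collect::<Vec<_>>(), vec![2, 3]);
        assert_eq!(leftover.len(), 1);
    }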
+ if !((&ruv_idls & &id_list).is_empty()) { + admin_error!("RUV still contains entries that are going to be removed."); + return Err(OperationError::ReplInvalidRUVState); + } - // Ensure nothing here exists in the RUV index, else it means - // we didn't trim properly, or some other state violation has occured. - if !((&ruv_idls & &id_list).is_empty()) { - admin_error!("RUV still contains entries that are going to be removed."); - return Err(OperationError::ReplInvalidRUVState); - } + // Now, given the list of id's, reap_tombstones them. + let sz = id_list.len(); + self.get_idlayer().delete_identry(id_list.into_iter())?; - // Now, given the list of id's, reap_tombstones them. - let sz = id_list.len(); - self.get_idlayer().delete_identry(id_list.into_iter())?; + // Finally, purge the indexes from the entries we removed. + tombstones + .iter() + .try_for_each(|e| self.entry_index(Some(e), None))?; - // Finally, purge the indexes from the entries we removed. - tombstones - .iter() - .try_for_each(|e| self.entry_index(Some(e), None))?; - - Ok(sz) - }) + Ok(sz) } + #[instrument(level = "debug", name = "be::update_idxmeta", skip_all)] pub fn update_idxmeta(&mut self, idxkeys: Vec) -> Result<(), OperationError> { if self.is_idx_slopeyness_generated()? { trace!("Indexing slopes available"); @@ -1522,25 +1500,24 @@ impl<'a> BackendWriteTransaction<'a> { } } + #[instrument(level = "debug", name = "be::ruv_rebuild", skip_all)] pub fn ruv_rebuild(&mut self) -> Result<(), OperationError> { // Rebuild the ruv! - spanned!("server::ruv_rebuild", { - // For now this has to read from all the entries in the DB, but in the future - // we'll actually store this properly (?). If it turns out this is really fast - // we may just rebuild this always on startup. + // For now this has to read from all the entries in the DB, but in the future + // we'll actually store this properly (?). If it turns out this is really fast + // we may just rebuild this always on startup. - // NOTE: An important detail is that we don't rely on indexes here! + // NOTE: An important detail is that we don't rely on indexes here! - let idl = IdList::AllIds; - let entries = self.get_idlayer().get_identry(&idl).map_err(|e| { - admin_error!(?e, "get_identry failed"); - e - })?; + let idl = IdList::AllIds; + let entries = self.get_idlayer().get_identry(&idl).map_err(|e| { + admin_error!(?e, "get_identry failed"); + e + })?; - self.get_ruv().rebuild(&entries)?; + self.get_ruv().rebuild(&entries)?; - Ok(()) - }) + Ok(()) } pub fn commit(self) -> Result<(), OperationError> { @@ -1639,6 +1616,7 @@ fn get_idx_slope_default(ikey: &IdxKey) -> IdxSlope { // In the future this will do the routing between the chosen backends etc. impl Backend { + #[instrument(level = "debug", name = "be::new", skip_all)] pub fn new( mut cfg: BackendConfig, // path: &str, @@ -1675,40 +1653,38 @@ impl Backend { let ruv = Arc::new(ReplicationUpdateVector::default()); // this has a ::memory() type, but will path == "" work? - spanned!("be::new", { - let idlayer = Arc::new(IdlArcSqlite::new(&cfg, vacuum)?); - let be = Backend { - cfg, - idlayer, - ruv, - idxmeta: Arc::new(CowCell::new(IdxMeta::new(idxkeys))), - }; + let idlayer = Arc::new(IdlArcSqlite::new(&cfg, vacuum)?); + let be = Backend { + cfg, + idlayer, + ruv, + idxmeta: Arc::new(CowCell::new(IdxMeta::new(idxkeys))), + }; - // Now complete our setup with a txn - // In this case we can use an empty idx meta because we don't - // access any parts of - // the indexing subsystem here. 
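The (&ruv_idls & &id_list).is_empty() guard above is a set intersection: if any id scheduled for deletion still appears in the replication update vector, the earlier trim must have failed and the delete is refused rather than orphaning replication state. The same shape with BTreeSet standing in for IDLBitRange (both overload & as intersection):

    use std::collections::BTreeSet;

    fn ruv_check(
        ruv_idls: &BTreeSet<u64>,
        delete_ids: &BTreeSet<u64>,
    ) -> Result<(), &'static str> {
        // A non-empty intersection means the RUV still references an id we
        // are about to remove: a state violation, not a user error.
        if !(ruv_idls & delete_ids).is_empty() {
            return Err("RUV still references entries scheduled for removal");
        }
        Ok(())
    }

    fn main() {
        let ruv: BTreeSet<u64> = BTreeSet::from([10, 11]);
        let dels: BTreeSet<u64> = BTreeSet::from([11, 12]);
        assert!(ruv_check(&ruv, &dels).is_err()); // 11 is still referenced
    }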
- let mut idl_write = be.idlayer.write(); - idl_write - .setup() - .and_then(|_| idl_write.commit()) - .map_err(|e| { - admin_error!(?e, "Failed to setup idlayer"); - e - })?; + // Now complete our setup with a txn + // In this case we can use an empty idx meta because we don't + // access any parts of + // the indexing subsystem here. + let mut idl_write = be.idlayer.write(); + idl_write + .setup() + .and_then(|_| idl_write.commit()) + .map_err(|e| { + admin_error!(?e, "Failed to setup idlayer"); + e + })?; - // Now rebuild the ruv. - let mut be_write = be.write(); - be_write - .ruv_rebuild() - .and_then(|_| be_write.commit()) - .map_err(|e| { - admin_error!(?e, "Failed to reload ruv"); - e - })?; + // Now rebuild the ruv. + let mut be_write = be.write(); + be_write + .ruv_rebuild() + .and_then(|_| be_write.commit()) + .map_err(|e| { + admin_error!(?e, "Failed to reload ruv"); + e + })?; - Ok(be) - }) + Ok(be) } pub fn get_pool_size(&self) -> u32 { @@ -1762,22 +1738,23 @@ impl Backend { #[cfg(test)] mod tests { - use idlset::v2::IDLBitRange; use std::fs; use std::iter::FromIterator; use std::sync::Arc; + use std::time::Duration; + + use idlset::v2::IDLBitRange; use uuid::Uuid; use super::super::entry::{Entry, EntryInit, EntryNew}; use super::{ - Backend, BackendConfig, BackendTransaction, BackendWriteTransaction, IdList, OperationError, + Backend, BackendConfig, BackendTransaction, BackendWriteTransaction, DbBackup, IdList, + IdxKey, OperationError, }; - use super::{DbBackup, IdxKey}; use crate::identity::Limits; use crate::prelude::*; use crate::repl::cid::Cid; use crate::value::{IndexType, PartialValue, Value}; - use std::time::Duration; lazy_static! { static ref CID_ZERO: Cid = unsafe { Cid::new_zero() }; diff --git a/kanidmd/idm/src/config.rs b/kanidmd/idm/src/config.rs index c52035727..99c02e1d6 100644 --- a/kanidmd/idm/src/config.rs +++ b/kanidmd/idm/src/config.rs @@ -4,11 +4,12 @@ //! These components should be "per server". Any "per domain" config should be in the system //! or domain entries that are able to be replicated. 
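The import reshuffles in this hunk and the ones that follow all land in the same shape: std first, external crates second, crate-local paths last, each group separated by a blank line. This is the ordering that nightly rustfmt enforces via group_imports = "StdExternalCrate". Illustrated on an invented module (the crate paths here are placeholders):

    // Group 1: std / core / alloc
    use std::fmt;
    use std::time::Duration;

    // Group 2: external crates
    use serde::{Deserialize, Serialize};
    use uuid::Uuid;

    // Group 3: this crate (path invented for the example)
    use crate::config::Configuration;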
+use std::fmt; +use std::str::FromStr; + use kanidm_proto::messages::ConsoleOutputMode; use rand::prelude::*; use serde::{Deserialize, Serialize}; -use std::fmt; -use std::str::FromStr; #[derive(Serialize, Deserialize, Debug)] pub struct IntegrationTestConfig { diff --git a/kanidmd/idm/src/credential/mod.rs b/kanidmd/idm/src/credential/mod.rs index 386feab81..b84d45425 100644 --- a/kanidmd/idm/src/credential/mod.rs +++ b/kanidmd/idm/src/credential/mod.rs @@ -1,20 +1,17 @@ -use crate::be::dbvalue::DbBackupCodeV1; -use crate::be::dbvalue::{DbCred, DbPasswordV1}; -use hashbrown::HashMap as Map; -use hashbrown::HashSet; +use std::convert::TryFrom; +use std::time::{Duration, Instant}; + +use hashbrown::{HashMap as Map, HashSet}; use kanidm_proto::v1::{BackupCodesView, CredentialDetail, CredentialDetailType, OperationError}; use openssl::hash::MessageDigest; use openssl::pkcs5::pbkdf2_hmac; use openssl::sha::Sha512; use rand::prelude::*; -use std::convert::TryFrom; -use std::time::{Duration, Instant}; use uuid::Uuid; - -use webauthn_rs_core::proto::Credential as WebauthnCredential; -use webauthn_rs_core::proto::CredentialV3; - use webauthn_rs::prelude::{AuthenticationResult, Passkey, SecurityKey}; +use webauthn_rs_core::proto::{Credential as WebauthnCredential, CredentialV3}; + +use crate::be::dbvalue::{DbBackupCodeV1, DbCred, DbPasswordV1}; pub mod policy; pub mod softlock; @@ -234,12 +231,15 @@ impl BackupCodes { pub fn new(code_set: HashSet) -> Self { BackupCodes { code_set } } + pub fn verify(&self, code_chal: &str) -> bool { self.code_set.contains(code_chal) } + pub fn remove(&mut self, code_chal: &str) -> bool { self.code_set.remove(code_chal) } + pub fn to_dbbackupcodev1(&self) -> DbBackupCodeV1 { DbBackupCodeV1 { code_set: self.code_set.clone(), @@ -892,9 +892,10 @@ impl CredentialType { #[cfg(test)] mod tests { + use std::convert::TryFrom; + use crate::credential::policy::CryptoPolicy; use crate::credential::*; - use std::convert::TryFrom; #[test] fn test_credential_simple() { diff --git a/kanidmd/idm/src/credential/policy.rs b/kanidmd/idm/src/credential/policy.rs index 51bfa58fc..c0d34f00e 100644 --- a/kanidmd/idm/src/credential/policy.rs +++ b/kanidmd/idm/src/credential/policy.rs @@ -1,6 +1,7 @@ -use super::Password; use std::time::Duration; +use super::Password; + const PBKDF2_MIN_NIST_COST: u64 = 10000; #[derive(Debug)] diff --git a/kanidmd/idm/src/credential/totp.rs b/kanidmd/idm/src/credential/totp.rs index f25f02997..e42d2b50a 100644 --- a/kanidmd/idm/src/credential/totp.rs +++ b/kanidmd/idm/src/credential/totp.rs @@ -1,14 +1,13 @@ -use crate::be::dbvalue::{DbTotpAlgoV1, DbTotpV1}; +use std::convert::{TryFrom, TryInto}; +use std::time::{Duration, SystemTime}; + +use kanidm_proto::v1::{TotpAlgo as ProtoTotpAlgo, TotpSecret as ProtoTotp}; use openssl::hash::MessageDigest; use openssl::pkey::PKey; use openssl::sign::Signer; use rand::prelude::*; -use std::convert::TryFrom; -use std::convert::TryInto; -use std::time::{Duration, SystemTime}; -use kanidm_proto::v1::TotpAlgo as ProtoTotpAlgo; -use kanidm_proto::v1::TotpSecret as ProtoTotp; +use crate::be::dbvalue::{DbTotpAlgoV1, DbTotpV1}; // This is 64 bits of entropy, as the examples in https://tools.ietf.org/html/rfc6238 show. 
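On the RFC 6238 reference just made: TOTP is HOTP with a counter derived from wall-clock time, T = floor(unix_seconds / step), so both peers derive the same code within a step window. A minimal sketch of the counter derivation (the 30 second step is the RFC's conventional value, which TOTP_DEFAULT_STEP in this codebase presumably encodes):

    use std::time::{SystemTime, UNIX_EPOCH};

    // T = floor(unix_seconds / step), per RFC 6238.
    fn totp_counter(step: u64) -> u64 {
        let secs = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock before unix epoch")
            .as_secs();
        secs / step
    }

    fn main() {
        // Any two calls inside the same 30s window agree on the counter,
        // which is what makes client and server codes comparable.
        println!("counter = {}", totp_counter(30));
    }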
const SECRET_SIZE_BYTES: usize = 8; @@ -192,9 +191,10 @@ impl Totp { #[cfg(test)] mod tests { - use crate::credential::totp::{Totp, TotpAlgo, TotpError, TOTP_DEFAULT_STEP}; use std::time::Duration; + use crate::credential::totp::{Totp, TotpAlgo, TotpError, TOTP_DEFAULT_STEP}; + #[test] fn hotp_basic() { let otp_sha1 = Totp::new(vec![0], 30, TotpAlgo::Sha1); diff --git a/kanidmd/idm/src/crypto.rs b/kanidmd/idm/src/crypto.rs index 419d472af..5bd96da7f 100644 --- a/kanidmd/idm/src/crypto.rs +++ b/kanidmd/idm/src/crypto.rs @@ -1,10 +1,11 @@ //! This module contains cryptographic setup code, a long with what policy //! and ciphers we accept. -use crate::config::Configuration; use openssl::error::ErrorStack; use openssl::ssl::{SslAcceptor, SslAcceptorBuilder, SslFiletype, SslMethod}; +use crate::config::Configuration; + /// From the server configuration, generate an OpenSSL acceptor that we can use /// to build our sockets for https/ldaps. pub fn setup_tls(config: &Configuration) -> Result, ErrorStack> { diff --git a/kanidmd/idm/src/entry.rs b/kanidmd/idm/src/entry.rs index 2c326d036..d6a5243d2 100644 --- a/kanidmd/idm/src/entry.rs +++ b/kanidmd/idm/src/entry.rs @@ -24,6 +24,26 @@ //! [`filter`]: ../filter/index.html //! [`schema`]: ../schema/index.html +use std::cmp::Ordering; +pub use std::collections::BTreeSet as Set; +use std::collections::{BTreeMap as Map, BTreeMap, BTreeSet}; +use std::sync::Arc; + +use compact_jwt::JwsSigner; +use hashbrown::HashMap; +use kanidm_proto::v1::{ + ConsistencyError, Entry as ProtoEntry, Filter as ProtoFilter, OperationError, SchemaError, +}; +use ldap3_proto::simple::{LdapPartialAttribute, LdapSearchResultEntry}; +use smartstring::alias::String as AttrString; +use time::OffsetDateTime; +use tracing::trace; +use uuid::Uuid; +use webauthn_rs::prelude::{DeviceKey as DeviceKeyV4, Passkey as PasskeyV4}; + +use crate::be::dbentry::{DbEntry, DbEntryV2, DbEntryVers}; +use crate::be::dbvalue::DbValueSetV2; +use crate::be::{IdxKey, IdxSlope}; use crate::credential::Credential; use crate::filter::{Filter, FilterInvalid, FilterResolved, FilterValidResolved}; use crate::ldap::ldap_vattr_map; @@ -32,33 +52,8 @@ use crate::prelude::*; use crate::repl::cid::Cid; use crate::repl::entry::EntryChangelog; use crate::schema::{SchemaAttribute, SchemaClass, SchemaTransaction}; -use crate::value::{IndexType, SyntaxType}; -use crate::value::{IntentTokenState, PartialValue, Session, Value}; +use crate::value::{IndexType, IntentTokenState, PartialValue, Session, SyntaxType, Value}; use crate::valueset::{self, ValueSet}; -use kanidm_proto::v1::Entry as ProtoEntry; -use kanidm_proto::v1::Filter as ProtoFilter; -use kanidm_proto::v1::{ConsistencyError, OperationError, SchemaError}; -use tracing::trace; - -use crate::be::dbentry::{DbEntry, DbEntryV2, DbEntryVers}; -use crate::be::dbvalue::DbValueSetV2; -use crate::be::{IdxKey, IdxSlope}; - -use compact_jwt::JwsSigner; -use hashbrown::HashMap; -use ldap3_proto::simple::{LdapPartialAttribute, LdapSearchResultEntry}; -use smartstring::alias::String as AttrString; -use std::cmp::Ordering; -use std::collections::BTreeMap as Map; -pub use std::collections::BTreeSet as Set; -use std::collections::BTreeSet; -use std::sync::Arc; -use time::OffsetDateTime; -use uuid::Uuid; - -use std::collections::BTreeMap; -use webauthn_rs::prelude::DeviceKey as DeviceKeyV4; -use webauthn_rs::prelude::Passkey as PasskeyV4; // use std::convert::TryFrom; // use std::str::FromStr; @@ -222,7 +217,6 @@ pub(crate) fn compare_attrs(left: &Eattrs, right: &Eattrs) -> bool { 
/// [`schema`]: ../schema/index.html /// [`access`]: ../access/index.html /// [`event`]: ../event/index.html -/// pub struct Entry { valid: VALID, state: STATE, @@ -323,29 +317,28 @@ impl Entry { /// Given a proto entry in JSON formed as a serialised string, processed that string /// into an Entry. + #[instrument(level = "debug", skip_all)] pub fn from_proto_entry_str( es: &str, qs: &QueryServerWriteTransaction, ) -> Result { - spanned!("from_proto_entry_str", { - if cfg!(test) { - if es.len() > 256 { - let (dsp_es, _) = es.split_at(255); - trace!("Parsing -> {}...", dsp_es); - } else { - trace!("Parsing -> {}", es); - } + if cfg!(test) { + if es.len() > 256 { + let (dsp_es, _) = es.split_at(255); + trace!("Parsing -> {}...", dsp_es); + } else { + trace!("Parsing -> {}", es); } - // str -> Proto entry - let pe: ProtoEntry = serde_json::from_str(es).map_err(|e| { - // We probably shouldn't print ES here because that would allow users - // to inject content into our logs :) - admin_error!(?e, "SerdeJson Failure"); - OperationError::SerdeJsonError - })?; - // now call from_proto_entry - Self::from_proto_entry(&pe, qs) - }) + } + // str -> Proto entry + let pe: ProtoEntry = serde_json::from_str(es).map_err(|e| { + // We probably shouldn't print ES here because that would allow users + // to inject content into our logs :) + admin_error!(?e, "SerdeJson Failure"); + OperationError::SerdeJsonError + })?; + // now call from_proto_entry + Self::from_proto_entry(&pe, qs) } #[cfg(test)] @@ -2471,13 +2464,15 @@ impl From<&SchemaClass> for Entry { #[cfg(test)] mod tests { + use std::collections::BTreeSet as Set; + + use hashbrown::HashMap; + use smartstring::alias::String as AttrString; + use crate::be::{IdxKey, IdxSlope}; use crate::entry::{Entry, EntryInit, EntryInvalid, EntryNew}; use crate::modify::{Modify, ModifyList}; use crate::value::{IndexType, PartialValue, Value}; - use hashbrown::HashMap; - use smartstring::alias::String as AttrString; - use std::collections::BTreeSet as Set; #[test] fn test_entry_basic() { diff --git a/kanidmd/idm/src/event.rs b/kanidmd/idm/src/event.rs index 3ad205d8d..c059e3618 100644 --- a/kanidmd/idm/src/event.rs +++ b/kanidmd/idm/src/event.rs @@ -15,6 +15,21 @@ //! with the operation, and a clear path to know how to transform events between //! various types. 
+use std::collections::BTreeSet; +#[cfg(test)] +use std::sync::Arc; +use std::time::Duration; + +use kanidm_proto::v1::{ + AuthCredential, AuthMech, AuthRequest, AuthStep, CreateRequest, DeleteRequest, + Entry as ProtoEntry, ModifyList as ProtoModifyList, ModifyRequest, OperationError, + SearchRequest, SearchResponse, WhoamiResponse, +}; +use ldap3_proto::simple::LdapFilter; +use uuid::Uuid; +#[cfg(test)] +use webauthn_rs::prelude::PublicKeyCredential; + use crate::entry::{Entry, EntryCommitted, EntryInit, EntryNew, EntryReduced}; use crate::filter::{Filter, FilterInvalid, FilterValid}; use crate::identity::Limits; @@ -23,24 +38,6 @@ use crate::modify::{ModifyInvalid, ModifyList, ModifyValid}; use crate::prelude::*; use crate::schema::SchemaTransaction; use crate::value::PartialValue; -use kanidm_proto::v1::Entry as ProtoEntry; -use kanidm_proto::v1::ModifyList as ProtoModifyList; -use kanidm_proto::v1::OperationError; -use kanidm_proto::v1::{ - AuthCredential, AuthMech, AuthRequest, AuthStep, CreateRequest, DeleteRequest, ModifyRequest, - SearchRequest, SearchResponse, WhoamiResponse, -}; - -use ldap3_proto::simple::LdapFilter; -use std::collections::BTreeSet; -use std::time::Duration; -use uuid::Uuid; - -#[cfg(test)] -use std::sync::Arc; - -#[cfg(test)] -use webauthn_rs::prelude::PublicKeyCredential; #[derive(Debug)] pub struct SearchResult { diff --git a/kanidmd/idm/src/filter.rs b/kanidmd/idm/src/filter.rs index 85eafbd92..68b4d3961 100644 --- a/kanidmd/idm/src/filter.rs +++ b/kanidmd/idm/src/filter.rs @@ -8,28 +8,28 @@ //! [`Filter`]: struct.Filter.html //! [`Entry`]: ../entry/struct.Entry.html +use std::cmp::{Ordering, PartialOrd}; +use std::collections::BTreeSet; +use std::hash::Hash; +use std::iter; +use std::num::NonZeroU8; + +use concread::arcache::ARCacheReadTxn; +use hashbrown::HashMap; +#[cfg(test)] +use hashbrown::HashSet; +use kanidm_proto::v1::{Filter as ProtoFilter, OperationError, SchemaError}; +use ldap3_proto::proto::{LdapFilter, LdapSubstringFilter}; +// use smartstring::alias::String as AttrString; +use serde::Deserialize; +use uuid::Uuid; + use crate::be::{IdxKey, IdxKeyRef, IdxKeyToRef, IdxMeta, IdxSlope}; use crate::identity::IdentityId; use crate::ldap::ldap_attr_filter_map; use crate::prelude::*; use crate::schema::SchemaTransaction; use crate::value::{IndexType, PartialValue}; -use concread::arcache::ARCacheReadTxn; -use kanidm_proto::v1::Filter as ProtoFilter; -use kanidm_proto::v1::{OperationError, SchemaError}; -use ldap3_proto::proto::{LdapFilter, LdapSubstringFilter}; -// use smartstring::alias::String as AttrString; -use serde::Deserialize; -use std::cmp::{Ordering, PartialOrd}; -use std::collections::BTreeSet; -use std::hash::Hash; -use std::iter; -use std::num::NonZeroU8; -use uuid::Uuid; - -use hashbrown::HashMap; -#[cfg(test)] -use hashbrown::HashSet; const FILTER_DEPTH_MAX: usize = 16; @@ -491,51 +491,48 @@ impl Filter { // This has to have two versions to account for ro/rw traits, because RS can't // monomorphise on the trait to call clone_value. An option is to make a fn that // takes "clone_value(t, a, v) instead, but that may have a similar issue. 
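The constructors below each seed the parse with depth = FILTER_DEPTH_MAX and a mutable element budget taken from the identity's limits, so a hostile filter can neither recurse deeply nor fan out wide. A simplified sketch of that bounded recursion (ProtoFilter here is cut down to two variants):

    enum ProtoFilter {
        Eq(String, String),
        And(Vec<ProtoFilter>),
    }

    // Each nesting level spends one unit of depth; every node spends one
    // unit of the shared element budget. Either bound failing rejects the
    // whole filter before any query work happens.
    fn parse(f: &ProtoFilter, depth: usize, elems: &mut usize) -> Result<(), &'static str> {
        if depth == 0 {
            return Err("filter nested too deeply");
        }
        *elems = elems.checked_sub(1).ok_or("filter has too many elements")?;
        match f {
            ProtoFilter::Eq(_, _) => Ok(()),
            ProtoFilter::And(subs) => subs.iter().try_for_each(|s| parse(s, depth - 1, elems)),
        }
    }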
+ #[instrument(level = "debug", skip_all)] pub fn from_ro( ev: &Identity, f: &ProtoFilter, qs: &QueryServerReadTransaction, ) -> Result { - spanned!("filer::from_ro", { - let depth = FILTER_DEPTH_MAX; - let mut elems = ev.limits.filter_max_elements; - Ok(Filter { - state: FilterInvalid { - inner: FilterComp::from_ro(f, qs, depth, &mut elems)?, - }, - }) + let depth = FILTER_DEPTH_MAX; + let mut elems = ev.limits.filter_max_elements; + Ok(Filter { + state: FilterInvalid { + inner: FilterComp::from_ro(f, qs, depth, &mut elems)?, + }, }) } + #[instrument(level = "debug", skip_all)] pub fn from_rw( ev: &Identity, f: &ProtoFilter, qs: &QueryServerWriteTransaction, ) -> Result { - spanned!("filter::from_rw", { - let depth = FILTER_DEPTH_MAX; - let mut elems = ev.limits.filter_max_elements; - Ok(Filter { - state: FilterInvalid { - inner: FilterComp::from_rw(f, qs, depth, &mut elems)?, - }, - }) + let depth = FILTER_DEPTH_MAX; + let mut elems = ev.limits.filter_max_elements; + Ok(Filter { + state: FilterInvalid { + inner: FilterComp::from_rw(f, qs, depth, &mut elems)?, + }, }) } + #[instrument(level = "debug", skip_all)] pub fn from_ldap_ro( ev: &Identity, f: &LdapFilter, qs: &QueryServerReadTransaction, ) -> Result { - spanned!("filter::from_ldap_ro", { - let depth = FILTER_DEPTH_MAX; - let mut elems = ev.limits.filter_max_elements; - Ok(Filter { - state: FilterInvalid { - inner: FilterComp::from_ldap_ro(f, qs, depth, &mut elems)?, - }, - }) + let depth = FILTER_DEPTH_MAX; + let mut elems = ev.limits.filter_max_elements; + Ok(Filter { + state: FilterInvalid { + inner: FilterComp::from_ldap_ro(f, qs, depth, &mut elems)?, + }, }) } } @@ -948,7 +945,6 @@ impl PartialOrd for FilterResolved { impl Ord for FilterResolved { /// Ordering of filters for optimisation and subsequent dead term elimination. - /// fn cmp(&self, rhs: &FilterResolved) -> Ordering { let left_slopey = self.get_slopeyness_factor(); let right_slopey = rhs.get_slopeyness_factor(); @@ -1330,10 +1326,6 @@ impl FilterResolved { #[cfg(test)] mod tests { - use crate::event::CreateEvent; - use crate::event::DeleteEvent; - use crate::filter::{Filter, FilterInvalid, FILTER_DEPTH_MAX}; - use crate::prelude::*; use std::cmp::{Ordering, PartialOrd}; use std::collections::BTreeSet; use std::time::Duration; @@ -1341,6 +1333,10 @@ mod tests { use kanidm_proto::v1::Filter as ProtoFilter; use ldap3_proto::simple::LdapFilter; + use crate::event::{CreateEvent, DeleteEvent}; + use crate::filter::{Filter, FilterInvalid, FILTER_DEPTH_MAX}; + use crate::prelude::*; + #[test] fn test_filter_simple() { // Test construction. diff --git a/kanidmd/idm/src/identity.rs b/kanidmd/idm/src/identity.rs index 5fed6db8a..8d9098b97 100644 --- a/kanidmd/idm/src/identity.rs +++ b/kanidmd/idm/src/identity.rs @@ -3,12 +3,14 @@ //! and this provides the set of `Limits` to confine how many resources that the //! identity may consume during operations to prevent denial-of-service. -use crate::prelude::*; -use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use std::hash::Hash; use std::sync::Arc; +use serde::{Deserialize, Serialize}; + +use crate::prelude::*; + #[derive(Debug, Clone)] /// Limits on the resources a single event can consume. 
These are defined per-event /// as they are derived from the userAuthToken based on that individual session diff --git a/kanidmd/idm/src/idm/account.rs b/kanidmd/idm/src/idm/account.rs index 0bc432354..d8084d85e 100644 --- a/kanidmd/idm/src/idm/account.rs +++ b/kanidmd/idm/src/idm/account.rs @@ -1,30 +1,27 @@ -use crate::entry::{Entry, EntryCommitted, EntryReduced, EntrySealed}; -use crate::prelude::*; -use crate::schema::SchemaTransaction; +use std::collections::{BTreeMap, BTreeSet}; +use std::time::Duration; -use kanidm_proto::v1::OperationError; -use kanidm_proto::v1::UiHint; -use kanidm_proto::v1::{AuthType, UserAuthToken}; -use kanidm_proto::v1::{BackupCodesView, CredentialStatus}; - -use webauthn_rs::prelude::CredentialID; -use webauthn_rs::prelude::DeviceKey as DeviceKeyV4; -use webauthn_rs::prelude::Passkey as PasskeyV4; +use kanidm_proto::v1::{ + AuthType, BackupCodesView, CredentialStatus, OperationError, UiHint, UserAuthToken, +}; +use time::OffsetDateTime; +use uuid::Uuid; +use webauthn_rs::prelude::{ + AuthenticationResult, CredentialID, DeviceKey as DeviceKeyV4, Passkey as PasskeyV4, +}; use crate::constants::UUID_ANONYMOUS; use crate::credential::policy::CryptoPolicy; -use crate::credential::{softlock::CredSoftLockPolicy, Credential}; +use crate::credential::softlock::CredSoftLockPolicy; +use crate::credential::Credential; +use crate::entry::{Entry, EntryCommitted, EntryReduced, EntrySealed}; use crate::idm::group::Group; use crate::idm::server::IdmServerProxyWriteTransaction; use crate::modify::{ModifyInvalid, ModifyList}; +use crate::prelude::*; +use crate::schema::SchemaTransaction; use crate::value::{IntentTokenState, PartialValue, Value}; -use std::collections::{BTreeMap, BTreeSet}; -use std::time::Duration; -use time::OffsetDateTime; -use uuid::Uuid; -use webauthn_rs::prelude::AuthenticationResult; - lazy_static! { static ref PVCLASS_ACCOUNT: PartialValue = PartialValue::new_class("account"); static ref PVCLASS_POSIXACCOUNT: PartialValue = PartialValue::new_class("posixaccount"); @@ -153,34 +150,31 @@ pub(crate) struct Account { } impl Account { + #[instrument(level = "trace", skip_all)] pub(crate) fn try_from_entry_ro( value: &Entry, qs: &mut QueryServerReadTransaction, ) -> Result { - spanned!("idm::account::try_from_entry_ro", { - let groups = Group::try_from_account_entry_ro(value, qs)?; - try_from_entry!(value, groups) - }) + let groups = Group::try_from_account_entry_ro(value, qs)?; + try_from_entry!(value, groups) } + #[instrument(level = "trace", skip_all)] pub(crate) fn try_from_entry_rw( value: &Entry, qs: &mut QueryServerWriteTransaction, ) -> Result { - spanned!("idm::account::try_from_entry_rw", { - let groups = Group::try_from_account_entry_rw(value, qs)?; - try_from_entry!(value, groups) - }) + let groups = Group::try_from_account_entry_rw(value, qs)?; + try_from_entry!(value, groups) } + #[instrument(level = "trace", skip_all)] pub(crate) fn try_from_entry_reduced( value: &Entry, qs: &mut QueryServerReadTransaction, ) -> Result { - spanned!("idm::account::try_from_entry_reduced", { - let groups = Group::try_from_account_entry_red_ro(value, qs)?; - try_from_entry!(value, groups) - }) + let groups = Group::try_from_account_entry_red_ro(value, qs)?; + try_from_entry!(value, groups) } pub(crate) fn try_from_entry_no_groups( diff --git a/kanidmd/idm/src/idm/authsession.rs b/kanidmd/idm/src/idm/authsession.rs index 915449e12..1b05d63ce 100644 --- a/kanidmd/idm/src/idm/authsession.rs +++ b/kanidmd/idm/src/idm/authsession.rs @@ -2,34 +2,32 @@ //! 
Generally this has to process an authentication attempt, and validate each //! factor to assert that the user is legitimate. This also contains some //! support code for asynchronous task execution. -use crate::credential::BackupCodes; -use crate::idm::account::Account; -use crate::idm::delayed::BackupCodeRemoval; -use crate::idm::AuthState; -use crate::prelude::*; -use hashbrown::HashSet; -use kanidm_proto::v1::OperationError; -use kanidm_proto::v1::{AuthAllowed, AuthCredential, AuthMech, AuthType}; - -use crate::credential::{totp::Totp, Credential, CredentialType, Password}; - -use crate::idm::delayed::{DelayedAction, PasswordUpgrade, WebauthnCounterIncrement}; -// use crossbeam::channel::Sender; -use tokio::sync::mpsc::UnboundedSender as Sender; - -use std::time::Duration; -use uuid::Uuid; -// use webauthn_rs::proto::Credential as WebauthnCredential; -use compact_jwt::{Jws, JwsSigner}; use std::collections::BTreeMap; pub use std::collections::BTreeSet as Set; use std::convert::TryFrom; +use std::time::Duration; +// use webauthn_rs::proto::Credential as WebauthnCredential; +use compact_jwt::{Jws, JwsSigner}; +use hashbrown::HashSet; +use kanidm_proto::v1::{AuthAllowed, AuthCredential, AuthMech, AuthType, OperationError}; +// use crossbeam::channel::Sender; +use tokio::sync::mpsc::UnboundedSender as Sender; +use uuid::Uuid; +// use webauthn_rs::prelude::DeviceKey as DeviceKeyV4; +use webauthn_rs::prelude::Passkey as PasskeyV4; use webauthn_rs::prelude::{ PasskeyAuthentication, RequestChallengeResponse, SecurityKeyAuthentication, Webauthn, }; -// use webauthn_rs::prelude::DeviceKey as DeviceKeyV4; -use webauthn_rs::prelude::Passkey as PasskeyV4; + +use crate::credential::totp::Totp; +use crate::credential::{BackupCodes, Credential, CredentialType, Password}; +use crate::idm::account::Account; +use crate::idm::delayed::{ + BackupCodeRemoval, DelayedAction, PasswordUpgrade, WebauthnCounterIncrement, +}; +use crate::idm::AuthState; +use crate::prelude::*; // Each CredHandler takes one or more credentials and determines if the // handlers requirements can be 100% fufilled. 
This is where MFA or other @@ -405,7 +403,9 @@ impl CredHandler { CredState::Denied(BAD_AUTH_TYPE_MSG) } } - } // end CredHandler::PasswordMfa + } + + // end CredHandler::PasswordMfa /// Validate a webauthn authentication attempt pub fn validate_webauthn( @@ -832,6 +832,16 @@ impl AuthSession { #[cfg(test)] mod tests { + pub use std::collections::BTreeSet as Set; + use std::time::Duration; + + use compact_jwt::JwsSigner; + use hashbrown::HashSet; + use kanidm_proto::v1::{AuthAllowed, AuthCredential, AuthMech}; + use tokio::sync::mpsc::unbounded_channel as unbounded; + use webauthn_authenticator_rs::softpasskey::SoftPasskey; + use webauthn_authenticator_rs::WebauthnAuthenticator; + use crate::credential::policy::CryptoPolicy; use crate::credential::totp::{Totp, TOTP_DEFAULT_STEP}; use crate::credential::{BackupCodes, Credential}; @@ -842,17 +852,7 @@ mod tests { use crate::idm::delayed::DelayedAction; use crate::idm::AuthState; use crate::prelude::*; - use hashbrown::HashSet; - pub use std::collections::BTreeSet as Set; - use crate::utils::{duration_from_epoch_now, readable_password_from_random}; - use kanidm_proto::v1::{AuthAllowed, AuthCredential, AuthMech}; - use std::time::Duration; - - use tokio::sync::mpsc::unbounded_channel as unbounded; - use webauthn_authenticator_rs::{softpasskey::SoftPasskey, WebauthnAuthenticator}; - - use compact_jwt::JwsSigner; fn create_pw_badlist_cache() -> HashSet { let mut s = HashSet::new(); diff --git a/kanidmd/idm/src/idm/credupdatesession.rs b/kanidmd/idm/src/idm/credupdatesession.rs index deac3e93c..63a789f48 100644 --- a/kanidmd/idm/src/idm/credupdatesession.rs +++ b/kanidmd/idm/src/idm/credupdatesession.rs @@ -1,36 +1,28 @@ -use crate::access::AccessControlsTransaction; -use crate::credential::{BackupCodes, Credential}; -use crate::idm::account::Account; -use crate::idm::server::IdmServerCredUpdateTransaction; -use crate::idm::server::IdmServerProxyWriteTransaction; -use crate::prelude::*; -use crate::value::IntentTokenState; -use hashbrown::HashSet; +use core::ops::Deref; use std::collections::BTreeMap; +use std::fmt; +use std::sync::{Arc, Mutex}; +use std::time::Duration; -use crate::credential::totp::{Totp, TOTP_DEFAULT_STEP}; - +use hashbrown::HashSet; use kanidm_proto::v1::{ CURegState, CUStatus, CredentialDetail, PasskeyDetail, PasswordFeedback, TotpSecret, }; - -use crate::utils::{backup_code_from_random, readable_password_from_random, uuid_from_duration}; - -use webauthn_rs::prelude::DeviceKey as DeviceKeyV4; -use webauthn_rs::prelude::Passkey as PasskeyV4; +use serde::{Deserialize, Serialize}; +use time::OffsetDateTime; use webauthn_rs::prelude::{ - CreationChallengeResponse, PasskeyRegistration, RegisterPublicKeyCredential, + CreationChallengeResponse, DeviceKey as DeviceKeyV4, Passkey as PasskeyV4, PasskeyRegistration, + RegisterPublicKeyCredential, }; -use serde::{Deserialize, Serialize}; - -use std::fmt; -use std::sync::Arc; -use std::sync::Mutex; -use std::time::Duration; -use time::OffsetDateTime; - -use core::ops::Deref; +use crate::access::AccessControlsTransaction; +use crate::credential::totp::{Totp, TOTP_DEFAULT_STEP}; +use crate::credential::{BackupCodes, Credential}; +use crate::idm::account::Account; +use crate::idm::server::{IdmServerCredUpdateTransaction, IdmServerProxyWriteTransaction}; +use crate::prelude::*; +use crate::utils::{backup_code_from_random, readable_password_from_random, uuid_from_duration}; +use crate::value::IntentTokenState; const MAXIMUM_CRED_UPDATE_TTL: Duration = Duration::from_secs(900); const 
MAXIMUM_INTENT_TTL: Duration = Duration::from_secs(86400); @@ -155,7 +147,6 @@ pub struct CredentialUpdateSessionStatus { // The target user's display name displayname: String, // ttl: Duration, - // can_commit: bool, primary: Option, passkeys: Vec, @@ -384,85 +375,85 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { Ok((CredentialUpdateSessionToken { token_enc }, status)) } + #[instrument(level = "debug", skip_all)] pub fn init_credential_update_intent( &mut self, event: &InitCredentialUpdateIntentEvent, ct: Duration, ) -> Result { - spanned!("idm::server::credupdatesession", { - let account = self.validate_init_credential_update(event.target, &event.ident)?; + let account = self.validate_init_credential_update(event.target, &event.ident)?; - // ==== AUTHORISATION CHECKED === + // ==== AUTHORISATION CHECKED === - // Build the intent token. - let mttl = event.max_ttl.unwrap_or_else(|| Duration::new(0, 0)); - let max_ttl = ct + mttl.clamp(MINIMUM_INTENT_TTL, MAXIMUM_INTENT_TTL); - // let sessionid = uuid_from_duration(max_ttl, self.sid); - let intent_id = readable_password_from_random(); + // Build the intent token. + let mttl = event.max_ttl.unwrap_or_else(|| Duration::new(0, 0)); + let max_ttl = ct + mttl.clamp(MINIMUM_INTENT_TTL, MAXIMUM_INTENT_TTL); + // let sessionid = uuid_from_duration(max_ttl, self.sid); + let intent_id = readable_password_from_random(); - /* - let token = CredentialUpdateIntentTokenInner { - sessionid, - target, - intent_id, - max_ttl, - }; + /* + let token = CredentialUpdateIntentTokenInner { + sessionid, + target, + intent_id, + max_ttl, + }; - let token_data = serde_json::to_vec(&token).map_err(|e| { - admin_error!(err = ?e, "Unable to encode token data"); - OperationError::SerdeJsonError + let token_data = serde_json::to_vec(&token).map_err(|e| { + admin_error!(err = ?e, "Unable to encode token data"); + OperationError::SerdeJsonError + })?; + + let token_enc = self + .token_enc_key + .encrypt_at_time(&token_data, ct.as_secs()); + */ + + // Mark that we have created an intent token on the user. + // ⚠️ -- remember, there is a risk, very low, but still a risk of collision of the intent_id. + // instead of enforcing unique, which would divulge that the collision occured, we + // write anyway, and instead on the intent access path we invalidate IF the collision + // occurs. + let mut modlist = ModifyList::new_append( + "credential_update_intent_token", + Value::IntentToken(intent_id.clone(), IntentTokenState::Valid { max_ttl }), + ); + + // Remove any old credential update intents + account + .credential_update_intent_tokens + .iter() + .for_each(|(existing_intent_id, state)| { + let max_ttl = match state { + IntentTokenState::Valid { max_ttl } + | IntentTokenState::InProgress { + max_ttl, + session_id: _, + session_ttl: _, + } + | IntentTokenState::Consumed { max_ttl } => *max_ttl, + }; + + if ct >= max_ttl { + modlist.push_mod(Modify::Removed( + AttrString::from("credential_update_intent_token"), + PartialValue::IntentToken(existing_intent_id.clone()), + )); + } + }); + + self.qs_write + .internal_modify( + // Filter as executed + &filter!(f_eq("uuid", PartialValue::new_uuid(account.uuid))), + &modlist, + ) + .map_err(|e| { + request_error!(error = ?e); + e })?; - let token_enc = self - .token_enc_key - .encrypt_at_time(&token_data, ct.as_secs()); - */ - - // Mark that we have created an intent token on the user. - // ⚠️ -- remember, there is a risk, very low, but still a risk of collision of the intent_id. 
- // instead of enforcing unique, which would divulge that the collision occured, we - // write anyway, and instead on the intent access path we invalidate IF the collision - // occurs. - let mut modlist = ModifyList::new_append( - "credential_update_intent_token", - Value::IntentToken(intent_id.clone(), IntentTokenState::Valid { max_ttl }), - ); - - // Remove any old credential update intents - account.credential_update_intent_tokens.iter().for_each( - |(existing_intent_id, state)| { - let max_ttl = match state { - IntentTokenState::Valid { max_ttl } - | IntentTokenState::InProgress { - max_ttl, - session_id: _, - session_ttl: _, - } - | IntentTokenState::Consumed { max_ttl } => *max_ttl, - }; - - if ct >= max_ttl { - modlist.push_mod(Modify::Removed( - AttrString::from("credential_update_intent_token"), - PartialValue::IntentToken(existing_intent_id.clone()), - )); - } - }, - ); - - self.qs_write - .internal_modify( - // Filter as executed - &filter!(f_eq("uuid", PartialValue::new_uuid(account.uuid))), - &modlist, - ) - .map_err(|e| { - request_error!(error = ?e); - e - })?; - - Ok(CredentialUpdateIntentToken { intent_id }) - }) + Ok(CredentialUpdateIntentToken { intent_id }) } pub fn exchange_intent_credential_update( @@ -642,21 +633,20 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { self.create_credupdate_session(session_id, Some(intent_id), account, current_time) } + #[instrument(level = "debug", skip_all)] pub fn init_credential_update( &mut self, event: &InitCredentialUpdateEvent, ct: Duration, ) -> Result<(CredentialUpdateSessionToken, CredentialUpdateSessionStatus), OperationError> { - spanned!("idm::server::credupdatesession", { - let account = self.validate_init_credential_update(event.target, &event.ident)?; - // ==== AUTHORISATION CHECKED === - // This is the expiry time, so that our cleanup task can "purge up to now" rather - // than needing to do calculations. - let sessionid = uuid_from_duration(ct + MAXIMUM_CRED_UPDATE_TTL, self.sid); + let account = self.validate_init_credential_update(event.target, &event.ident)?; + // ==== AUTHORISATION CHECKED === + // This is the expiry time, so that our cleanup task can "purge up to now" rather + // than needing to do calculations. + let sessionid = uuid_from_duration(ct + MAXIMUM_CRED_UPDATE_TTL, self.sid); - // Build the cred update session. - self.create_credupdate_session(sessionid, None, account, ct) - }) + // Build the cred update session. 
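The TTL arithmetic above is worth pinning down: the caller-supplied max_ttl is clamped into the server's window before being added to the current time, so a client can neither mint an already-expired intent token nor an effectively permanent one. A sketch using Duration's Ord::clamp (only MAXIMUM_INTENT_TTL's 86400s is visible in this patch; the minimum used here is an assumed placeholder):

    use std::time::Duration;

    const MINIMUM_INTENT_TTL: Duration = Duration::from_secs(900); // assumed value
    const MAXIMUM_INTENT_TTL: Duration = Duration::from_secs(86400);

    fn intent_expiry(ct: Duration, requested: Option<Duration>) -> Duration {
        let mttl = requested.unwrap_or_else(|| Duration::new(0, 0));
        // clamp raises a too-small request to the minimum and caps a
        // too-large one at the maximum.
        ct + mttl.clamp(MINIMUM_INTENT_TTL, MAXIMUM_INTENT_TTL)
    }

    fn main() {
        let now = Duration::from_secs(1_000_000);
        assert_eq!(intent_expiry(now, None), now + MINIMUM_INTENT_TTL);
        let week = Some(Duration::from_secs(7 * 86400));
        assert_eq!(intent_expiry(now, week), now + MAXIMUM_INTENT_TTL);
    }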
+ self.create_credupdate_session(sessionid, None, account, ct) } #[instrument(level = "trace", skip(self))] @@ -1472,6 +1462,14 @@ impl<'a> IdmServerCredUpdateTransaction<'a> { #[cfg(test)] mod tests { + use std::time::Duration; + + use async_std::task; + use kanidm_proto::v1::{AuthAllowed, AuthMech, CredentialDetailType}; + use uuid::uuid; + use webauthn_authenticator_rs::softpasskey::SoftPasskey; + use webauthn_authenticator_rs::WebauthnAuthenticator; + use super::{ CredentialUpdateSessionStatus, CredentialUpdateSessionToken, InitCredentialUpdateEvent, InitCredentialUpdateIntentEvent, MfaRegStateStatus, MAXIMUM_CRED_UPDATE_TTL, @@ -1481,16 +1479,8 @@ mod tests { use crate::event::{AuthEvent, AuthResult, CreateEvent}; use crate::idm::delayed::DelayedAction; use crate::idm::server::IdmServer; - use crate::prelude::*; - use std::time::Duration; - - use webauthn_authenticator_rs::{softpasskey::SoftPasskey, WebauthnAuthenticator}; - use crate::idm::AuthState; - use kanidm_proto::v1::{AuthAllowed, AuthMech, CredentialDetailType}; - use uuid::uuid; - - use async_std::task; + use crate::prelude::*; const TEST_CURRENT_TIME: u64 = 6000; const TESTPERSON_UUID: Uuid = uuid!("cf231fea-1a8f-4410-a520-fd9b1a379c86"); diff --git a/kanidmd/idm/src/idm/event.rs b/kanidmd/idm/src/idm/event.rs index 082c1b234..523311d63 100644 --- a/kanidmd/idm/src/idm/event.rs +++ b/kanidmd/idm/src/idm/event.rs @@ -1,6 +1,7 @@ -use crate::prelude::*; use kanidm_proto::v1::OperationError; +use crate::prelude::*; + #[cfg(test)] pub(crate) struct PasswordChangeEvent { pub ident: Identity, diff --git a/kanidmd/idm/src/idm/group.rs b/kanidmd/idm/src/idm/group.rs index 86c324648..b90f33d2e 100644 --- a/kanidmd/idm/src/idm/group.rs +++ b/kanidmd/idm/src/idm/group.rs @@ -1,10 +1,9 @@ +use kanidm_proto::v1::{Group as ProtoGroup, OperationError}; +use uuid::Uuid; + use crate::entry::{Entry, EntryCommitted, EntryReduced, EntrySealed}; use crate::prelude::*; use crate::value::PartialValue; -use kanidm_proto::v1::Group as ProtoGroup; -use kanidm_proto::v1::OperationError; - -use uuid::Uuid; lazy_static! { static ref PVCLASS_GROUP: PartialValue = PartialValue::new_class("group"); diff --git a/kanidmd/idm/src/idm/mod.rs b/kanidmd/idm/src/idm/mod.rs index 36f4f52e9..76cb1ede8 100644 --- a/kanidmd/idm/src/idm/mod.rs +++ b/kanidmd/idm/src/idm/mod.rs @@ -15,10 +15,10 @@ pub mod server; pub(crate) mod serviceaccount; pub(crate) mod unix; -use kanidm_proto::v1::{AuthAllowed, AuthMech}; - use std::fmt; +use kanidm_proto::v1::{AuthAllowed, AuthMech}; + pub enum AuthState { Choose(Vec), Continue(Vec), diff --git a/kanidmd/idm/src/idm/oauth2.rs b/kanidmd/idm/src/idm/oauth2.rs index 84100d18a..7387902dc 100644 --- a/kanidmd/idm/src/idm/oauth2.rs +++ b/kanidmd/idm/src/idm/oauth2.rs @@ -3,30 +3,19 @@ //! This contains the in memory and loaded set of active oauth2 resource server //! integrations, which are then able to be used an accessed from the IDM layer //! for operations involving oauth2 authentication processing. -//! 
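One design note before the oauth2 changes: in the credential update path above, the session id is minted from its own expiry (uuid_from_duration(ct + MAXIMUM_CRED_UPDATE_TTL, self.sid)), so the cleanup task can "purge up to now" as a single ordered cut instead of checking each session individually. A toy model of that property, with a BTreeMap keyed by expiry standing in for whatever ordering uuid_from_duration presumably gives the real store:

    use std::collections::BTreeMap;
    use std::time::Duration;

    struct Sessions {
        by_expiry: BTreeMap<Duration, &'static str>,
    }

    impl Sessions {
        // "Purge up to now": one ordered split, no per-session arithmetic.
        fn purge_up_to(&mut self, now: Duration) {
            // split_off keeps keys >= now; everything strictly before
            // `now` stays behind and is dropped here.
            let live = self.by_expiry.split_off(&now);
            self.by_expiry = live;
        }
    }

    fn main() {
        let mut s = Sessions { by_expiry: BTreeMap::new() };
        s.by_expiry.insert(Duration::from_secs(100), "expired session");
        s.by_expiry.insert(Duration::from_secs(900), "current session");
        s.purge_up_to(Duration::from_secs(500));
        assert_eq!(s.by_expiry.len(), 1);
    }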
-use crate::identity::IdentityId; -use crate::idm::delayed::{DelayedAction, Oauth2ConsentGrant}; -use crate::idm::server::{IdmServerProxyReadTransaction, IdmServerTransaction}; -use crate::prelude::*; -use crate::value::OAUTHSCOPE_RE; +use std::collections::{BTreeMap, BTreeSet}; +use std::convert::TryFrom; +use std::fmt; +use std::sync::Arc; +use std::time::Duration; + use base64urlsafedata::Base64UrlSafeData; pub use compact_jwt::{JwkKeySet, OidcToken}; use compact_jwt::{JwsSigner, OidcClaims, OidcSubject}; use concread::cowcell::*; use fernet::Fernet; use hashbrown::HashMap; -use kanidm_proto::v1::{AuthType, UserAuthToken}; -use openssl::sha; -use serde::{Deserialize, Serialize}; -use std::collections::{BTreeMap, BTreeSet}; -use std::fmt; -use std::sync::Arc; -use time::OffsetDateTime; -use tokio::sync::mpsc::UnboundedSender as Sender; -use tracing::trace; -use url::{Origin, Url}; - pub use kanidm_proto::oauth2::{ AccessTokenIntrospectRequest, AccessTokenIntrospectResponse, AccessTokenRequest, AccessTokenResponse, AuthorisationRequest, CodeChallengeMethod, ErrorResponse, @@ -36,9 +25,19 @@ use kanidm_proto::oauth2::{ ClaimType, DisplayValue, GrantType, IdTokenSignAlg, ResponseMode, ResponseType, SubjectType, TokenEndpointAuthMethod, }; +use kanidm_proto::v1::{AuthType, UserAuthToken}; +use openssl::sha; +use serde::{Deserialize, Serialize}; +use time::OffsetDateTime; +use tokio::sync::mpsc::UnboundedSender as Sender; +use tracing::trace; +use url::{Origin, Url}; -use std::convert::TryFrom; -use std::time::Duration; +use crate::identity::IdentityId; +use crate::idm::delayed::{DelayedAction, Oauth2ConsentGrant}; +use crate::idm::server::{IdmServerProxyReadTransaction, IdmServerTransaction}; +use crate::prelude::*; +use crate::value::OAUTHSCOPE_RE; lazy_static! 
{ static ref CLASS_OAUTH2: PartialValue = PartialValue::new_class("oauth2_resource_server"); @@ -1351,26 +1350,22 @@ fn parse_basic_authz(client_authz: &str) -> Result<(String, String), Oauth2Error #[cfg(test)] mod tests { - use crate::event::CreateEvent; + use std::convert::TryFrom; + use std::str::FromStr; + use std::time::Duration; + + use base64urlsafedata::Base64UrlSafeData; + use compact_jwt::{JwaAlg, Jwk, JwkUse, JwsValidator, OidcSubject, OidcUnverified}; + use kanidm_proto::oauth2::*; + use kanidm_proto::v1::{AuthType, UserAuthToken}; + use openssl::sha; + + use crate::event::{CreateEvent, DeleteEvent, ModifyEvent}; use crate::idm::delayed::DelayedAction; use crate::idm::oauth2::{AuthoriseResponse, Oauth2Error}; use crate::idm::server::{IdmServer, IdmServerTransaction}; use crate::prelude::*; - use crate::event::{DeleteEvent, ModifyEvent}; - - use base64urlsafedata::Base64UrlSafeData; - use kanidm_proto::oauth2::*; - use kanidm_proto::v1::{AuthType, UserAuthToken}; - - use compact_jwt::{JwaAlg, Jwk, JwkUse, JwsValidator, OidcSubject, OidcUnverified}; - - use openssl::sha; - - use std::convert::TryFrom; - use std::str::FromStr; - use std::time::Duration; - const TEST_CURRENT_TIME: u64 = 6000; const UAT_EXPIRE: u64 = 5; const TOKEN_EXPIRE: u64 = 900; diff --git a/kanidmd/idm/src/idm/radius.rs b/kanidmd/idm/src/idm/radius.rs index 9e064e3e0..6ea3eb794 100644 --- a/kanidmd/idm/src/idm/radius.rs +++ b/kanidmd/idm/src/idm/radius.rs @@ -1,14 +1,13 @@ -use crate::idm::group::Group; +use std::time::Duration; + +use kanidm_proto::v1::{OperationError, RadiusAuthToken}; +use time::OffsetDateTime; use uuid::Uuid; -use crate::prelude::*; - use crate::entry::{Entry, EntryCommitted, EntryReduced}; +use crate::idm::group::Group; +use crate::prelude::*; use crate::value::PartialValue; -use kanidm_proto::v1::OperationError; -use kanidm_proto::v1::RadiusAuthToken; -use std::time::Duration; -use time::OffsetDateTime; lazy_static! 
{ static ref PVCLASS_ACCOUNT: PartialValue = PartialValue::new_class("account"); diff --git a/kanidmd/idm/src/idm/server.rs b/kanidmd/idm/src/idm/server.rs index 187189be1..c40bf598b 100644 --- a/kanidmd/idm/src/idm/server.rs +++ b/kanidmd/idm/src/idm/server.rs @@ -1,3 +1,35 @@ +use core::task::{Context, Poll}; +use std::convert::TryFrom; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use async_std::task; +use compact_jwt::{Jws, JwsSigner, JwsUnverified, JwsValidator}; +use concread::bptree::{BptreeMap, BptreeMapReadTxn, BptreeMapWriteTxn}; +use concread::cowcell::{CowCellReadTxn, CowCellWriteTxn}; +use concread::hashmap::HashMap; +use concread::CowCell; +use fernet::Fernet; +// #[cfg(any(test,bench))] +use futures::task as futures_task; +use hashbrown::HashSet; +use kanidm_proto::v1::{ + ApiToken, BackupCodesView, CredentialStatus, PasswordFeedback, RadiusAuthToken, UnixGroupToken, + UnixUserToken, UserAuthToken, +}; +use rand::prelude::*; +use tokio::sync::mpsc::{ + unbounded_channel as unbounded, UnboundedReceiver as Receiver, UnboundedSender as Sender, +}; +use tokio::sync::{Mutex, Semaphore}; +use tracing::trace; +use url::Url; +use webauthn_rs::prelude::{Webauthn, WebauthnBuilder}; + +use super::delayed::BackupCodeRemoval; +use super::event::ReadBackupCodeEvent; +use crate::actors::v1_write::QueryServerWriteV1; use crate::credential::policy::CryptoPolicy; use crate::credential::softlock::CredSoftLock; use crate::event::{AuthEvent, AuthEventStep, AuthResult}; @@ -5,6 +37,10 @@ use crate::identity::{IdentType, IdentUser, Limits}; use crate::idm::account::Account; use crate::idm::authsession::AuthSession; use crate::idm::credupdatesession::CredentialUpdateSessionMutex; +use crate::idm::delayed::{ + DelayedAction, Oauth2ConsentGrant, PasswordUpgrade, UnixPasswordUpgrade, + WebauthnCounterIncrement, +}; #[cfg(test)] use crate::idm::event::PasswordChangeEvent; use crate::idm::event::{ @@ -26,54 +62,6 @@ use crate::ldap::{LdapBoundToken, LdapSession}; use crate::prelude::*; use crate::utils::{password_from_random, readable_password_from_random, uuid_from_duration, Sid}; -use crate::actors::v1_write::QueryServerWriteV1; -use crate::idm::delayed::{ - DelayedAction, Oauth2ConsentGrant, PasswordUpgrade, UnixPasswordUpgrade, - WebauthnCounterIncrement, -}; - -use hashbrown::HashSet; -use kanidm_proto::v1::{ - ApiToken, BackupCodesView, CredentialStatus, PasswordFeedback, RadiusAuthToken, UnixGroupToken, - UnixUserToken, UserAuthToken, -}; - -use compact_jwt::{Jws, JwsSigner, JwsUnverified, JwsValidator}; -use fernet::Fernet; - -use tokio::sync::mpsc::{ - unbounded_channel as unbounded, UnboundedReceiver as Receiver, UnboundedSender as Sender, -}; -use tokio::sync::Semaphore; - -use async_std::task; - -// #[cfg(any(test,bench))] -use core::task::{Context, Poll}; -// #[cfg(any(test,bench))] -use futures::task as futures_task; - -use concread::{ - bptree::{BptreeMap, BptreeMapReadTxn, BptreeMapWriteTxn}, - cowcell::{CowCellReadTxn, CowCellWriteTxn}, - hashmap::HashMap, - CowCell, -}; - -use rand::prelude::*; -use std::convert::TryFrom; -use std::str::FromStr; -use std::{sync::Arc, time::Duration}; -use tokio::sync::Mutex; -use url::Url; - -use webauthn_rs::prelude::{Webauthn, WebauthnBuilder}; - -use super::delayed::BackupCodeRemoval; -use super::event::ReadBackupCodeEvent; - -use tracing::trace; - type AuthSessionMutex = Arc>; type CredSoftLockMutex = Arc>; @@ -298,6 +286,7 @@ impl IdmServer { } /// Read from the database, in a transaction. 
+ #[instrument(level = "debug", skip_all)] pub async fn proxy_read_async(&self) -> IdmServerProxyReadTransaction<'_> { IdmServerProxyReadTransaction { qs_read: self.qs.read_async().await, @@ -312,6 +301,7 @@ impl IdmServer { task::block_on(self.proxy_write_async(ts)) } + #[instrument(level = "debug", skip_all)] pub async fn proxy_write_async(&self, ts: Duration) -> IdmServerProxyWriteTransaction<'_> { let mut sid = [0; 4]; let mut rng = StdRng::from_entropy(); @@ -421,6 +411,7 @@ pub(crate) trait IdmServerTransaction<'a> { /// The primary method of verification selection is the use of the KID parameter /// that we internally sign with. We can use this to select the appropriate token type /// and validation method. + #[instrument(level = "info", skip_all)] fn validate_and_parse_token_to_ident( &self, token: Option<&str>, @@ -532,6 +523,7 @@ pub(crate) trait IdmServerTransaction<'a> { } } + #[instrument(level = "debug", skip_all)] fn validate_and_parse_uat( &self, token: Option<&str>, @@ -598,6 +590,7 @@ pub(crate) trait IdmServerTransaction<'a> { /// something we can pin access controls and other limits and references to. /// This is why it is the location where validity windows are checked and other /// relevant session information is injected. + #[instrument(level = "debug", skip_all)] fn process_uat_to_identity( &self, uat: &UserAuthToken, @@ -663,6 +656,7 @@ pub(crate) trait IdmServerTransaction<'a> { }) } + #[instrument(level = "debug", skip_all)] fn process_apit_to_identity( &self, apit: &ApiToken, @@ -683,6 +677,7 @@ pub(crate) trait IdmServerTransaction<'a> { }) } + #[instrument(level = "debug", skip_all)] fn validate_ldap_session( &self, session: &LdapSession, @@ -883,16 +878,14 @@ impl<'a> IdmServerAuthTransaction<'a> { match auth_session { Some(auth_session) => { let mut session_write = self.sessions.write(); - spanned!("idm::server::auth -> sessions", { - if session_write.contains_key(&sessionid) { - Err(OperationError::InvalidSessionState) - } else { - session_write.insert(sessionid, Arc::new(Mutex::new(auth_session))); - // Debugging: ensure we really inserted ... - debug_assert!(session_write.get(&sessionid).is_some()); - Ok(()) - } - })?; + if session_write.contains_key(&sessionid) { + Err(OperationError::InvalidSessionState) + } else { + session_write.insert(sessionid, Arc::new(Mutex::new(auth_session))); + // Debugging: ensure we really inserted ... + debug_assert!(session_write.get(&sessionid).is_some()); + Ok(()) + }?; session_write.commit(); } None => { @@ -2024,65 +2017,64 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { } } + #[instrument(level = "debug", skip_all)] pub fn commit(mut self) -> Result<(), OperationError> { - spanned!("idm::server::IdmServerProxyWriteTransaction::commit", { - if self - .qs_write - .get_changed_uuids() - .contains(&UUID_SYSTEM_CONFIG) - { - self.reload_password_badlist()?; - }; - if self.qs_write.get_changed_ouath2() { - self.qs_write - .get_oauth2rs_set() - .and_then(|oauth2rs_set| self.oauth2rs.reload(oauth2rs_set))?; - } - if self.qs_write.get_changed_domain() { - // reload token_key? 
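The domain-key reload handled next converts `Fernet::new`'s `Option` into the transaction's error type with `ok_or_else`, so `?` can abort the commit on bad key material. A minimal sketch of that promotion, using a stand-in error enum rather than the real `OperationError`:

```rust
// Stand-in for OperationError::InvalidState; the shape of the promotion is
// what matters here, not the concrete error type.
#[derive(Debug)]
enum OpError {
    InvalidState,
}

// Fernet::new returns Option: None means the key material is unusable.
// ok_or_else logs and lifts that None into an error so the surrounding
// commit can bail out with `?`.
fn reload_token_key(token_key: Option<&str>) -> Result<String, OpError> {
    token_key.map(str::to_owned).ok_or_else(|| {
        eprintln!("Failed to generate token_enc_key");
        OpError::InvalidState
    })
}

fn main() {
    assert!(reload_token_key(Some("key-material")).is_ok());
    assert!(matches!(reload_token_key(None), Err(OpError::InvalidState)));
}
```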
- self.qs_write - .get_domain_fernet_private_key() - .and_then(|token_key| { - Fernet::new(&token_key).ok_or_else(|| { - admin_error!("Failed to generate token_enc_key"); + if self + .qs_write + .get_changed_uuids() + .contains(&UUID_SYSTEM_CONFIG) + { + self.reload_password_badlist()?; + }; + if self.qs_write.get_changed_ouath2() { + self.qs_write + .get_oauth2rs_set() + .and_then(|oauth2rs_set| self.oauth2rs.reload(oauth2rs_set))?; + } + if self.qs_write.get_changed_domain() { + // reload token_key? + self.qs_write + .get_domain_fernet_private_key() + .and_then(|token_key| { + Fernet::new(&token_key).ok_or_else(|| { + admin_error!("Failed to generate token_enc_key"); + OperationError::InvalidState + }) + }) + .map(|new_handle| { + *self.token_enc_key = new_handle; + })?; + self.qs_write + .get_domain_es256_private_key() + .and_then(|key_der| { + JwsSigner::from_es256_der(&key_der).map_err(|e| { + admin_error!("Failed to generate uat_jwt_signer - {:?}", e); + OperationError::InvalidState + }) + }) + .and_then(|signer| { + signer + .get_validator() + .map_err(|e| { + admin_error!("Failed to generate uat_jwt_validator - {:?}", e); OperationError::InvalidState }) - }) - .map(|new_handle| { - *self.token_enc_key = new_handle; - })?; - self.qs_write - .get_domain_es256_private_key() - .and_then(|key_der| { - JwsSigner::from_es256_der(&key_der).map_err(|e| { - admin_error!("Failed to generate uat_jwt_signer - {:?}", e); - OperationError::InvalidState - }) - }) - .and_then(|signer| { - signer - .get_validator() - .map_err(|e| { - admin_error!("Failed to generate uat_jwt_validator - {:?}", e); - OperationError::InvalidState - }) - .map(|validator| (signer, validator)) - }) - .map(|(new_signer, new_validator)| { - *self.uat_jwt_signer = new_signer; - *self.uat_jwt_validator = new_validator; - })?; - } - // Commit everything. - self.oauth2rs.commit(); - self.uat_jwt_signer.commit(); - self.uat_jwt_validator.commit(); - self.token_enc_key.commit(); - self.pw_badlist_cache.commit(); - self.cred_update_sessions.commit(); - trace!("cred_update_session.commit"); - self.qs_write.commit() - }) + .map(|validator| (signer, validator)) + }) + .map(|(new_signer, new_validator)| { + *self.uat_jwt_signer = new_signer; + *self.uat_jwt_validator = new_validator; + })?; + } + // Commit everything. 
+ self.oauth2rs.commit(); + self.uat_jwt_signer.commit(); + self.uat_jwt_validator.commit(); + self.token_enc_key.commit(); + self.pw_badlist_cache.commit(); + self.cred_update_sessions.commit(); + trace!("cred_update_session.commit"); + self.qs_write.commit() } fn reload_password_badlist(&mut self) -> Result<(), OperationError> { @@ -2100,6 +2092,14 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { #[cfg(test)] mod tests { + use std::convert::TryFrom; + use std::time::Duration; + + use async_std::task; + use kanidm_proto::v1::{AuthAllowed, AuthMech, AuthType, OperationError}; + use smartstring::alias::String as AttrString; + use uuid::Uuid; + use crate::credential::policy::CryptoPolicy; use crate::credential::{Credential, Password}; use crate::event::{AuthEvent, AuthResult, CreateEvent, ModifyEvent}; @@ -2107,20 +2107,12 @@ mod tests { PasswordChangeEvent, RadiusAuthTokenEvent, RegenerateRadiusSecretEvent, UnixGroupTokenEvent, UnixPasswordChangeEvent, UnixUserAuthEvent, UnixUserTokenEvent, }; + use crate::idm::server::{IdmServer, IdmServerTransaction}; use crate::idm::AuthState; use crate::modify::{Modify, ModifyList}; use crate::prelude::*; - use kanidm_proto::v1::OperationError; - use kanidm_proto::v1::{AuthAllowed, AuthMech, AuthType}; - - use crate::idm::server::{IdmServer, IdmServerTransaction}; // , IdmServerDelayed; use crate::utils::duration_from_epoch_now; - use async_std::task; - use smartstring::alias::String as AttrString; - use std::convert::TryFrom; - use std::time::Duration; - use uuid::Uuid; const TEST_PASSWORD: &'static str = "ntaoeuntnaoeuhraohuercahu😍"; const TEST_PASSWORD_INC: &'static str = "ntaoentu nkrcgaeunhibwmwmqj;k wqjbkx "; diff --git a/kanidmd/idm/src/idm/serviceaccount.rs b/kanidmd/idm/src/idm/serviceaccount.rs index 18532df8f..67894bbb6 100644 --- a/kanidmd/idm/src/idm/serviceaccount.rs +++ b/kanidmd/idm/src/idm/serviceaccount.rs @@ -1,16 +1,16 @@ +use std::collections::BTreeMap; +use std::time::Duration; + +use compact_jwt::{Jws, JwsSigner}; +use kanidm_proto::v1::ApiToken; +use time::OffsetDateTime; + use crate::event::SearchEvent; use crate::idm::account::Account; use crate::idm::server::{IdmServerProxyReadTransaction, IdmServerProxyWriteTransaction}; use crate::prelude::*; use crate::value::Session; -use compact_jwt::{Jws, JwsSigner}; -use std::collections::BTreeMap; -use std::time::Duration; -use time::OffsetDateTime; - -use kanidm_proto::v1::ApiToken; - // Need to add KID to es256 der for lookups ✅ // Need to generate the es256 on the account on modifies ✅ @@ -87,14 +87,13 @@ pub struct ServiceAccount { } impl ServiceAccount { + #[instrument(level = "debug", skip_all)] pub(crate) fn try_from_entry_rw( value: &Entry<EntrySealed, EntryCommitted>, // qs: &mut QueryServerWriteTransaction, ) -> Result<Self, OperationError> { - spanned!("idm::serviceaccount::try_from_entry_rw", { - // let groups = Group::try_from_account_entry_rw(value, qs)?; - try_from_entry!(value) - }) + // let groups = Group::try_from_account_entry_rw(value, qs)?; + try_from_entry!(value) } pub(crate) fn check_api_token_valid( @@ -354,16 +353,17 @@ impl<'a> IdmServerProxyReadTransaction<'a> { #[cfg(test)] mod tests { - use super::{DestroyApiTokenEvent, GenerateApiTokenEvent, GRACE_WINDOW}; - use crate::idm::server::IdmServerTransaction; - // use crate::prelude::*; - - use crate::event::CreateEvent; - use compact_jwt::{Jws, JwsUnverified}; - use kanidm_proto::v1::ApiToken; use std::str::FromStr; use std::time::Duration; + use compact_jwt::{Jws, JwsUnverified}; + use kanidm_proto::v1::ApiToken; + + use super::{DestroyApiTokenEvent,
GenerateApiTokenEvent, GRACE_WINDOW}; + // use crate::prelude::*; + use crate::event::CreateEvent; + use crate::idm::server::IdmServerTransaction; + const TEST_CURRENT_TIME: u64 = 6000; #[test] diff --git a/kanidmd/idm/src/idm/unix.rs b/kanidmd/idm/src/idm/unix.rs index bc5831cd5..cb4bcb02c 100644 --- a/kanidmd/idm/src/idm/unix.rs +++ b/kanidmd/idm/src/idm/unix.rs @@ -1,20 +1,18 @@ +use std::iter; +// use crossbeam::channel::Sender; +use std::time::Duration; + +use kanidm_proto::v1::{OperationError, UnixGroupToken, UnixUserToken}; +use time::OffsetDateTime; +use tokio::sync::mpsc::UnboundedSender as Sender; use uuid::Uuid; use crate::credential::policy::CryptoPolicy; -use crate::credential::{softlock::CredSoftLockPolicy, Credential}; +use crate::credential::softlock::CredSoftLockPolicy; +use crate::credential::Credential; +use crate::idm::delayed::{DelayedAction, UnixPasswordUpgrade}; use crate::modify::{ModifyInvalid, ModifyList}; use crate::prelude::*; -use kanidm_proto::v1::OperationError; -use kanidm_proto::v1::{UnixGroupToken, UnixUserToken}; - -use crate::idm::delayed::{DelayedAction, UnixPasswordUpgrade}; - -// use crossbeam::channel::Sender; -use std::time::Duration; -use time::OffsetDateTime; -use tokio::sync::mpsc::UnboundedSender as Sender; - -use std::iter; #[derive(Debug, Clone)] pub(crate) struct UnixUserAccount { diff --git a/kanidmd/idm/src/interval.rs b/kanidmd/idm/src/interval.rs index db8737cd6..5a7f37329 100644 --- a/kanidmd/idm/src/interval.rs +++ b/kanidmd/idm/src/interval.rs @@ -1,20 +1,20 @@ //! This contains scheduled tasks/interval tasks that are run inside of the server on a schedule //! as background operations. -use crate::actors::v1_read::QueryServerReadV1; -use crate::actors::v1_write::QueryServerWriteV1; - -use crate::config::OnlineBackup; -use crate::constants::PURGE_FREQUENCY; -use crate::event::{OnlineBackupEvent, PurgeRecycledEvent, PurgeTombstoneEvent}; +use std::fs; +use std::path::Path; use chrono::Utc; use saffron::parse::{CronExpr, English}; use saffron::Cron; -use std::fs; -use std::path::Path; use tokio::time::{interval, sleep, Duration}; +use crate::actors::v1_read::QueryServerReadV1; +use crate::actors::v1_write::QueryServerWriteV1; +use crate::config::OnlineBackup; +use crate::constants::PURGE_FREQUENCY; +use crate::event::{OnlineBackupEvent, PurgeRecycledEvent, PurgeTombstoneEvent}; + pub struct IntervalActor; impl IntervalActor { diff --git a/kanidmd/idm/src/ldap.rs b/kanidmd/idm/src/ldap.rs index f848bcd23..843977673 100644 --- a/kanidmd/idm/src/ldap.rs +++ b/kanidmd/idm/src/ldap.rs @@ -1,18 +1,20 @@ //! LDAP specific operations handling components. This is where LDAP operations //! are sent to for processing. +use std::collections::BTreeSet; +use std::iter; + +use async_std::task; +use kanidm_proto::v1::{ApiToken, OperationError, UserAuthToken}; +use ldap3_proto::simple::*; +use regex::Regex; +use tracing::trace; +use uuid::Uuid; + use crate::event::SearchEvent; use crate::idm::event::{LdapAuthEvent, LdapTokenAuthEvent}; use crate::idm::server::{IdmServer, IdmServerTransaction}; use crate::prelude::*; -use async_std::task; -use kanidm_proto::v1::{ApiToken, OperationError, UserAuthToken}; -use ldap3_proto::simple::*; -use regex::Regex; -use std::collections::BTreeSet; -use std::iter; -use tracing::trace; -use uuid::Uuid; // Clippy doesn't like Bind here. But proto needs unboxed ldapmsg, // and ldapboundtoken is moved. Really, it's not too bad, every message here is pretty sucky. 
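The ldap.rs hunks that follow repeat the diff's central refactor: the `spanned!` block macro comes out and the function gains a `#[instrument]` attribute instead, so the span covers the whole call without an extra indentation level. A minimal sketch of what that attribute provides, assuming the `tracing` and `tracing-subscriber` crates in Cargo.toml:

```rust
use tracing::{debug, instrument, Level};

// #[instrument] opens a span named after the function for every call.
// skip_all records no arguments, which matches the usage in this diff,
// where the server types are large and don't implement the needed traits.
#[instrument(level = "debug", skip_all)]
fn do_work(terms: &[&str]) -> usize {
    // Emitted inside the `do_work` span, so output is automatically scoped.
    debug!(count = terms.len(), "processing");
    terms.len()
}

fn main() {
    tracing_subscriber::fmt().with_max_level(Level::DEBUG).init();
    do_work(&["name=admin"]);
}
```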
@@ -121,6 +123,7 @@ impl LdapServer { }) } + #[instrument(level = "debug", skip_all)] async fn do_search( &self, idms: &IdmServer, @@ -252,93 +255,82 @@ impl LdapServer { let ct = duration_from_epoch_now(); let idm_read = idms.proxy_read_async().await; - spanned!("ldap::do_search", { - // Now start the txn - we need it for resolving filter components. + // Now start the txn - we need it for resolving filter components. - // join the filter, with ext_filter - let lfilter = match ext_filter { - Some(ext) => LdapFilter::And(vec![ - sr.filter.clone(), - ext, - LdapFilter::Not(Box::new(LdapFilter::Or(vec![ - LdapFilter::Equality("class".to_string(), "classtype".to_string()), - LdapFilter::Equality("class".to_string(), "attributetype".to_string()), - LdapFilter::Equality( - "class".to_string(), - "access_control_profile".to_string(), - ), - ]))), - ]), - None => LdapFilter::And(vec![ - sr.filter.clone(), - LdapFilter::Not(Box::new(LdapFilter::Or(vec![ - LdapFilter::Equality("class".to_string(), "classtype".to_string()), - LdapFilter::Equality("class".to_string(), "attributetype".to_string()), - LdapFilter::Equality( - "class".to_string(), - "access_control_profile".to_string(), - ), - ]))), - ]), - }; + // join the filter, with ext_filter + let lfilter = match ext_filter { + Some(ext) => LdapFilter::And(vec![ + sr.filter.clone(), + ext, + LdapFilter::Not(Box::new(LdapFilter::Or(vec![ + LdapFilter::Equality("class".to_string(), "classtype".to_string()), + LdapFilter::Equality("class".to_string(), "attributetype".to_string()), + LdapFilter::Equality( + "class".to_string(), + "access_control_profile".to_string(), + ), + ]))), + ]), + None => LdapFilter::And(vec![ + sr.filter.clone(), + LdapFilter::Not(Box::new(LdapFilter::Or(vec![ + LdapFilter::Equality("class".to_string(), "classtype".to_string()), + LdapFilter::Equality("class".to_string(), "attributetype".to_string()), + LdapFilter::Equality( + "class".to_string(), + "access_control_profile".to_string(), + ), + ]))), + ]), + }; - admin_info!(filter = ?lfilter, "LDAP Search Filter"); + admin_info!(filter = ?lfilter, "LDAP Search Filter"); - // Build the event, with the permissions from effective_session - // - // ! Remember, searchEvent wraps to ignore hidden for us. - let se = spanned!("ldap::do_search", { - let ident = idm_read - .validate_ldap_session(&uat.effective_session, ct) - .map_err(|e| { - admin_error!("Invalid identity: {:?}", e); - e - })?; - SearchEvent::new_ext_impersonate_uuid( - &idm_read.qs_read, - ident, - &lfilter, - k_attrs, - ) - }) + // Build the event, with the permissions from effective_session + // + // ! Remember, searchEvent wraps to ignore hidden for us. + let ident = idm_read + .validate_ldap_session(&uat.effective_session, ct) .map_err(|e| { - admin_error!("failed to create search event -> {:?}", e); + admin_error!("Invalid identity: {:?}", e); e })?; + let se = + SearchEvent::new_ext_impersonate_uuid(&idm_read.qs_read, ident, &lfilter, k_attrs) + .map_err(|e| { + admin_error!("failed to create search event -> {:?}", e); + e + })?; - let res = idm_read.qs_read.search_ext(&se).map_err(|e| { - admin_error!("search failure {:?}", e); - e - })?; + let res = idm_read.qs_read.search_ext(&se).map_err(|e| { + admin_error!("search failure {:?}", e); + e + })?; - // These have already been fully reduced (access controls applied), - // so we can just transform the values and open palm slam them into - // the result structure. 
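The filter joining above is worth pausing on: the client's filter is AND-ed with an optional server-side extension and a `Not(Or(...))` deny-list, so schema and access-control entries never surface through LDAP. A self-contained sketch of that composition with a stand-in filter enum (the real code builds `ldap3_proto`'s `LdapFilter` with the same constructors):

```rust
// Stand-in AST mirroring the LdapFilter variants used above.
#[derive(Debug, Clone)]
enum Filter {
    And(Vec<Filter>),
    Or(Vec<Filter>),
    Not(Box<Filter>),
    Eq(String, String),
}

// Deny-list that keeps schema and ACP internals out of LDAP results.
fn hidden_classes() -> Filter {
    Filter::Not(Box::new(Filter::Or(vec![
        Filter::Eq("class".into(), "classtype".into()),
        Filter::Eq("class".into(), "attributetype".into()),
        Filter::Eq("class".into(), "access_control_profile".into()),
    ])))
}

// Join the client filter with the optional extension and the deny-list.
fn join_filter(client: Filter, ext: Option<Filter>) -> Filter {
    match ext {
        Some(e) => Filter::And(vec![client, e, hidden_classes()]),
        None => Filter::And(vec![client, hidden_classes()]),
    }
}

fn main() {
    let joined = join_filter(Filter::Eq("name".into(), "admin".into()), None);
    println!("{joined:?}");
}
```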
- let lres = spanned!("ldap::do_search", { - let lres: Result<Vec<_>, _> = res - .into_iter() - .map(|e| { - e.to_ldap(&idm_read.qs_read, self.basedn.as_str(), all_attrs, &l_attrs) - // if okay, wrap in a ldap msg. - .map(|r| sr.gen_result_entry(r)) - }) - .chain(iter::once(Ok(sr.gen_success()))) - .collect(); - lres - }); + // These have already been fully reduced (access controls applied), + // so we can just transform the values and open palm slam them into + // the result structure. + let lres: Result<Vec<_>, _> = res + .into_iter() + .map(|e| { + e.to_ldap(&idm_read.qs_read, self.basedn.as_str(), all_attrs, &l_attrs) + // if okay, wrap in a ldap msg. + .map(|r| sr.gen_result_entry(r)) + }) + .chain(iter::once(Ok(sr.gen_success()))) + .collect(); - let lres = lres.map_err(|e| { - admin_error!("entry resolve failure {:?}", e); - e - })?; + let lres = lres.map_err(|e| { + admin_error!("entry resolve failure {:?}", e); + e + })?; - admin_info!( - nentries = %lres.len(), - "LDAP Search Success -> number of entries" - ); + admin_info!( + nentries = %lres.len(), + "LDAP Search Success -> number of entries" + ); - Ok(lres) - }) + Ok(lres) } } @@ -550,18 +542,19 @@ pub(crate) fn ldap_attr_filter_map(input: &str) -> AttrString { #[cfg(test)] mod tests { // use crate::prelude::*; - use crate::event::{CreateEvent, ModifyEvent}; - use crate::idm::event::UnixPasswordChangeEvent; - use crate::idm::serviceaccount::GenerateApiTokenEvent; - use crate::ldap::{LdapServer, LdapSession}; + use std::str::FromStr; + use async_std::task; + use compact_jwt::{Jws, JwsUnverified}; + use hashbrown::HashSet; + use kanidm_proto::v1::ApiToken; + use ldap3_proto::proto::{LdapFilter, LdapOp, LdapSearchScope}; + use ldap3_proto::simple::*; - use std::str::FromStr; - use compact_jwt::{Jws, JwsUnverified}; + use crate::event::{CreateEvent, ModifyEvent}; + use crate::idm::event::UnixPasswordChangeEvent; + use crate::idm::serviceaccount::GenerateApiTokenEvent; + use crate::ldap::{LdapServer, LdapSession}; const TEST_PASSWORD: &'static str = "ntaoeuntnaoeuhraohuercahu😍"; diff --git a/kanidmd/idm/src/lib.rs b/kanidmd/idm/src/lib.rs index 1fe22dd86..3112e9ee8 100644 --- a/kanidmd/idm/src/lib.rs +++ b/kanidmd/idm/src/lib.rs @@ -63,39 +63,36 @@ pub mod config; /// A prelude of imports that should be imported by all other Kanidm modules to /// help make imports cleaner.
pub mod prelude { - pub use crate::utils::duration_from_epoch_now; pub use kanidm_proto::v1::{ConsistencyError, OperationError}; + pub use sketching::{ + admin_debug, admin_error, admin_info, admin_warn, filter_error, filter_info, filter_trace, + filter_warn, perf_trace, request_error, request_info, request_trace, request_warn, + security_access, security_critical, security_error, security_info, tagged_event, EventTag, + }; pub use smartstring::alias::String as AttrString; pub use url::Url; pub use uuid::Uuid; pub use crate::constants::*; - pub use crate::filter::{ - f_and, f_andnot, f_eq, f_id, f_inc, f_lt, f_or, f_pres, f_self, f_spn_name, f_sub, - }; - pub use crate::filter::{Filter, FilterInvalid, FC}; - pub use crate::modify::{m_pres, m_purge, m_remove}; - pub use crate::modify::{Modify, ModifyInvalid, ModifyList}; - pub use crate::entry::{ Entry, EntryCommitted, EntryInit, EntryInvalid, EntryInvalidCommitted, EntryNew, EntryReduced, EntrySealed, EntrySealedCommitted, EntryTuple, EntryValid, }; + pub use crate::filter::{ + f_and, f_andnot, f_eq, f_id, f_inc, f_lt, f_or, f_pres, f_self, f_spn_name, f_sub, Filter, + FilterInvalid, FC, + }; pub use crate::identity::Identity; + pub use crate::modify::{m_pres, m_purge, m_remove, Modify, ModifyInvalid, ModifyList}; pub use crate::server::{ QueryServer, QueryServerReadTransaction, QueryServerTransaction, QueryServerWriteTransaction, }; + pub use crate::utils::duration_from_epoch_now; pub use crate::value::{IndexType, PartialValue, SyntaxType, Value}; pub use crate::valueset::{ ValueSet, ValueSetBool, ValueSetCid, ValueSetIndex, ValueSetIutf8, ValueSetRefer, ValueSetSecret, ValueSetSpn, ValueSetSyntax, ValueSetT, ValueSetUint32, ValueSetUtf8, ValueSetUuid, }; - pub use sketching::{ - admin_debug, admin_error, admin_info, admin_warn, filter_error, filter_info, filter_trace, - filter_warn, perf_trace, request_error, request_info, request_trace, request_warn, - security_access, security_critical, security_error, security_info, spanned, tagged_event, - EventTag, - }; } diff --git a/kanidmd/idm/src/macros.rs b/kanidmd/idm/src/macros.rs index 88d16bccf..b497ddd8c 100644 --- a/kanidmd/idm/src/macros.rs +++ b/kanidmd/idm/src/macros.rs @@ -19,9 +19,10 @@ macro_rules! setup_test { ( $preload_entries:expr ) => {{ - use crate::utils::duration_from_epoch_now; use async_std::task; + use crate::utils::duration_from_epoch_now; + let _ = sketching::test_init(); // Create an in memory BE @@ -105,10 +106,11 @@ macro_rules! run_test { #[cfg(test)] macro_rules! entry_str_to_account { ($entry_str:expr) => {{ + use std::iter::once; + use crate::entry::{Entry, EntryInvalid, EntryNew}; use crate::idm::account::Account; use crate::value::Value; - use std::iter::once; let mut e: Entry = unsafe { Entry::unsafe_from_entry_str($entry_str).into_invalid_new() }; @@ -195,37 +197,35 @@ macro_rules! 
run_create_test { use crate::schema::Schema; use crate::utils::duration_from_epoch_now; - spanned!("plugins::macros::run_create_test", { - let qs = setup_test!($preload_entries); + let qs = setup_test!($preload_entries); - let ce = match $internal { - None => CreateEvent::new_internal($create_entries.clone()), - Some(e_str) => unsafe { - CreateEvent::new_impersonate_entry_ser(e_str, $create_entries.clone()) - }, - }; + let ce = match $internal { + None => CreateEvent::new_internal($create_entries.clone()), + Some(e_str) => unsafe { + CreateEvent::new_impersonate_entry_ser(e_str, $create_entries.clone()) + }, + }; - { - let qs_write = qs.write(duration_from_epoch_now()); - let r = qs_write.create(&ce); - trace!("test result: {:?}", r); - assert!(r == $expect); - $check(&qs_write); - match r { - Ok(_) => { - qs_write.commit().expect("commit failure!"); - } - Err(e) => { - admin_error!("Rolling back => {:?}", e); - } + { + let qs_write = qs.write(duration_from_epoch_now()); + let r = qs_write.create(&ce); + trace!("test result: {:?}", r); + assert!(r == $expect); + $check(&qs_write); + match r { + Ok(_) => { + qs_write.commit().expect("commit failure!"); + } + Err(e) => { + admin_error!("Rolling back => {:?}", e); } } - // Make sure there are no errors. - trace!("starting verification"); - let ver = qs.verify(); - trace!("verification -> {:?}", ver); - assert!(ver.len() == 0); - }); + } + // Make sure there are no errors. + trace!("starting verification"); + let ver = qs.verify(); + trace!("verification -> {:?}", ver); + assert!(ver.len() == 0); }}; } @@ -246,49 +246,41 @@ macro_rules! run_modify_test { use crate::prelude::*; use crate::schema::Schema; - spanned!("plugins::macros::run_modify_test", { - let qs = setup_test!($preload_entries); + let qs = setup_test!($preload_entries); - { - let qs_write = qs.write(duration_from_epoch_now()); - spanned!("plugins::macros::run_modify_test -> pre_test hook", { - $pre_hook(&qs_write) - }); - qs_write.commit().expect("commit failure!"); - } + { + let qs_write = qs.write(duration_from_epoch_now()); + $pre_hook(&qs_write); + qs_write.commit().expect("commit failure!"); + } - let me = match $internal { - None => unsafe { ModifyEvent::new_internal_invalid($modify_filter, $modify_list) }, - Some(e_str) => unsafe { - ModifyEvent::new_impersonate_entry_ser(e_str, $modify_filter, $modify_list) - }, - }; + let me = match $internal { + None => unsafe { ModifyEvent::new_internal_invalid($modify_filter, $modify_list) }, + Some(e_str) => unsafe { + ModifyEvent::new_impersonate_entry_ser(e_str, $modify_filter, $modify_list) + }, + }; - { - let qs_write = qs.write(duration_from_epoch_now()); - let r = spanned!("plugins::macros::run_modify_test -> main_test", { - qs_write.modify(&me) - }); - spanned!("plugins::macros::run_modify_test -> post_test check", { - $check(&qs_write) - }); - trace!("test result: {:?}", r); - assert!(r == $expect); - match r { - Ok(_) => { - qs_write.commit().expect("commit failure!"); - } - Err(e) => { - admin_error!("Rolling back => {:?}", e); - } + { + let qs_write = qs.write(duration_from_epoch_now()); + let r = qs_write.modify(&me); + $check(&qs_write); + trace!("test result: {:?}", r); + assert!(r == $expect); + match r { + Ok(_) => { + qs_write.commit().expect("commit failure!"); + } + Err(e) => { + admin_error!("Rolling back => {:?}", e); } } - // Make sure there are no errors. 
- trace!("starting verification"); - let ver = qs.verify(); - trace!("verification -> {:?}", ver); - assert!(ver.len() == 0); - }); + } + // Make sure there are no errors. + trace!("starting verification"); + let ver = qs.verify(); + trace!("verification -> {:?}", ver); + assert!(ver.len() == 0); }}; } @@ -308,37 +300,35 @@ macro_rules! run_delete_test { use crate::schema::Schema; use crate::utils::duration_from_epoch_now; - spanned!("plugins::macros::run_delete_test", { - let qs = setup_test!($preload_entries); + let qs = setup_test!($preload_entries); - let de = match $internal { - Some(e_str) => unsafe { - DeleteEvent::new_impersonate_entry_ser(e_str, $delete_filter.clone()) - }, - None => unsafe { DeleteEvent::new_internal_invalid($delete_filter.clone()) }, - }; + let de = match $internal { + Some(e_str) => unsafe { + DeleteEvent::new_impersonate_entry_ser(e_str, $delete_filter.clone()) + }, + None => unsafe { DeleteEvent::new_internal_invalid($delete_filter.clone()) }, + }; - { - let qs_write = qs.write(duration_from_epoch_now()); - let r = qs_write.delete(&de); - trace!("test result: {:?}", r); - $check(&qs_write); - assert!(r == $expect); - match r { - Ok(_) => { - qs_write.commit().expect("commit failure!"); - } - Err(e) => { - admin_error!("Rolling back => {:?}", e); - } + { + let qs_write = qs.write(duration_from_epoch_now()); + let r = qs_write.delete(&de); + trace!("test result: {:?}", r); + $check(&qs_write); + assert!(r == $expect); + match r { + Ok(_) => { + qs_write.commit().expect("commit failure!"); + } + Err(e) => { + admin_error!("Rolling back => {:?}", e); } } - // Make sure there are no errors. - trace!("starting verification"); - let ver = qs.verify(); - trace!("verification -> {:?}", ver); - assert!(ver.len() == 0); - }); + } + // Make sure there are no errors. + trace!("starting verification"); + let ver = qs.verify(); + trace!("verification -> {:?}", ver); + assert!(ver.len() == 0); }}; } diff --git a/kanidmd/idm/src/modify.rs b/kanidmd/idm/src/modify.rs index 189be742f..c1fba5005 100644 --- a/kanidmd/idm/src/modify.rs +++ b/kanidmd/idm/src/modify.rs @@ -2,19 +2,19 @@ //! express the series of Modifications that should be applied. These are expressed //! as "states" on what attribute-values should appear as within the `Entry` -use crate::prelude::*; -use kanidm_proto::v1::Entry as ProtoEntry; -use kanidm_proto::v1::Modify as ProtoModify; -use kanidm_proto::v1::ModifyList as ProtoModifyList; - -use crate::schema::SchemaTransaction; -use crate::value::{PartialValue, Value}; -use kanidm_proto::v1::{OperationError, SchemaError}; +use std::slice; +use kanidm_proto::v1::{ + Entry as ProtoEntry, Modify as ProtoModify, ModifyList as ProtoModifyList, OperationError, + SchemaError, +}; // Should this be std? use serde::{Deserialize, Serialize}; use smartstring::alias::String as AttrString; -use std::slice; + +use crate::prelude::*; +use crate::schema::SchemaTransaction; +use crate::value::{PartialValue, Value}; #[derive(Serialize, Deserialize, Debug, Clone)] pub struct ModifyValid; @@ -69,8 +69,8 @@ pub struct ModifyList { } impl<'a> IntoIterator for &'a ModifyList { - type Item = &'a Modify; type IntoIter = slice::Iter<'a, Modify>; + type Item = &'a Modify; fn into_iter(self) -> Self::IntoIter { self.mods.iter() diff --git a/kanidmd/idm/src/plugins/attrunique.rs b/kanidmd/idm/src/plugins/attrunique.rs index edcc139d3..d74bc4891 100644 --- a/kanidmd/idm/src/plugins/attrunique.rs +++ b/kanidmd/idm/src/plugins/attrunique.rs @@ -4,14 +4,15 @@ // both change approaches. 
// // +use std::collections::BTreeMap; + +use kanidm_proto::v1::{ConsistencyError, PluginError}; +use tracing::trace; + use crate::event::{CreateEvent, ModifyEvent}; use crate::plugins::Plugin; use crate::prelude::*; use crate::schema::SchemaTransaction; -use kanidm_proto::v1::{ConsistencyError, PluginError}; -use tracing::trace; - -use std::collections::BTreeMap; pub struct AttrUnique; @@ -192,9 +193,10 @@ impl Plugin for AttrUnique { #[cfg(test)] mod tests { - use crate::prelude::*; use kanidm_proto::v1::PluginError; + use crate::prelude::*; + // Test entry in db, and same name, reject. #[test] fn test_pre_create_name_unique() { diff --git a/kanidmd/idm/src/plugins/base.rs b/kanidmd/idm/src/plugins/base.rs index 12ba64f7f..a60ced3c2 100644 --- a/kanidmd/idm/src/plugins/base.rs +++ b/kanidmd/idm/src/plugins/base.rs @@ -1,12 +1,13 @@ -use crate::plugins::Plugin; -use hashbrown::HashSet; use std::collections::BTreeSet; use std::iter::once; +use hashbrown::HashSet; +use kanidm_proto::v1::{ConsistencyError, PluginError}; + use crate::event::{CreateEvent, ModifyEvent}; use crate::modify::Modify; +use crate::plugins::Plugin; use crate::prelude::*; -use kanidm_proto::v1::{ConsistencyError, PluginError}; lazy_static! { static ref CLASS_OBJECT: Value = Value::new_class("object"); @@ -43,15 +44,9 @@ impl Plugin for Base { // debug!("Entering base pre_create_transform"); // For each candidate for entry in cand.iter_mut() { - trace!("Base check on entry: {:?}", entry); - // First, ensure we have the 'object', class in the class set. entry.add_ava("class", CLASS_OBJECT.clone()); - trace!("Object should now be in entry: {:?}", entry); - - // If they have a name, but no principal name, derive it. - // if they don't have uuid, create it. match entry.get_ava_set("uuid").map(|s| s.len()) { None => { @@ -85,7 +80,6 @@ impl Plugin for Base { let uuid_ref: Uuid = entry .get_ava_single_uuid("uuid") .ok_or_else(|| OperationError::InvalidAttribute("uuid".to_string()))?; - trace!("Entry valid UUID: {:?}", entry); if !cand_uuid.insert(uuid_ref) { trace!("uuid duplicate found in create set! {:?}", uuid_ref); return Err(OperationError::Plugin(PluginError::Base( @@ -224,9 +218,10 @@ impl Plugin for Base { #[cfg(test)] mod tests { - use crate::prelude::*; use kanidm_proto::v1::PluginError; + use crate::prelude::*; + const JSON_ADMIN_ALLOW_ALL: &'static str = r#"{ "attrs": { "class": [ diff --git a/kanidmd/idm/src/plugins/domain.rs b/kanidmd/idm/src/plugins/domain.rs index eca3c41bb..1bd1774da 100644 --- a/kanidmd/idm/src/plugins/domain.rs +++ b/kanidmd/idm/src/plugins/domain.rs @@ -4,15 +4,16 @@ // The primary point of this is to generate a unique domain UUID on startup // which is importart for management of the replication topo and trust // relationships. -use crate::plugins::Plugin; +use std::iter::once; -use crate::event::{CreateEvent, ModifyEvent}; -use crate::prelude::*; use compact_jwt::JwsSigner; use kanidm_proto::v1::OperationError; -use std::iter::once; use tracing::trace; +use crate::event::{CreateEvent, ModifyEvent}; +use crate::plugins::Plugin; +use crate::prelude::*; + lazy_static! 
{ static ref PVCLASS_DOMAIN_INFO: PartialValue = PartialValue::new_class("domain_info"); static ref PVUUID_DOMAIN_INFO: PartialValue = PartialValue::new_uuid(*UUID_DOMAIN_INFO); diff --git a/kanidmd/idm/src/plugins/dyngroup.rs b/kanidmd/idm/src/plugins/dyngroup.rs index c890216ee..d4956fea2 100644 --- a/kanidmd/idm/src/plugins/dyngroup.rs +++ b/kanidmd/idm/src/plugins/dyngroup.rs @@ -1,9 +1,11 @@ +use std::collections::BTreeMap; +use std::sync::Arc; + +use kanidm_proto::v1::Filter as ProtoFilter; + use crate::event::{CreateEvent, ModifyEvent}; use crate::filter::FilterInvalid; use crate::prelude::*; -use kanidm_proto::v1::Filter as ProtoFilter; -use std::collections::BTreeMap; -use std::sync::Arc; lazy_static! { static ref CLASS_DYNGROUP: PartialValue = PartialValue::new_class("dyngroup"); @@ -361,9 +363,10 @@ impl DynGroup { #[cfg(test)] mod tests { - use crate::prelude::*; use kanidm_proto::v1::Filter as ProtoFilter; + use crate::prelude::*; + const UUID_TEST_GROUP: Uuid = uuid::uuid!("7bfd9931-06c2-4608-8a46-78719bb746fe"); #[test] diff --git a/kanidmd/idm/src/plugins/gidnumber.rs b/kanidmd/idm/src/plugins/gidnumber.rs index b491f7c29..b81dbc073 100644 --- a/kanidmd/idm/src/plugins/gidnumber.rs +++ b/kanidmd/idm/src/plugins/gidnumber.rs @@ -1,11 +1,12 @@ // A plugin that generates gid numbers on types that require them for posix // support. +use std::iter::once; + use crate::event::{CreateEvent, ModifyEvent}; use crate::plugins::Plugin; use crate::prelude::*; use crate::utils::uuid_to_gid_u32; -use std::iter::once; /// Systemd dynamic units allocate between 61184–65519, most distros allocate /// system uids from 0 - 1000, and many others give user ids between 1000 to diff --git a/kanidmd/idm/src/plugins/jwskeygen.rs b/kanidmd/idm/src/plugins/jwskeygen.rs index b5974024e..6b9ed4056 100644 --- a/kanidmd/idm/src/plugins/jwskeygen.rs +++ b/kanidmd/idm/src/plugins/jwskeygen.rs @@ -1,8 +1,9 @@ +use compact_jwt::JwsSigner; + use crate::event::{CreateEvent, ModifyEvent}; use crate::plugins::Plugin; use crate::prelude::*; use crate::utils::password_from_random; -use compact_jwt::JwsSigner; lazy_static! { static ref CLASS_OAUTH2_BASIC: PartialValue = diff --git a/kanidmd/idm/src/plugins/memberof.rs b/kanidmd/idm/src/plugins/memberof.rs index ab1c37548..2485ea754 100644 --- a/kanidmd/idm/src/plugins/memberof.rs +++ b/kanidmd/idm/src/plugins/memberof.rs @@ -10,16 +10,17 @@ // As a result, we first need to run refint to clean up all dangling references, then memberof // fixes the graph of memberships +use std::collections::BTreeSet; +use std::sync::Arc; + +use hashbrown::HashMap; +use kanidm_proto::v1::{ConsistencyError, OperationError}; + use crate::entry::{Entry, EntryCommitted, EntrySealed, EntryTuple}; use crate::event::{CreateEvent, DeleteEvent, ModifyEvent}; use crate::plugins::Plugin; use crate::prelude::*; use crate::value::{PartialValue, Value}; -use kanidm_proto::v1::{ConsistencyError, OperationError}; -use std::collections::BTreeSet; - -use hashbrown::HashMap; -use std::sync::Arc; lazy_static! { static ref CLASS_GROUP: PartialValue = PartialValue::new_class("group"); diff --git a/kanidmd/idm/src/plugins/mod.rs b/kanidmd/idm/src/plugins/mod.rs index d49197430..f38597ca4 100644 --- a/kanidmd/idm/src/plugins/mod.rs +++ b/kanidmd/idm/src/plugins/mod.rs @@ -3,12 +3,13 @@ //! helps to ensure that data is always in specific known states within the //! 
`QueryServer` +use std::sync::Arc; + +use kanidm_proto::v1::{ConsistencyError, OperationError}; + use crate::entry::{Entry, EntryCommitted, EntryInvalid, EntryNew, EntrySealed}; use crate::event::{CreateEvent, DeleteEvent, ModifyEvent}; use crate::prelude::*; -use kanidm_proto::v1::{ConsistencyError, OperationError}; -use std::sync::Arc; -use tracing::trace_span; mod attrunique; mod base; @@ -118,108 +119,99 @@ macro_rules! run_verify_plugin { } impl Plugins { + #[instrument(level = "debug", name = "plugins::run_pre_create_transform", skip_all)] pub fn run_pre_create_transform( qs: &QueryServerWriteTransaction, cand: &mut Vec>, ce: &CreateEvent, ) -> Result<(), OperationError> { - spanned!("plugins::run_pre_create_transform", { - base::Base::pre_create_transform(qs, cand, ce) - .and_then(|_| password_import::PasswordImport::pre_create_transform(qs, cand, ce)) - .and_then(|_| jwskeygen::JwsKeygen::pre_create_transform(qs, cand, ce)) - .and_then(|_| gidnumber::GidNumber::pre_create_transform(qs, cand, ce)) - .and_then(|_| domain::Domain::pre_create_transform(qs, cand, ce)) - .and_then(|_| spn::Spn::pre_create_transform(qs, cand, ce)) - // Should always be last - .and_then(|_| attrunique::AttrUnique::pre_create_transform(qs, cand, ce)) - }) + base::Base::pre_create_transform(qs, cand, ce) + .and_then(|_| password_import::PasswordImport::pre_create_transform(qs, cand, ce)) + .and_then(|_| jwskeygen::JwsKeygen::pre_create_transform(qs, cand, ce)) + .and_then(|_| gidnumber::GidNumber::pre_create_transform(qs, cand, ce)) + .and_then(|_| domain::Domain::pre_create_transform(qs, cand, ce)) + .and_then(|_| spn::Spn::pre_create_transform(qs, cand, ce)) + // Should always be last + .and_then(|_| attrunique::AttrUnique::pre_create_transform(qs, cand, ce)) } + #[instrument(level = "debug", name = "plugins::run_pre_create", skip_all)] pub fn run_pre_create( qs: &QueryServerWriteTransaction, cand: &[Entry], ce: &CreateEvent, ) -> Result<(), OperationError> { - spanned!("plugins::run_pre_create", { - protected::Protected::pre_create(qs, cand, ce) - }) + protected::Protected::pre_create(qs, cand, ce) } + #[instrument(level = "debug", name = "plugins::run_post_create", skip_all)] pub fn run_post_create( qs: &QueryServerWriteTransaction, cand: &[Entry], ce: &CreateEvent, ) -> Result<(), OperationError> { - spanned!("plugins::run_post_create", { - refint::ReferentialIntegrity::post_create(qs, cand, ce) - .and_then(|_| memberof::MemberOf::post_create(qs, cand, ce)) - }) + refint::ReferentialIntegrity::post_create(qs, cand, ce) + .and_then(|_| memberof::MemberOf::post_create(qs, cand, ce)) } + #[instrument(level = "debug", name = "plugins::run_pre_modify", skip_all)] pub fn run_pre_modify( qs: &QueryServerWriteTransaction, cand: &mut Vec>, me: &ModifyEvent, ) -> Result<(), OperationError> { - spanned!("plugins::run_pre_modify", { - protected::Protected::pre_modify(qs, cand, me) - .and_then(|_| base::Base::pre_modify(qs, cand, me)) - .and_then(|_| password_import::PasswordImport::pre_modify(qs, cand, me)) - .and_then(|_| jwskeygen::JwsKeygen::pre_modify(qs, cand, me)) - .and_then(|_| gidnumber::GidNumber::pre_modify(qs, cand, me)) - .and_then(|_| domain::Domain::pre_modify(qs, cand, me)) - .and_then(|_| spn::Spn::pre_modify(qs, cand, me)) - // attr unique should always be last - .and_then(|_| attrunique::AttrUnique::pre_modify(qs, cand, me)) - }) + protected::Protected::pre_modify(qs, cand, me) + .and_then(|_| base::Base::pre_modify(qs, cand, me)) + .and_then(|_| password_import::PasswordImport::pre_modify(qs, 
cand, me)) + .and_then(|_| jwskeygen::JwsKeygen::pre_modify(qs, cand, me)) + .and_then(|_| gidnumber::GidNumber::pre_modify(qs, cand, me)) + .and_then(|_| domain::Domain::pre_modify(qs, cand, me)) + .and_then(|_| spn::Spn::pre_modify(qs, cand, me)) + // attr unique should always be last + .and_then(|_| attrunique::AttrUnique::pre_modify(qs, cand, me)) } + #[instrument(level = "debug", name = "plugins::run_post_modify", skip_all)] pub fn run_post_modify( qs: &QueryServerWriteTransaction, pre_cand: &[Arc>], cand: &[Entry], me: &ModifyEvent, ) -> Result<(), OperationError> { - spanned!("plugins::run_post_modify", { - refint::ReferentialIntegrity::post_modify(qs, pre_cand, cand, me) - .and_then(|_| spn::Spn::post_modify(qs, pre_cand, cand, me)) - .and_then(|_| memberof::MemberOf::post_modify(qs, pre_cand, cand, me)) - }) + refint::ReferentialIntegrity::post_modify(qs, pre_cand, cand, me) + .and_then(|_| spn::Spn::post_modify(qs, pre_cand, cand, me)) + .and_then(|_| memberof::MemberOf::post_modify(qs, pre_cand, cand, me)) } + #[instrument(level = "debug", name = "plugins::run_pre_delete", skip_all)] pub fn run_pre_delete( qs: &QueryServerWriteTransaction, cand: &mut Vec>, de: &DeleteEvent, ) -> Result<(), OperationError> { - spanned!("plugins::run_pre_delete", { - protected::Protected::pre_delete(qs, cand, de) - }) + protected::Protected::pre_delete(qs, cand, de) } + #[instrument(level = "debug", name = "plugins::run_post_delete", skip_all)] pub fn run_post_delete( qs: &QueryServerWriteTransaction, cand: &[Entry], de: &DeleteEvent, ) -> Result<(), OperationError> { - spanned!("plugins::run_post_delete", { - refint::ReferentialIntegrity::post_delete(qs, cand, de) - .and_then(|_| memberof::MemberOf::post_delete(qs, cand, de)) - }) + refint::ReferentialIntegrity::post_delete(qs, cand, de) + .and_then(|_| memberof::MemberOf::post_delete(qs, cand, de)) } + #[instrument(level = "debug", name = "plugins::run_verify", skip_all)] pub fn run_verify( qs: &QueryServerReadTransaction, results: &mut Vec>, ) { - let _entered = trace_span!("plugins::run_verify").entered(); - spanned!("plugins::run_verify", { - run_verify_plugin!(qs, results, base::Base); - run_verify_plugin!(qs, results, attrunique::AttrUnique); - run_verify_plugin!(qs, results, refint::ReferentialIntegrity); - run_verify_plugin!(qs, results, dyngroup::DynGroup); - run_verify_plugin!(qs, results, memberof::MemberOf); - run_verify_plugin!(qs, results, spn::Spn); - }) + run_verify_plugin!(qs, results, base::Base); + run_verify_plugin!(qs, results, attrunique::AttrUnique); + run_verify_plugin!(qs, results, refint::ReferentialIntegrity); + run_verify_plugin!(qs, results, dyngroup::DynGroup); + run_verify_plugin!(qs, results, memberof::MemberOf); + run_verify_plugin!(qs, results, spn::Spn); } } diff --git a/kanidmd/idm/src/plugins/password_import.rs b/kanidmd/idm/src/plugins/password_import.rs index cd1f31dad..50a1cd2bd 100644 --- a/kanidmd/idm/src/plugins/password_import.rs +++ b/kanidmd/idm/src/plugins/password_import.rs @@ -1,11 +1,13 @@ // Transform password import requests into proper kanidm credentials. 
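`run_pre_modify` and its siblings above chain every plugin hook with `and_then`, so the first `Err` short-circuits the remaining plugins and fails the whole phase; ordering is significant, which is why attrunique is deliberately last. A self-contained sketch of that short-circuiting shape, with hypothetical stand-in plugins:

```rust
#[derive(Debug)]
enum OpError {
    Plugin(&'static str),
}

// Hypothetical plugin hooks standing in for base, jwskeygen, attrunique, etc.
fn base_plugin() -> Result<(), OpError> { Ok(()) }
fn keygen_plugin() -> Result<(), OpError> { Err(OpError::Plugin("keygen refused")) }
fn attrunique_plugin() -> Result<(), OpError> { Ok(()) }

// and_then only invokes the next stage on Ok, so keygen_plugin's error
// aborts the chain and attrunique_plugin never runs in this example.
fn run_pre_modify() -> Result<(), OpError> {
    base_plugin()
        .and_then(|_| keygen_plugin())
        .and_then(|_| attrunique_plugin())
}

fn main() {
    assert!(matches!(run_pre_modify(), Err(OpError::Plugin(_))));
}
```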
+use std::convert::TryFrom; +use std::iter::once; + +use kanidm_proto::v1::PluginError; + use crate::credential::{Credential, Password}; use crate::event::{CreateEvent, ModifyEvent}; use crate::plugins::Plugin; use crate::prelude::*; -use kanidm_proto::v1::PluginError; -use std::convert::TryFrom; -use std::iter::once; pub struct PasswordImport {} diff --git a/kanidmd/idm/src/plugins/protected.rs b/kanidmd/idm/src/plugins/protected.rs index edddc875c..53d028346 100644 --- a/kanidmd/idm/src/plugins/protected.rs +++ b/kanidmd/idm/src/plugins/protected.rs @@ -1,12 +1,12 @@ // System protected objects. Items matching specific requirements // may only have certain modifications performed. -use crate::plugins::Plugin; -use crate::prelude::*; +use hashbrown::HashSet; use crate::event::{CreateEvent, DeleteEvent, ModifyEvent}; use crate::modify::Modify; -use hashbrown::HashSet; +use crate::plugins::Plugin; +use crate::prelude::*; pub struct Protected {} diff --git a/kanidmd/idm/src/plugins/refint.rs b/kanidmd/idm/src/plugins/refint.rs index be6730de4..f3d251589 100644 --- a/kanidmd/idm/src/plugins/refint.rs +++ b/kanidmd/idm/src/plugins/refint.rs @@ -9,19 +9,19 @@ // when that is written, as they *both* manipulate and alter entry reference // data, so we should be careful not to step on each other. -use hashbrown::HashSet as Set; use std::collections::BTreeSet; +use std::sync::Arc; -use crate::plugins::Plugin; -use crate::prelude::*; +use hashbrown::HashSet as Set; +use kanidm_proto::v1::{ConsistencyError, PluginError}; +use tracing::trace; use crate::event::{CreateEvent, DeleteEvent, ModifyEvent}; use crate::filter::f_eq; use crate::modify::Modify; +use crate::plugins::Plugin; +use crate::prelude::*; use crate::schema::SchemaTransaction; -use kanidm_proto::v1::{ConsistencyError, PluginError}; -use std::sync::Arc; -use tracing::trace; // NOTE: This *must* be after base.rs!!! @@ -265,9 +265,10 @@ impl Plugin for ReferentialIntegrity { #[cfg(test)] mod tests { - use crate::prelude::*; use kanidm_proto::v1::PluginError; + use crate::prelude::*; + // The create references a uuid that doesn't exist - reject #[test] fn test_create_uuid_reference_not_exist() { diff --git a/kanidmd/idm/src/plugins/spn.rs b/kanidmd/idm/src/plugins/spn.rs index b553503b5..70b9ad915 100644 --- a/kanidmd/idm/src/plugins/spn.rs +++ b/kanidmd/idm/src/plugins/spn.rs @@ -1,16 +1,17 @@ // Generate and manage spn's for all entries in the domain. Also deals with // the infrequent - but possible - case where a domain is renamed. 
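Almost every hunk in this diff, including the plugin files above, is the same mechanical change: imports regrouped into standard library, external crates, then crate-local paths, each tier alphabetised, as nightly rustfmt's `group_imports = "StdExternalCrate"` and `imports_granularity = "Module"` settings produce. A layout sketch (assuming the `uuid` crate with its `v4` feature; `helpers` is a hypothetical local module):

```rust
use std::collections::BTreeSet; // tier 1: standard library
use std::time::Duration;

use uuid::Uuid; // tier 2: external crates

use crate::helpers::label; // tier 3: this crate

mod helpers {
    // Hypothetical crate-local item so the third tier has something to import.
    pub fn label(id: &super::Uuid) -> String {
        format!("entry-{id}")
    }
}

fn main() {
    let mut seen = BTreeSet::new();
    seen.insert(label(&Uuid::new_v4()));
    println!("{} entries ({:?})", seen.len(), Duration::from_millis(1));
}
```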
-use crate::plugins::Plugin; -use crate::prelude::*; +use std::iter::once; +use std::sync::Arc; + +// use crate::value::{PartialValue, Value}; +use kanidm_proto::v1::{ConsistencyError, OperationError}; use crate::constants::UUID_DOMAIN_INFO; use crate::entry::{Entry, EntryCommitted, EntryInvalid, EntryNew, EntrySealed}; use crate::event::{CreateEvent, ModifyEvent}; +use crate::plugins::Plugin; +use crate::prelude::*; use crate::value::PartialValue; -// use crate::value::{PartialValue, Value}; -use kanidm_proto::v1::{ConsistencyError, OperationError}; -use std::iter::once; -use std::sync::Arc; pub struct Spn {} diff --git a/kanidmd/idm/src/repl/cid.rs b/kanidmd/idm/src/repl/cid.rs index 503d0f053..65f675903 100644 --- a/kanidmd/idm/src/repl/cid.rs +++ b/kanidmd/idm/src/repl/cid.rs @@ -1,7 +1,8 @@ -use kanidm_proto::v1::OperationError; -use serde::{Deserialize, Serialize}; use std::fmt; use std::time::Duration; + +use kanidm_proto::v1::OperationError; +use serde::{Deserialize, Serialize}; use uuid::Uuid; #[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Eq, PartialOrd, Ord, Hash)] @@ -73,11 +74,13 @@ impl Cid { #[cfg(test)] mod tests { - use crate::repl::cid::Cid; use std::cmp::Ordering; use std::time::Duration; + use uuid::Uuid; + use crate::repl::cid::Cid; + #[test] fn test_cid_ordering() { // Check diff ts diff --git a/kanidmd/idm/src/repl/entry.rs b/kanidmd/idm/src/repl/entry.rs index e2dc5e057..4eb2e3a88 100644 --- a/kanidmd/idm/src/repl/entry.rs +++ b/kanidmd/idm/src/repl/entry.rs @@ -1,18 +1,17 @@ -use super::cid::Cid; -use crate::prelude::*; -use crate::valueset; -use kanidm_proto::v1::ConsistencyError; - -use crate::entry::{compare_attrs, Eattrs}; -use crate::schema::SchemaTransaction; - use std::collections::btree_map::Keys; use std::collections::BTreeMap; - use std::fmt; use std::ops::Bound; use std::ops::Bound::*; +use kanidm_proto::v1::ConsistencyError; + +use super::cid::Cid; +use crate::entry::{compare_attrs, Eattrs}; +use crate::prelude::*; +use crate::schema::SchemaTransaction; +use crate::valueset; + lazy_static! { static ref PVCLASS_TOMBSTONE: PartialValue = PartialValue::new_class("tombstone"); static ref PVCLASS_RECYCLED: PartialValue = PartialValue::new_class("recycled"); @@ -518,12 +517,13 @@ impl EntryChangelog { #[cfg(test)] mod tests { + use std::time::Duration; + use crate::entry::Eattrs; // use crate::prelude::*; use crate::repl::cid::Cid; use crate::repl::entry::{Change, EntryChangelog, State, Transition}; use crate::schema::{Schema, SchemaTransaction}; - use std::time::Duration; #[test] fn test_entrychangelog_basic() { diff --git a/kanidmd/idm/src/repl/ruv.rs b/kanidmd/idm/src/repl/ruv.rs index e07064ab9..f02ad3efc 100644 --- a/kanidmd/idm/src/repl/ruv.rs +++ b/kanidmd/idm/src/repl/ruv.rs @@ -1,13 +1,15 @@ -use crate::prelude::*; -use crate::repl::cid::Cid; -use concread::bptree::{BptreeMap, BptreeMapReadTxn, BptreeMapWriteTxn}; -use idlset::v2::IDLBitRange; -use kanidm_proto::v1::ConsistencyError; use std::cmp::Ordering; use std::collections::BTreeMap; use std::ops::Bound::*; use std::sync::Arc; +use concread::bptree::{BptreeMap, BptreeMapReadTxn, BptreeMapWriteTxn}; +use idlset::v2::IDLBitRange; +use kanidm_proto::v1::ConsistencyError; + +use crate::prelude::*; +use crate::repl::cid::Cid; + pub struct ReplicationUpdateVector { // This sorts by time. Should we look up by IDL or by UUID? 
// I think IDL, because when we need to actually do the look ups we'll need diff --git a/kanidmd/idm/src/schema.rs b/kanidmd/idm/src/schema.rs index c3924c763..8f027df5a 100644 --- a/kanidmd/idm/src/schema.rs +++ b/kanidmd/idm/src/schema.rs @@ -16,17 +16,17 @@ //! [`Attributes`]: struct.SchemaAttribute.html //! [`Classes`]: struct.SchemaClass.html +use std::collections::BTreeSet; + +use concread::cowcell::*; +use hashbrown::{HashMap, HashSet}; +use kanidm_proto::v1::{ConsistencyError, OperationError, SchemaError}; +use tracing::trace; +use uuid::Uuid; + use crate::be::IdxKey; use crate::prelude::*; use crate::valueset::ValueSet; -use kanidm_proto::v1::{ConsistencyError, OperationError, SchemaError}; -use tracing::trace; - -use hashbrown::{HashMap, HashSet}; -use std::collections::BTreeSet; -use uuid::Uuid; - -use concread::cowcell::*; // representations of schema that confines object types, classes // and attributes. This ties in deeply with "Entry". @@ -642,124 +642,124 @@ impl<'a> SchemaWriteTransaction<'a> { .collect() } + #[instrument(level = "debug", name = "schema::generate_in_memory", skip_all)] pub fn generate_in_memory(&mut self) -> Result<(), OperationError> { - spanned!("schema::generate_in_memory", { - // - self.classes.clear(); - self.attributes.clear(); - // Bootstrap in definitions of our own schema types - // First, add all the needed core attributes for schema parsing - self.attributes.insert( - AttrString::from("class"), - SchemaAttribute { - name: AttrString::from("class"), - uuid: UUID_SCHEMA_ATTR_CLASS, - description: String::from("The set of classes defining an object"), - multivalue: true, - unique: false, - phantom: false, - index: vec![IndexType::Equality, IndexType::Presence], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("uuid"), - SchemaAttribute { - name: AttrString::from("uuid"), - uuid: UUID_SCHEMA_ATTR_UUID, - description: String::from("The universal unique id of the object"), - multivalue: false, - // Uniqueness is handled by base.rs, not attrunique here due to - // needing to check recycled objects too. - unique: false, - phantom: false, - index: vec![IndexType::Equality, IndexType::Presence], - syntax: SyntaxType::Uuid, - }, - ); - self.attributes.insert( - AttrString::from("last_modified_cid"), - SchemaAttribute { - name: AttrString::from("last_modified_cid"), - uuid: UUID_SCHEMA_ATTR_LAST_MOD_CID, - description: String::from("The cid of the last change to this object"), - multivalue: false, - // Uniqueness is handled by base.rs, not attrunique here due to - // needing to check recycled objects too. 
- unique: false, - phantom: false, - index: vec![], - syntax: SyntaxType::Cid, - }, - ); - self.attributes.insert( - AttrString::from("name"), - SchemaAttribute { - name: AttrString::from("name"), - uuid: UUID_SCHEMA_ATTR_NAME, - description: String::from("The shortform name of an object"), - multivalue: false, - unique: true, - phantom: false, - index: vec![IndexType::Equality, IndexType::Presence], - syntax: SyntaxType::Utf8StringIname, - }, - ); - self.attributes.insert( - AttrString::from("spn"), - SchemaAttribute { - name: AttrString::from("spn"), - uuid: UUID_SCHEMA_ATTR_SPN, - description: String::from( - "The Security Principal Name of an object, unique across all domain trusts", - ), - multivalue: false, - unique: true, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::SecurityPrincipalName, - }, - ); - self.attributes.insert( - AttrString::from("attributename"), - SchemaAttribute { - name: AttrString::from("attributename"), - uuid: UUID_SCHEMA_ATTR_ATTRIBUTENAME, - description: String::from("The name of a schema attribute"), - multivalue: false, - unique: true, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("classname"), - SchemaAttribute { - name: AttrString::from("classname"), - uuid: UUID_SCHEMA_ATTR_CLASSNAME, - description: String::from("The name of a schema class"), - multivalue: false, - unique: true, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("description"), - SchemaAttribute { - name: AttrString::from("description"), - uuid: UUID_SCHEMA_ATTR_DESCRIPTION, - description: String::from("A description of an attribute, object or class"), - multivalue: false, - unique: false, - phantom: false, - index: vec![], - syntax: SyntaxType::Utf8String, - }, - ); - self.attributes.insert(AttrString::from("multivalue"), SchemaAttribute { + // + self.classes.clear(); + self.attributes.clear(); + // Bootstrap in definitions of our own schema types + // First, add all the needed core attributes for schema parsing + self.attributes.insert( + AttrString::from("class"), + SchemaAttribute { + name: AttrString::from("class"), + uuid: UUID_SCHEMA_ATTR_CLASS, + description: String::from("The set of classes defining an object"), + multivalue: true, + unique: false, + phantom: false, + index: vec![IndexType::Equality, IndexType::Presence], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("uuid"), + SchemaAttribute { + name: AttrString::from("uuid"), + uuid: UUID_SCHEMA_ATTR_UUID, + description: String::from("The universal unique id of the object"), + multivalue: false, + // Uniqueness is handled by base.rs, not attrunique here due to + // needing to check recycled objects too. + unique: false, + phantom: false, + index: vec![IndexType::Equality, IndexType::Presence], + syntax: SyntaxType::Uuid, + }, + ); + self.attributes.insert( + AttrString::from("last_modified_cid"), + SchemaAttribute { + name: AttrString::from("last_modified_cid"), + uuid: UUID_SCHEMA_ATTR_LAST_MOD_CID, + description: String::from("The cid of the last change to this object"), + multivalue: false, + // Uniqueness is handled by base.rs, not attrunique here due to + // needing to check recycled objects too. 
+ unique: false, + phantom: false, + index: vec![], + syntax: SyntaxType::Cid, + }, + ); + self.attributes.insert( + AttrString::from("name"), + SchemaAttribute { + name: AttrString::from("name"), + uuid: UUID_SCHEMA_ATTR_NAME, + description: String::from("The shortform name of an object"), + multivalue: false, + unique: true, + phantom: false, + index: vec![IndexType::Equality, IndexType::Presence], + syntax: SyntaxType::Utf8StringIname, + }, + ); + self.attributes.insert( + AttrString::from("spn"), + SchemaAttribute { + name: AttrString::from("spn"), + uuid: UUID_SCHEMA_ATTR_SPN, + description: String::from( + "The Security Principal Name of an object, unique across all domain trusts", + ), + multivalue: false, + unique: true, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::SecurityPrincipalName, + }, + ); + self.attributes.insert( + AttrString::from("attributename"), + SchemaAttribute { + name: AttrString::from("attributename"), + uuid: UUID_SCHEMA_ATTR_ATTRIBUTENAME, + description: String::from("The name of a schema attribute"), + multivalue: false, + unique: true, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("classname"), + SchemaAttribute { + name: AttrString::from("classname"), + uuid: UUID_SCHEMA_ATTR_CLASSNAME, + description: String::from("The name of a schema class"), + multivalue: false, + unique: true, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("description"), + SchemaAttribute { + name: AttrString::from("description"), + uuid: UUID_SCHEMA_ATTR_DESCRIPTION, + description: String::from("A description of an attribute, object or class"), + multivalue: false, + unique: false, + phantom: false, + index: vec![], + syntax: SyntaxType::Utf8String, + }, + ); + self.attributes.insert(AttrString::from("multivalue"), SchemaAttribute { name: AttrString::from("multivalue"), uuid: UUID_SCHEMA_ATTR_MULTIVALUE, description: String::from("If true, this attribute is able to store multiple values rather than just a single value."), @@ -769,7 +769,7 @@ impl<'a> SchemaWriteTransaction<'a> { index: vec![], syntax: SyntaxType::Boolean, }); - self.attributes.insert(AttrString::from("phantom"), SchemaAttribute { + self.attributes.insert(AttrString::from("phantom"), SchemaAttribute { name: AttrString::from("phantom"), uuid: UUID_SCHEMA_ATTR_PHANTOM, description: String::from("If true, this attribute must NOT be present in any may/must sets of a class as. 
This represents generated attributes."), @@ -779,107 +779,112 @@ impl<'a> SchemaWriteTransaction<'a> { index: vec![], syntax: SyntaxType::Boolean, }); - self.attributes.insert(AttrString::from("unique"), SchemaAttribute { + self.attributes.insert( + AttrString::from("unique"), + SchemaAttribute { name: AttrString::from("unique"), uuid: UUID_SCHEMA_ATTR_UNIQUE, - description: String::from("If true, this attribute must store a unique value through out the database."), + description: String::from( + "If true, this attribute must store a unique value through out the database.", + ), multivalue: false, unique: false, phantom: false, index: vec![], syntax: SyntaxType::Boolean, - }); - self.attributes.insert( - AttrString::from("index"), - SchemaAttribute { - name: AttrString::from("index"), - uuid: UUID_SCHEMA_ATTR_INDEX, - description: String::from( - "Describe the indexes to apply to instances of this attribute.", - ), - multivalue: true, - unique: false, - phantom: false, - index: vec![], - syntax: SyntaxType::IndexId, - }, - ); - self.attributes.insert( - AttrString::from("syntax"), - SchemaAttribute { - name: AttrString::from("syntax"), - uuid: UUID_SCHEMA_ATTR_SYNTAX, - description: String::from( - "Describe the syntax of this attribute. This affects indexing and sorting.", - ), - multivalue: false, - unique: false, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::SyntaxId, - }, - ); - self.attributes.insert( - AttrString::from("systemmay"), - SchemaAttribute { - name: AttrString::from("systemmay"), - uuid: UUID_SCHEMA_ATTR_SYSTEMMAY, - description: String::from( - "A list of system provided optional attributes this class can store.", - ), - multivalue: true, - unique: false, - phantom: false, - index: vec![], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("may"), - SchemaAttribute { - name: AttrString::from("may"), - uuid: UUID_SCHEMA_ATTR_MAY, - description: String::from( - "A user modifiable list of optional attributes this class can store.", - ), - multivalue: true, - unique: false, - phantom: false, - index: vec![], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("systemmust"), - SchemaAttribute { - name: AttrString::from("systemmust"), - uuid: UUID_SCHEMA_ATTR_SYSTEMMUST, - description: String::from( - "A list of system provided required attributes this class must store.", - ), - multivalue: true, - unique: false, - phantom: false, - index: vec![], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("must"), - SchemaAttribute { - name: AttrString::from("must"), - uuid: UUID_SCHEMA_ATTR_MUST, - description: String::from( - "A user modifiable list of required attributes this class must store.", - ), - multivalue: true, - unique: false, - phantom: false, - index: vec![], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( + }, + ); + self.attributes.insert( + AttrString::from("index"), + SchemaAttribute { + name: AttrString::from("index"), + uuid: UUID_SCHEMA_ATTR_INDEX, + description: String::from( + "Describe the indexes to apply to instances of this attribute.", + ), + multivalue: true, + unique: false, + phantom: false, + index: vec![], + syntax: SyntaxType::IndexId, + }, + ); + self.attributes.insert( + AttrString::from("syntax"), + SchemaAttribute { + name: AttrString::from("syntax"), + uuid: UUID_SCHEMA_ATTR_SYNTAX, + description: String::from( + "Describe the syntax of this 
attribute. This affects indexing and sorting.", + ), + multivalue: false, + unique: false, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::SyntaxId, + }, + ); + self.attributes.insert( + AttrString::from("systemmay"), + SchemaAttribute { + name: AttrString::from("systemmay"), + uuid: UUID_SCHEMA_ATTR_SYSTEMMAY, + description: String::from( + "A list of system provided optional attributes this class can store.", + ), + multivalue: true, + unique: false, + phantom: false, + index: vec![], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("may"), + SchemaAttribute { + name: AttrString::from("may"), + uuid: UUID_SCHEMA_ATTR_MAY, + description: String::from( + "A user modifiable list of optional attributes this class can store.", + ), + multivalue: true, + unique: false, + phantom: false, + index: vec![], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("systemmust"), + SchemaAttribute { + name: AttrString::from("systemmust"), + uuid: UUID_SCHEMA_ATTR_SYSTEMMUST, + description: String::from( + "A list of system provided required attributes this class must store.", + ), + multivalue: true, + unique: false, + phantom: false, + index: vec![], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("must"), + SchemaAttribute { + name: AttrString::from("must"), + uuid: UUID_SCHEMA_ATTR_MUST, + description: String::from( + "A user modifiable list of required attributes this class must store.", + ), + multivalue: true, + unique: false, + phantom: false, + index: vec![], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( AttrString::from("systemsupplements"), SchemaAttribute { name: AttrString::from("systemsupplements"), @@ -894,7 +899,7 @@ impl<'a> SchemaWriteTransaction<'a> { syntax: SyntaxType::Utf8StringInsensitive, }, ); - self.attributes.insert( + self.attributes.insert( AttrString::from("supplements"), SchemaAttribute { name: AttrString::from("supplements"), @@ -909,22 +914,22 @@ impl<'a> SchemaWriteTransaction<'a> { syntax: SyntaxType::Utf8StringInsensitive, }, ); - self.attributes.insert( - AttrString::from("systemexcludes"), - SchemaAttribute { - name: AttrString::from("systemexcludes"), - uuid: UUID_SCHEMA_ATTR_SYSTEMEXCLUDES, - description: String::from( - "A set of classes that are denied presence in connection to this class", - ), - multivalue: true, - unique: false, - phantom: false, - index: vec![], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( + self.attributes.insert( + AttrString::from("systemexcludes"), + SchemaAttribute { + name: AttrString::from("systemexcludes"), + uuid: UUID_SCHEMA_ATTR_SYSTEMEXCLUDES, + description: String::from( + "A set of classes that are denied presence in connection to this class", + ), + multivalue: true, + unique: false, + phantom: false, + index: vec![], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( AttrString::from("excludes"), SchemaAttribute { name: AttrString::from("excludes"), @@ -940,9 +945,9 @@ impl<'a> SchemaWriteTransaction<'a> { }, ); - // SYSINFO attrs - // ACP attributes. - self.attributes.insert( + // SYSINFO attrs + // ACP attributes. 
+ self.attributes.insert( AttrString::from("acp_enable"), SchemaAttribute { name: AttrString::from("acp_enable"), @@ -956,107 +961,111 @@ impl<'a> SchemaWriteTransaction<'a> { }, ); - self.attributes.insert( - AttrString::from("acp_receiver"), - SchemaAttribute { - name: AttrString::from("acp_receiver"), - uuid: UUID_SCHEMA_ATTR_ACP_RECEIVER, - description: String::from( - "Who the ACP applies to, constraining or allowing operations.", - ), - multivalue: false, - unique: false, - phantom: false, - index: vec![IndexType::Equality, IndexType::SubString], - syntax: SyntaxType::JsonFilter, - }, - ); - self.attributes.insert( - AttrString::from("acp_targetscope"), - SchemaAttribute { - name: AttrString::from("acp_targetscope"), - uuid: UUID_SCHEMA_ATTR_ACP_TARGETSCOPE, - description: String::from( - "The effective targets of the ACP, IE what will be acted upon.", - ), - multivalue: false, - unique: false, - phantom: false, - index: vec![IndexType::Equality, IndexType::SubString], - syntax: SyntaxType::JsonFilter, - }, - ); - self.attributes.insert( - AttrString::from("acp_search_attr"), - SchemaAttribute { - name: AttrString::from("acp_search_attr"), - uuid: UUID_SCHEMA_ATTR_ACP_SEARCH_ATTR, - description: String::from("The attributes that may be viewed or searched by the receiver on targetscope."), - multivalue: true, - unique: false, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("acp_create_class"), - SchemaAttribute { - name: AttrString::from("acp_create_class"), - uuid: UUID_SCHEMA_ATTR_ACP_CREATE_CLASS, - description: String::from( - "The set of classes that can be created on a new entry.", - ), - multivalue: true, - unique: false, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("acp_create_attr"), - SchemaAttribute { - name: AttrString::from("acp_create_attr"), - uuid: UUID_SCHEMA_ATTR_ACP_CREATE_ATTR, - description: String::from( - "The set of attribute types that can be created on an entry.", - ), - multivalue: true, - unique: false, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); + self.attributes.insert( + AttrString::from("acp_receiver"), + SchemaAttribute { + name: AttrString::from("acp_receiver"), + uuid: UUID_SCHEMA_ATTR_ACP_RECEIVER, + description: String::from( + "Who the ACP applies to, constraining or allowing operations.", + ), + multivalue: false, + unique: false, + phantom: false, + index: vec![IndexType::Equality, IndexType::SubString], + syntax: SyntaxType::JsonFilter, + }, + ); + self.attributes.insert( + AttrString::from("acp_targetscope"), + SchemaAttribute { + name: AttrString::from("acp_targetscope"), + uuid: UUID_SCHEMA_ATTR_ACP_TARGETSCOPE, + description: String::from( + "The effective targets of the ACP, IE what will be acted upon.", + ), + multivalue: false, + unique: false, + phantom: false, + index: vec![IndexType::Equality, IndexType::SubString], + syntax: SyntaxType::JsonFilter, + }, + ); + self.attributes.insert( + AttrString::from("acp_search_attr"), + SchemaAttribute { + name: AttrString::from("acp_search_attr"), + uuid: UUID_SCHEMA_ATTR_ACP_SEARCH_ATTR, + description: String::from( + "The attributes that may be viewed or searched by the receiver on targetscope.", + ), + multivalue: true, + unique: false, + phantom: false, + index: vec![IndexType::Equality], + syntax: 
SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("acp_create_class"), + SchemaAttribute { + name: AttrString::from("acp_create_class"), + uuid: UUID_SCHEMA_ATTR_ACP_CREATE_CLASS, + description: String::from("The set of classes that can be created on a new entry."), + multivalue: true, + unique: false, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("acp_create_attr"), + SchemaAttribute { + name: AttrString::from("acp_create_attr"), + uuid: UUID_SCHEMA_ATTR_ACP_CREATE_ATTR, + description: String::from( + "The set of attribute types that can be created on an entry.", + ), + multivalue: true, + unique: false, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); - self.attributes.insert( - AttrString::from("acp_modify_removedattr"), - SchemaAttribute { - name: AttrString::from("acp_modify_removedattr"), - uuid: UUID_SCHEMA_ATTR_ACP_MODIFY_REMOVEDATTR, - description: String::from("The set of attribute types that could be removed or purged in a modification."), - multivalue: true, - unique: false, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("acp_modify_presentattr"), - SchemaAttribute { - name: AttrString::from("acp_modify_presentattr"), - uuid: UUID_SCHEMA_ATTR_ACP_MODIFY_PRESENTATTR, - description: String::from("The set of attribute types that could be added or asserted in a modification."), - multivalue: true, - unique: false, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( + self.attributes.insert( + AttrString::from("acp_modify_removedattr"), + SchemaAttribute { + name: AttrString::from("acp_modify_removedattr"), + uuid: UUID_SCHEMA_ATTR_ACP_MODIFY_REMOVEDATTR, + description: String::from( + "The set of attribute types that could be removed or purged in a modification.", + ), + multivalue: true, + unique: false, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("acp_modify_presentattr"), + SchemaAttribute { + name: AttrString::from("acp_modify_presentattr"), + uuid: UUID_SCHEMA_ATTR_ACP_MODIFY_PRESENTATTR, + description: String::from( + "The set of attribute types that could be added or asserted in a modification.", + ), + multivalue: true, + unique: false, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( AttrString::from("acp_modify_class"), SchemaAttribute { name: AttrString::from("acp_modify_class"), @@ -1069,341 +1078,341 @@ impl<'a> SchemaWriteTransaction<'a> { syntax: SyntaxType::Utf8StringInsensitive, }, ); - // MO/Member - self.attributes.insert( - AttrString::from("memberof"), - SchemaAttribute { - name: AttrString::from("memberof"), - uuid: UUID_SCHEMA_ATTR_MEMBEROF, - description: String::from("reverse group membership of the object"), - multivalue: true, - unique: false, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::ReferenceUuid, - }, - ); - self.attributes.insert( - AttrString::from("directmemberof"), - SchemaAttribute { - name: AttrString::from("directmemberof"), - uuid: UUID_SCHEMA_ATTR_DIRECTMEMBEROF, - description: String::from("reverse direct group membership of 
the object"), - multivalue: true, - unique: false, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::ReferenceUuid, - }, - ); - self.attributes.insert( - AttrString::from("member"), - SchemaAttribute { - name: AttrString::from("member"), - uuid: UUID_SCHEMA_ATTR_MEMBER, - description: String::from("List of members of the group"), - multivalue: true, - unique: false, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::ReferenceUuid, - }, - ); - // Migration related - self.attributes.insert( - AttrString::from("version"), - SchemaAttribute { - name: AttrString::from("version"), - uuid: UUID_SCHEMA_ATTR_VERSION, - description: String::from( - "The systems internal migration version for provided objects", - ), - multivalue: false, - unique: false, - phantom: false, - index: vec![], - syntax: SyntaxType::Uint32, - }, - ); - // Domain for sysinfo - self.attributes.insert( - AttrString::from("domain"), - SchemaAttribute { - name: AttrString::from("domain"), - uuid: UUID_SCHEMA_ATTR_DOMAIN, - description: String::from("A DNS Domain name entry."), - multivalue: true, - unique: false, - phantom: false, - index: vec![IndexType::Equality], - syntax: SyntaxType::Utf8StringIname, - }, - ); - self.attributes.insert( - AttrString::from("claim"), - SchemaAttribute { - name: AttrString::from("claim"), - uuid: UUID_SCHEMA_ATTR_CLAIM, - description: String::from( - "The string identifier of an extracted claim that can be filtered", - ), - multivalue: true, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("scope"), - SchemaAttribute { - name: AttrString::from("scope"), - uuid: UUID_SCHEMA_ATTR_SCOPE, - description: String::from( - "The string identifier of a permission scope in a session", - ), - multivalue: true, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); + // MO/Member + self.attributes.insert( + AttrString::from("memberof"), + SchemaAttribute { + name: AttrString::from("memberof"), + uuid: UUID_SCHEMA_ATTR_MEMBEROF, + description: String::from("reverse group membership of the object"), + multivalue: true, + unique: false, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::ReferenceUuid, + }, + ); + self.attributes.insert( + AttrString::from("directmemberof"), + SchemaAttribute { + name: AttrString::from("directmemberof"), + uuid: UUID_SCHEMA_ATTR_DIRECTMEMBEROF, + description: String::from("reverse direct group membership of the object"), + multivalue: true, + unique: false, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::ReferenceUuid, + }, + ); + self.attributes.insert( + AttrString::from("member"), + SchemaAttribute { + name: AttrString::from("member"), + uuid: UUID_SCHEMA_ATTR_MEMBER, + description: String::from("List of members of the group"), + multivalue: true, + unique: false, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::ReferenceUuid, + }, + ); + // Migration related + self.attributes.insert( + AttrString::from("version"), + SchemaAttribute { + name: AttrString::from("version"), + uuid: UUID_SCHEMA_ATTR_VERSION, + description: String::from( + "The systems internal migration version for provided objects", + ), + multivalue: false, + unique: false, + phantom: false, + index: vec![], + syntax: SyntaxType::Uint32, + }, + ); + // Domain for sysinfo + self.attributes.insert( + AttrString::from("domain"), + 
SchemaAttribute { + name: AttrString::from("domain"), + uuid: UUID_SCHEMA_ATTR_DOMAIN, + description: String::from("A DNS Domain name entry."), + multivalue: true, + unique: false, + phantom: false, + index: vec![IndexType::Equality], + syntax: SyntaxType::Utf8StringIname, + }, + ); + self.attributes.insert( + AttrString::from("claim"), + SchemaAttribute { + name: AttrString::from("claim"), + uuid: UUID_SCHEMA_ATTR_CLAIM, + description: String::from( + "The string identifier of an extracted claim that can be filtered", + ), + multivalue: true, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("scope"), + SchemaAttribute { + name: AttrString::from("scope"), + uuid: UUID_SCHEMA_ATTR_SCOPE, + description: String::from( + "The string identifier of a permission scope in a session", + ), + multivalue: true, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); - self.attributes.insert( - AttrString::from("password_import"), - SchemaAttribute { - name: AttrString::from("password_import"), - uuid: UUID_SCHEMA_ATTR_PASSWORD_IMPORT, - description: String::from("An imported password hash from an external system."), - multivalue: true, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::Utf8String, - }, - ); + self.attributes.insert( + AttrString::from("password_import"), + SchemaAttribute { + name: AttrString::from("password_import"), + uuid: UUID_SCHEMA_ATTR_PASSWORD_IMPORT, + description: String::from("An imported password hash from an external system."), + multivalue: true, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::Utf8String, + }, + ); - // LDAP Masking Phantoms - self.attributes.insert( - AttrString::from("dn"), - SchemaAttribute { - name: AttrString::from("dn"), - uuid: UUID_SCHEMA_ATTR_DN, - description: String::from("An LDAP Compatible DN"), - multivalue: false, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("entrydn"), - SchemaAttribute { - name: AttrString::from("entrydn"), - uuid: UUID_SCHEMA_ATTR_ENTRYDN, - description: String::from("An LDAP Compatible EntryDN"), - multivalue: false, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("entryuuid"), - SchemaAttribute { - name: AttrString::from("entryuuid"), - uuid: UUID_SCHEMA_ATTR_ENTRYUUID, - description: String::from("An LDAP Compatible entryUUID"), - multivalue: false, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::Uuid, - }, - ); - self.attributes.insert( - AttrString::from("objectclass"), - SchemaAttribute { - name: AttrString::from("objectclass"), - uuid: UUID_SCHEMA_ATTR_OBJECTCLASS, - description: String::from("An LDAP Compatible objectClass"), - multivalue: true, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::Utf8StringInsensitive, - }, - ); - self.attributes.insert( - AttrString::from("cn"), - SchemaAttribute { - name: AttrString::from("cn"), - uuid: UUID_SCHEMA_ATTR_CN, - description: String::from("An LDAP Compatible objectClass"), - multivalue: false, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::Utf8StringIname, - }, - ); - self.attributes.insert( - AttrString::from("keys"), - SchemaAttribute { - name: AttrString::from("keys"), - uuid: 
UUID_SCHEMA_ATTR_KEYS, - description: String::from("An LDAP Compatible keys (ssh)"), - multivalue: true, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::SshKey, - }, - ); - self.attributes.insert( - AttrString::from("sshpublickey"), - SchemaAttribute { - name: AttrString::from("sshpublickey"), - uuid: UUID_SCHEMA_ATTR_SSHPUBLICKEY, - description: String::from("An LDAP Compatible sshPublicKey"), - multivalue: true, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::SshKey, - }, - ); - self.attributes.insert( - AttrString::from("email"), - SchemaAttribute { - name: AttrString::from("email"), - uuid: UUID_SCHEMA_ATTR_EMAIL, - description: String::from("An LDAP Compatible email"), - multivalue: true, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::EmailAddress, - }, - ); - self.attributes.insert( - AttrString::from("emailaddress"), - SchemaAttribute { - name: AttrString::from("emailaddress"), - uuid: UUID_SCHEMA_ATTR_EMAILADDRESS, - description: String::from("An LDAP Compatible emailAddress"), - multivalue: true, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::EmailAddress, - }, - ); - self.attributes.insert( - AttrString::from("uidnumber"), - SchemaAttribute { - name: AttrString::from("uidnumber"), - uuid: UUID_SCHEMA_ATTR_UIDNUMBER, - description: String::from("An LDAP Compatible uidNumber"), - multivalue: false, - unique: false, - phantom: true, - index: vec![], - syntax: SyntaxType::Uint32, - }, - ); - // end LDAP masking phantoms + // LDAP Masking Phantoms + self.attributes.insert( + AttrString::from("dn"), + SchemaAttribute { + name: AttrString::from("dn"), + uuid: UUID_SCHEMA_ATTR_DN, + description: String::from("An LDAP Compatible DN"), + multivalue: false, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("entrydn"), + SchemaAttribute { + name: AttrString::from("entrydn"), + uuid: UUID_SCHEMA_ATTR_ENTRYDN, + description: String::from("An LDAP Compatible EntryDN"), + multivalue: false, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("entryuuid"), + SchemaAttribute { + name: AttrString::from("entryuuid"), + uuid: UUID_SCHEMA_ATTR_ENTRYUUID, + description: String::from("An LDAP Compatible entryUUID"), + multivalue: false, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::Uuid, + }, + ); + self.attributes.insert( + AttrString::from("objectclass"), + SchemaAttribute { + name: AttrString::from("objectclass"), + uuid: UUID_SCHEMA_ATTR_OBJECTCLASS, + description: String::from("An LDAP Compatible objectClass"), + multivalue: true, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::Utf8StringInsensitive, + }, + ); + self.attributes.insert( + AttrString::from("cn"), + SchemaAttribute { + name: AttrString::from("cn"), + uuid: UUID_SCHEMA_ATTR_CN, + description: String::from("An LDAP Compatible objectClass"), + multivalue: false, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::Utf8StringIname, + }, + ); + self.attributes.insert( + AttrString::from("keys"), + SchemaAttribute { + name: AttrString::from("keys"), + uuid: UUID_SCHEMA_ATTR_KEYS, + description: String::from("An LDAP Compatible keys (ssh)"), + multivalue: true, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::SshKey, + }, + ); + 
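The hunks above repeat a single registration pattern: each definition is a `SchemaAttribute` value keyed by its name in the transaction's attribute map, with `phantom: true` marking attributes (such as these LDAP compatibility ones) that are schema-checked but never stored on entries. Below is a minimal, self-contained sketch of that pattern; the types are simplified stand-ins, not Kanidm's real `AttrString`/`SchemaAttribute`.

```rust
use std::collections::HashMap;

// Simplified stand-ins for the shapes shown in the diff; the real
// SchemaAttribute also carries uuid and index metadata.
#[derive(Debug)]
enum SyntaxType {
    SshKey,
}

#[derive(Debug)]
struct SchemaAttribute {
    name: String,
    description: String,
    multivalue: bool,
    unique: bool,
    // Phantom attributes are validated against the schema but never persisted.
    phantom: bool,
    syntax: SyntaxType,
}

fn main() {
    let mut attributes: HashMap<String, SchemaAttribute> = HashMap::new();
    // Mirrors `self.attributes.insert(AttrString::from(...), SchemaAttribute { .. })`.
    attributes.insert(
        "sshpublickey".to_string(),
        SchemaAttribute {
            name: "sshpublickey".to_string(),
            description: "An LDAP Compatible sshPublicKey".to_string(),
            multivalue: true,
            unique: false,
            phantom: true,
            syntax: SyntaxType::SshKey,
        },
    );
    println!("{:?}", attributes.get("sshpublickey"));
}
```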
self.attributes.insert( + AttrString::from("sshpublickey"), + SchemaAttribute { + name: AttrString::from("sshpublickey"), + uuid: UUID_SCHEMA_ATTR_SSHPUBLICKEY, + description: String::from("An LDAP Compatible sshPublicKey"), + multivalue: true, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::SshKey, + }, + ); + self.attributes.insert( + AttrString::from("email"), + SchemaAttribute { + name: AttrString::from("email"), + uuid: UUID_SCHEMA_ATTR_EMAIL, + description: String::from("An LDAP Compatible email"), + multivalue: true, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::EmailAddress, + }, + ); + self.attributes.insert( + AttrString::from("emailaddress"), + SchemaAttribute { + name: AttrString::from("emailaddress"), + uuid: UUID_SCHEMA_ATTR_EMAILADDRESS, + description: String::from("An LDAP Compatible emailAddress"), + multivalue: true, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::EmailAddress, + }, + ); + self.attributes.insert( + AttrString::from("uidnumber"), + SchemaAttribute { + name: AttrString::from("uidnumber"), + uuid: UUID_SCHEMA_ATTR_UIDNUMBER, + description: String::from("An LDAP Compatible uidNumber"), + multivalue: false, + unique: false, + phantom: true, + index: vec![], + syntax: SyntaxType::Uint32, + }, + ); + // end LDAP masking phantoms - self.classes.insert( - AttrString::from("attributetype"), - SchemaClass { - name: AttrString::from("attributetype"), - uuid: UUID_SCHEMA_CLASS_ATTRIBUTETYPE, - description: String::from("Definition of a schema attribute"), - systemmay: vec![AttrString::from("phantom"), AttrString::from("index")], - systemmust: vec![ - AttrString::from("class"), - AttrString::from("attributename"), - AttrString::from("multivalue"), - AttrString::from("unique"), - AttrString::from("syntax"), - AttrString::from("description"), - ], - systemexcludes: vec![AttrString::from("classtype")], - ..Default::default() - }, - ); - self.classes.insert( - AttrString::from("classtype"), - SchemaClass { - name: AttrString::from("classtype"), - uuid: UUID_SCHEMA_CLASS_CLASSTYPE, - description: String::from("Definition of a schema classtype"), - systemmay: vec![ - AttrString::from("systemmay"), - AttrString::from("may"), - AttrString::from("systemmust"), - AttrString::from("must"), - AttrString::from("systemsupplements"), - AttrString::from("supplements"), - AttrString::from("systemexcludes"), - AttrString::from("excludes"), - ], - systemmust: vec![ - AttrString::from("class"), - AttrString::from("classname"), - AttrString::from("description"), - ], - systemexcludes: vec![AttrString::from("attributetype")], - ..Default::default() - }, - ); - self.classes.insert( - AttrString::from("object"), - SchemaClass { - name: AttrString::from("object"), - uuid: UUID_SCHEMA_CLASS_OBJECT, - description: String::from( - "A system created class that all objects must contain", - ), - systemmay: vec![AttrString::from("description")], - systemmust: vec![ - AttrString::from("class"), - AttrString::from("uuid"), - AttrString::from("last_modified_cid"), - ], - ..Default::default() - }, - ); - self.classes.insert( - AttrString::from("memberof"), - SchemaClass { - name: AttrString::from("memberof"), - uuid: UUID_SCHEMA_CLASS_MEMBEROF, - description: String::from("Class that is dynamically added to recipients of memberof or directmemberof"), - systemmay: vec![ - AttrString::from("memberof"), - AttrString::from("directmemberof") - ], - .. 
Default::default() - }, - ); - self.classes.insert( - AttrString::from("extensibleobject"), - SchemaClass { - name: AttrString::from("extensibleobject"), - uuid: UUID_SCHEMA_CLASS_EXTENSIBLEOBJECT, - description: String::from( - "A class type that has green hair and turns off all rules ...", - ), - ..Default::default() - }, - ); - /* These two classes are core to the entry lifecycle for recycling and tombstoning */ - self.classes.insert( + self.classes.insert( + AttrString::from("attributetype"), + SchemaClass { + name: AttrString::from("attributetype"), + uuid: UUID_SCHEMA_CLASS_ATTRIBUTETYPE, + description: String::from("Definition of a schema attribute"), + systemmay: vec![AttrString::from("phantom"), AttrString::from("index")], + systemmust: vec![ + AttrString::from("class"), + AttrString::from("attributename"), + AttrString::from("multivalue"), + AttrString::from("unique"), + AttrString::from("syntax"), + AttrString::from("description"), + ], + systemexcludes: vec![AttrString::from("classtype")], + ..Default::default() + }, + ); + self.classes.insert( + AttrString::from("classtype"), + SchemaClass { + name: AttrString::from("classtype"), + uuid: UUID_SCHEMA_CLASS_CLASSTYPE, + description: String::from("Definition of a schema classtype"), + systemmay: vec![ + AttrString::from("systemmay"), + AttrString::from("may"), + AttrString::from("systemmust"), + AttrString::from("must"), + AttrString::from("systemsupplements"), + AttrString::from("supplements"), + AttrString::from("systemexcludes"), + AttrString::from("excludes"), + ], + systemmust: vec![ + AttrString::from("class"), + AttrString::from("classname"), + AttrString::from("description"), + ], + systemexcludes: vec![AttrString::from("attributetype")], + ..Default::default() + }, + ); + self.classes.insert( + AttrString::from("object"), + SchemaClass { + name: AttrString::from("object"), + uuid: UUID_SCHEMA_CLASS_OBJECT, + description: String::from("A system created class that all objects must contain"), + systemmay: vec![AttrString::from("description")], + systemmust: vec![ + AttrString::from("class"), + AttrString::from("uuid"), + AttrString::from("last_modified_cid"), + ], + ..Default::default() + }, + ); + self.classes.insert( + AttrString::from("memberof"), + SchemaClass { + name: AttrString::from("memberof"), + uuid: UUID_SCHEMA_CLASS_MEMBEROF, + description: String::from( + "Class that is dynamically added to recipients of memberof or directmemberof", + ), + systemmay: vec![ + AttrString::from("memberof"), + AttrString::from("directmemberof"), + ], + ..Default::default() + }, + ); + self.classes.insert( + AttrString::from("extensibleobject"), + SchemaClass { + name: AttrString::from("extensibleobject"), + uuid: UUID_SCHEMA_CLASS_EXTENSIBLEOBJECT, + description: String::from( + "A class type that has green hair and turns off all rules ...", + ), + ..Default::default() + }, + ); + /* These two classes are core to the entry lifecycle for recycling and tombstoning */ + self.classes.insert( AttrString::from("recycled"), SchemaClass { name: AttrString::from("recycled"), @@ -1412,7 +1421,7 @@ impl<'a> SchemaWriteTransaction<'a> { .. Default::default() }, ); - self.classes.insert( + self.classes.insert( AttrString::from("tombstone"), SchemaClass { name: AttrString::from("tombstone"), @@ -1425,89 +1434,89 @@ impl<'a> SchemaWriteTransaction<'a> { .. 
Default::default() }, ); - // sysinfo - self.classes.insert( - AttrString::from("system_info"), - SchemaClass { - name: AttrString::from("system_info"), - uuid: UUID_SCHEMA_CLASS_SYSTEM_INFO, - description: String::from("System metadata object class"), - systemmust: vec![AttrString::from("version")], - ..Default::default() - }, - ); - // ACP - self.classes.insert( - AttrString::from("access_control_profile"), - SchemaClass { - name: AttrString::from("access_control_profile"), - uuid: UUID_SCHEMA_CLASS_ACCESS_CONTROL_PROFILE, - description: String::from("System Access Control Profile Class"), - systemmay: vec![ - AttrString::from("acp_enable"), - AttrString::from("description"), - ], - systemmust: vec![ - AttrString::from("acp_receiver"), - AttrString::from("acp_targetscope"), - AttrString::from("name"), - ], - systemsupplements: vec![ - AttrString::from("access_control_search"), - AttrString::from("access_control_delete"), - AttrString::from("access_control_modify"), - AttrString::from("access_control_create"), - ], - ..Default::default() - }, - ); - self.classes.insert( - AttrString::from("access_control_search"), - SchemaClass { - name: AttrString::from("access_control_search"), - uuid: UUID_SCHEMA_CLASS_ACCESS_CONTROL_SEARCH, - description: String::from("System Access Control Search Class"), - systemmust: vec![AttrString::from("acp_search_attr")], - ..Default::default() - }, - ); - self.classes.insert( - AttrString::from("access_control_delete"), - SchemaClass { - name: AttrString::from("access_control_delete"), - uuid: UUID_SCHEMA_CLASS_ACCESS_CONTROL_DELETE, - description: String::from("System Access Control DELETE Class"), - ..Default::default() - }, - ); - self.classes.insert( - AttrString::from("access_control_modify"), - SchemaClass { - name: AttrString::from("access_control_modify"), - uuid: UUID_SCHEMA_CLASS_ACCESS_CONTROL_MODIFY, - description: String::from("System Access Control Modify Class"), - systemmay: vec![ - AttrString::from("acp_modify_removedattr"), - AttrString::from("acp_modify_presentattr"), - AttrString::from("acp_modify_class"), - ], - ..Default::default() - }, - ); - self.classes.insert( - AttrString::from("access_control_create"), - SchemaClass { - name: AttrString::from("access_control_create"), - uuid: UUID_SCHEMA_CLASS_ACCESS_CONTROL_CREATE, - description: String::from("System Access Control Create Class"), - systemmay: vec![ - AttrString::from("acp_create_class"), - AttrString::from("acp_create_attr"), - ], - ..Default::default() - }, - ); - self.classes.insert( + // sysinfo + self.classes.insert( + AttrString::from("system_info"), + SchemaClass { + name: AttrString::from("system_info"), + uuid: UUID_SCHEMA_CLASS_SYSTEM_INFO, + description: String::from("System metadata object class"), + systemmust: vec![AttrString::from("version")], + ..Default::default() + }, + ); + // ACP + self.classes.insert( + AttrString::from("access_control_profile"), + SchemaClass { + name: AttrString::from("access_control_profile"), + uuid: UUID_SCHEMA_CLASS_ACCESS_CONTROL_PROFILE, + description: String::from("System Access Control Profile Class"), + systemmay: vec![ + AttrString::from("acp_enable"), + AttrString::from("description"), + ], + systemmust: vec![ + AttrString::from("acp_receiver"), + AttrString::from("acp_targetscope"), + AttrString::from("name"), + ], + systemsupplements: vec![ + AttrString::from("access_control_search"), + AttrString::from("access_control_delete"), + AttrString::from("access_control_modify"), + AttrString::from("access_control_create"), + ], + 
..Default::default() + }, + ); + self.classes.insert( + AttrString::from("access_control_search"), + SchemaClass { + name: AttrString::from("access_control_search"), + uuid: UUID_SCHEMA_CLASS_ACCESS_CONTROL_SEARCH, + description: String::from("System Access Control Search Class"), + systemmust: vec![AttrString::from("acp_search_attr")], + ..Default::default() + }, + ); + self.classes.insert( + AttrString::from("access_control_delete"), + SchemaClass { + name: AttrString::from("access_control_delete"), + uuid: UUID_SCHEMA_CLASS_ACCESS_CONTROL_DELETE, + description: String::from("System Access Control DELETE Class"), + ..Default::default() + }, + ); + self.classes.insert( + AttrString::from("access_control_modify"), + SchemaClass { + name: AttrString::from("access_control_modify"), + uuid: UUID_SCHEMA_CLASS_ACCESS_CONTROL_MODIFY, + description: String::from("System Access Control Modify Class"), + systemmay: vec![ + AttrString::from("acp_modify_removedattr"), + AttrString::from("acp_modify_presentattr"), + AttrString::from("acp_modify_class"), + ], + ..Default::default() + }, + ); + self.classes.insert( + AttrString::from("access_control_create"), + SchemaClass { + name: AttrString::from("access_control_create"), + uuid: UUID_SCHEMA_CLASS_ACCESS_CONTROL_CREATE, + description: String::from("System Access Control Create Class"), + systemmay: vec![ + AttrString::from("acp_create_class"), + AttrString::from("acp_create_attr"), + ], + ..Default::default() + }, + ); + self.classes.insert( AttrString::from("system"), SchemaClass { name: AttrString::from("system"), @@ -1517,15 +1526,14 @@ impl<'a> SchemaWriteTransaction<'a> { }, ); - let r = self.validate(); - if r.is_empty() { - admin_debug!("schema validate -> passed"); - Ok(()) - } else { - admin_error!(err = ?r, "schema validate -> errors"); - Err(OperationError::ConsistencyError(r)) - } - }) + let r = self.validate(); + if r.is_empty() { + admin_debug!("schema validate -> passed"); + Ok(()) + } else { + admin_error!(err = ?r, "schema validate -> errors"); + Err(OperationError::ConsistencyError(r)) + } } } @@ -1624,12 +1632,14 @@ impl Schema { #[cfg(test)] mod tests { - use crate::prelude::*; - use crate::schema::SchemaTransaction; - use crate::schema::{IndexType, Schema, SchemaAttribute, SchemaClass, SyntaxType}; use kanidm_proto::v1::{ConsistencyError, SchemaError}; use uuid::Uuid; + use crate::prelude::*; + use crate::schema::{ + IndexType, Schema, SchemaAttribute, SchemaClass, SchemaTransaction, SyntaxType, + }; + // use crate::proto_v1::Filter as ProtoFilter; macro_rules! validate_schema { diff --git a/kanidmd/idm/src/server.rs b/kanidmd/idm/src/server.rs index c13c34aa2..5c69bbe6f 100644 --- a/kanidmd/idm/src/server.rs +++ b/kanidmd/idm/src/server.rs @@ -3,13 +3,15 @@ // This is really only used for long lived, high level types that need clone // that otherwise can't be cloned. Think Mutex. +use std::cell::Cell; +use std::sync::Arc; +use std::time::Duration; + use async_std::task; use concread::arcache::{ARCache, ARCacheBuilder, ARCacheReadTxn}; use concread::cowcell::*; use hashbrown::{HashMap, HashSet}; -use std::cell::Cell; -use std::sync::Arc; -use std::time::Duration; +use kanidm_proto::v1::{ConsistencyError, SchemaError}; use tokio::sync::{Semaphore, SemaphorePermit}; use tracing::trace; @@ -19,7 +21,6 @@ use crate::access::{ AccessControlsWriteTransaction, }; use crate::be::{Backend, BackendReadTransaction, BackendTransaction, BackendWriteTransaction}; -use crate::prelude::*; // We use so many, we just import them all ... 
use crate::event::{ CreateEvent, DeleteEvent, ExistsEvent, ModifyEvent, ReviveRecycledEvent, SearchEvent, @@ -29,13 +30,13 @@ use crate::identity::IdentityId; use crate::modify::{Modify, ModifyInvalid, ModifyList, ModifyValid}; use crate::plugins::dyngroup::{DynGroup, DynGroupCache}; use crate::plugins::Plugins; +use crate::prelude::*; use crate::repl::cid::Cid; use crate::schema::{ Schema, SchemaAttribute, SchemaClass, SchemaReadTransaction, SchemaTransaction, SchemaWriteTransaction, }; use crate::valueset::uuid_to_proto_string; -use kanidm_proto::v1::{ConsistencyError, SchemaError}; const RESOLVE_FILTER_CACHE_MAX: usize = 4096; const RESOLVE_FILTER_CACHE_LOCAL: usize = 0; @@ -177,107 +178,103 @@ pub trait QueryServerTransaction<'a> { /// [`SearchEvent`]: ../event/struct.SearchEvent.html /// [`access`]: ../access/index.html /// [`fn search`]: trait.QueryServerTransaction.html#method.search + #[instrument(level = "debug", skip_all)] fn search_ext( &self, se: &SearchEvent, ) -> Result>, OperationError> { - spanned!("server::search_ext", { - /* - * This just wraps search, but it's for the external interface - * so as a result it also reduces the entry set's attributes at - * the end. - */ - let entries = self.search(se)?; + /* + * This just wraps search, but it's for the external interface + * so as a result it also reduces the entry set's attributes at + * the end. + */ + let entries = self.search(se)?; - let access = self.get_accesscontrols(); - access - .search_filter_entry_attributes(se, entries) - .map_err(|e| { - // Log and fail if something went wrong. - admin_error!(?e, "Failed to filter entry attributes"); - e - }) - // This now returns the reduced vec. - }) + let access = self.get_accesscontrols(); + access + .search_filter_entry_attributes(se, entries) + .map_err(|e| { + // Log and fail if something went wrong. + admin_error!(?e, "Failed to filter entry attributes"); + e + }) + // This now returns the reduced vec. } + #[instrument(level = "debug", skip_all)] fn search(&self, se: &SearchEvent) -> Result>, OperationError> { - spanned!("server::search", { - if se.ident.is_internal() { - trace!(internal_filter = ?se.filter, "search"); - } else { - security_info!(initiator = %se.ident, "search"); - admin_info!(external_filter = ?se.filter, "search"); - } + if se.ident.is_internal() { + trace!(internal_filter = ?se.filter, "search"); + } else { + security_info!(initiator = %se.ident, "search"); + admin_info!(external_filter = ?se.filter, "search"); + } - // This is an important security step because it prevents us from - // performing un-indexed searches on attr's that don't exist in the - // server. This is why ExtensibleObject can only take schema that - // exists in the server, not arbitrary attr names. - // - // This normalises and validates in a single step. - // - // NOTE: Filters are validated in event conversion. + // This is an important security step because it prevents us from + // performing un-indexed searches on attr's that don't exist in the + // server. This is why ExtensibleObject can only take schema that + // exists in the server, not arbitrary attr names. + // + // This normalises and validates in a single step. + // + // NOTE: Filters are validated in event conversion. - let resolve_filter_cache = self.get_resolve_filter_cache(); + let resolve_filter_cache = self.get_resolve_filter_cache(); - let be_txn = self.get_be_txn(); - let idxmeta = be_txn.get_idxmeta_ref(); - // Now resolve all references and indexes. 
- let vfr = spanned!("server::search", { - se.filter - .resolve(&se.ident, Some(idxmeta), Some(resolve_filter_cache)) - }) + let be_txn = self.get_be_txn(); + let idxmeta = be_txn.get_idxmeta_ref(); + // Now resolve all references and indexes. + let vfr = se + .filter + .resolve(&se.ident, Some(idxmeta), Some(resolve_filter_cache)) .map_err(|e| { admin_error!(?e, "search filter resolve failure"); e })?; - let lims = se.get_limits(); + let lims = se.get_limits(); - // NOTE: We currently can't build search plugins due to the inability to hand - // the QS wr/ro to the plugin trait. However, there shouldn't be a need for search - // plugins, because all data transforms should be in the write path. + // NOTE: We currently can't build search plugins due to the inability to hand + // the QS wr/ro to the plugin trait. However, there shouldn't be a need for search + // plugins, because all data transforms should be in the write path. - let res = self.get_be_txn().search(lims, &vfr).map_err(|e| { - admin_error!(?e, "backend failure"); - OperationError::Backend - })?; + let res = self.get_be_txn().search(lims, &vfr).map_err(|e| { + admin_error!(?e, "backend failure"); + OperationError::Backend + })?; - // Apply ACP before we let the plugins "have at it". - // WARNING; for external searches this is NOT the only - // ACP application. There is a second application to reduce the - // attribute set on the entries! - // - let access = self.get_accesscontrols(); - access.search_filter_entries(se, res).map_err(|e| { - admin_error!(?e, "Unable to access filter entries"); - e - }) + // Apply ACP before we let the plugins "have at it". + // WARNING; for external searches this is NOT the only + // ACP application. There is a second application to reduce the + // attribute set on the entries! 
+ // + let access = self.get_accesscontrols(); + access.search_filter_entries(se, res).map_err(|e| { + admin_error!(?e, "Unable to access filter entries"); + e }) } + #[instrument(level = "debug", skip_all)] fn exists(&self, ee: &ExistsEvent) -> Result { - spanned!("server::exists", { - let be_txn = self.get_be_txn(); - let idxmeta = be_txn.get_idxmeta_ref(); + let be_txn = self.get_be_txn(); + let idxmeta = be_txn.get_idxmeta_ref(); - let resolve_filter_cache = self.get_resolve_filter_cache(); + let resolve_filter_cache = self.get_resolve_filter_cache(); - let vfr = ee - .filter - .resolve(&ee.ident, Some(idxmeta), Some(resolve_filter_cache)) - .map_err(|e| { - admin_error!(?e, "Failed to resolve filter"); - e - })?; + let vfr = ee + .filter + .resolve(&ee.ident, Some(idxmeta), Some(resolve_filter_cache)) + .map_err(|e| { + admin_error!(?e, "Failed to resolve filter"); + e + })?; - let lims = ee.get_limits(); + let lims = ee.get_limits(); - self.get_be_txn().exists(lims, &vfr).map_err(|e| { - admin_error!(?e, "backend failure"); - OperationError::Backend - }) + self.get_be_txn().exists(lims, &vfr).map_err(|e| { + admin_error!(?e, "backend failure"); + OperationError::Backend }) } @@ -327,42 +324,39 @@ pub trait QueryServerTransaction<'a> { } /// From internal, generate an "exists" event and dispatch + #[instrument(level = "debug", skip_all)] fn internal_exists(&self, filter: Filter) -> Result { - spanned!("server::internal_exists", { - // Check the filter - let f_valid = filter - .validate(self.get_schema()) - .map_err(OperationError::SchemaViolation)?; - // Build an exists event - let ee = ExistsEvent::new_internal(f_valid); - // Submit it - self.exists(&ee) - }) + // Check the filter + let f_valid = filter + .validate(self.get_schema()) + .map_err(OperationError::SchemaViolation)?; + // Build an exists event + let ee = ExistsEvent::new_internal(f_valid); + // Submit it + self.exists(&ee) } + #[instrument(level = "debug", skip_all)] fn internal_search( &self, filter: Filter, ) -> Result>, OperationError> { - spanned!("server::internal_search", { - let f_valid = filter - .validate(self.get_schema()) - .map_err(OperationError::SchemaViolation)?; - let se = SearchEvent::new_internal(f_valid); - self.search(&se) - }) + let f_valid = filter + .validate(self.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let se = SearchEvent::new_internal(f_valid); + self.search(&se) } + #[instrument(level = "debug", skip_all)] fn impersonate_search_valid( &self, f_valid: Filter, f_intent_valid: Filter, event: &Identity, ) -> Result>, OperationError> { - spanned!("server::internal_search_valid", { - let se = SearchEvent::new_impersonate(event, f_valid, f_intent_valid); - self.search(&se) - }) + let se = SearchEvent::new_impersonate(event, f_valid, f_intent_valid); + self.search(&se) } /// Applies ACP to filter result entries. 
@@ -392,78 +386,73 @@ pub trait QueryServerTransaction<'a> { self.impersonate_search_valid(f_valid, f_intent_valid, event) } + #[instrument(level = "debug", skip_all)] fn impersonate_search_ext( &self, filter: Filter, filter_intent: Filter, event: &Identity, ) -> Result>, OperationError> { - spanned!("server::internal_search_ext_valid", { - let f_valid = filter - .validate(self.get_schema()) - .map_err(OperationError::SchemaViolation)?; - let f_intent_valid = filter_intent - .validate(self.get_schema()) - .map_err(OperationError::SchemaViolation)?; - self.impersonate_search_ext_valid(f_valid, f_intent_valid, event) - }) + let f_valid = filter + .validate(self.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let f_intent_valid = filter_intent + .validate(self.get_schema()) + .map_err(OperationError::SchemaViolation)?; + self.impersonate_search_ext_valid(f_valid, f_intent_valid, event) } /// Get a single entry by its UUID. This is used heavily for internal /// server operations, especially in login and ACP checks. + #[instrument(level = "debug", skip_all)] fn internal_search_uuid( &self, uuid: &Uuid, ) -> Result, OperationError> { - spanned!("server::internal_search_uuid", { - let filter = filter!(f_eq("uuid", PartialValue::new_uuid(*uuid))); - let f_valid = spanned!("server::internal_search_uuid", { - filter - .validate(self.get_schema()) - .map_err(OperationError::SchemaViolation) // I feel like we should log this... - })?; - let se = SearchEvent::new_internal(f_valid); + let filter = filter!(f_eq("uuid", PartialValue::new_uuid(*uuid))); + let f_valid = filter.validate(self.get_schema()).map_err(|e| { + error!(?e, "Filter Validate - SchemaViolation"); + OperationError::SchemaViolation(e) + })?; + let se = SearchEvent::new_internal(f_valid); - let mut vs = self.search(&se)?; - match vs.pop() { - Some(entry) if vs.is_empty() => Ok(entry), - _ => Err(OperationError::NoMatchingEntries), - } - }) + let mut vs = self.search(&se)?; + match vs.pop() { + Some(entry) if vs.is_empty() => Ok(entry), + _ => Err(OperationError::NoMatchingEntries), + } } + #[instrument(level = "debug", skip_all)] fn impersonate_search_ext_uuid( &self, uuid: &Uuid, event: &Identity, ) -> Result, OperationError> { - spanned!("server::internal_search_ext_uuid", { - let filter_intent = filter_all!(f_eq("uuid", PartialValue::new_uuid(*uuid))); - let filter = filter!(f_eq("uuid", PartialValue::new_uuid(*uuid))); + let filter_intent = filter_all!(f_eq("uuid", PartialValue::new_uuid(*uuid))); + let filter = filter!(f_eq("uuid", PartialValue::new_uuid(*uuid))); - let mut vs = self.impersonate_search_ext(filter, filter_intent, event)?; - match vs.pop() { - Some(entry) if vs.is_empty() => Ok(entry), - _ => Err(OperationError::NoMatchingEntries), - } - }) + let mut vs = self.impersonate_search_ext(filter, filter_intent, event)?; + match vs.pop() { + Some(entry) if vs.is_empty() => Ok(entry), + _ => Err(OperationError::NoMatchingEntries), + } } + #[instrument(level = "debug", skip_all)] fn impersonate_search_uuid( &self, uuid: &Uuid, event: &Identity, ) -> Result, OperationError> { - spanned!("server::internal_search_uuid", { - let filter_intent = filter_all!(f_eq("uuid", PartialValue::new_uuid(*uuid))); - let filter = filter!(f_eq("uuid", PartialValue::new_uuid(*uuid))); + let filter_intent = filter_all!(f_eq("uuid", PartialValue::new_uuid(*uuid))); + let filter = filter!(f_eq("uuid", PartialValue::new_uuid(*uuid))); - let mut vs = self.impersonate_search(filter, filter_intent, event)?; - match vs.pop() { - Some(entry) 
if vs.is_empty() => Ok(entry), - _ => Err(OperationError::NoMatchingEntries), - } - }) + let mut vs = self.impersonate_search(filter, filter_intent, event)?; + match vs.pop() { + Some(entry) if vs.is_empty() => Ok(entry), + _ => Err(OperationError::NoMatchingEntries), + } } /// Do a schema aware conversion from a String:String to String:Value for modification @@ -807,20 +796,18 @@ pub trait QueryServerTransaction<'a> { // This is the core of the server, as it processes the entire event // applies all parts required in order and more. impl<'a> QueryServerTransaction<'a> for QueryServerReadTransaction<'a> { + type AccessControlsTransactionType = AccessControlsReadTransaction<'a>; type BackendTransactionType = BackendReadTransaction<'a>; + type SchemaTransactionType = SchemaReadTransaction; fn get_be_txn(&self) -> &BackendReadTransaction<'a> { &self.be_txn } - type SchemaTransactionType = SchemaReadTransaction; - fn get_schema(&self) -> &SchemaReadTransaction { &self.schema } - type AccessControlsTransactionType = AccessControlsReadTransaction<'a>; - fn get_accesscontrols(&self) -> &AccessControlsReadTransaction<'a> { &self.accesscontrols } @@ -890,20 +877,18 @@ impl<'a> QueryServerReadTransaction<'a> { // the entry changelogs are consistent to their entries. let schema = self.get_schema(); - spanned!("server::verify", { - let filt_all = filter!(f_pres("class")); - let all_entries = match self.internal_search(filt_all) { - Ok(a) => a, - Err(_e) => return vec![Err(ConsistencyError::QueryServerSearchFailure)], - }; + let filt_all = filter!(f_pres("class")); + let all_entries = match self.internal_search(filt_all) { + Ok(a) => a, + Err(_e) => return vec![Err(ConsistencyError::QueryServerSearchFailure)], + }; - for e in all_entries { - e.verify(schema, &mut results) - } + for e in all_entries { + e.verify(schema, &mut results) + } - // Verify the RUV to the entry changelogs now. - self.get_be_txn().verify_ruv(&mut results); - }); + // Verify the RUV to the entry changelogs now. + self.get_be_txn().verify_ruv(&mut results); // Ok entries passed, lets move on to the content. // Most of our checks are in the plugins, so we let them @@ -918,20 +903,18 @@ impl<'a> QueryServerReadTransaction<'a> { } impl<'a> QueryServerTransaction<'a> for QueryServerWriteTransaction<'a> { + type AccessControlsTransactionType = AccessControlsWriteTransaction<'a>; type BackendTransactionType = BackendWriteTransaction<'a>; + type SchemaTransactionType = SchemaWriteTransaction<'a>; fn get_be_txn(&self) -> &BackendWriteTransaction<'a> { &self.be_txn } - type SchemaTransactionType = SchemaWriteTransaction<'a>; - fn get_schema(&self) -> &SchemaWriteTransaction<'a> { &self.schema } - type AccessControlsTransactionType = AccessControlsWriteTransaction<'a>; - fn get_accesscontrols(&self) -> &AccessControlsWriteTransaction<'a> { &self.accesscontrols } @@ -1221,859 +1204,827 @@ impl QueryServer { } impl<'a> QueryServerWriteTransaction<'a> { + #[instrument(level = "debug", skip_all)] pub fn create(&self, ce: &CreateEvent) -> Result<(), OperationError> { - spanned!("server::create", { - // The create event is a raw, read only representation of the request - // that was made to us, including information about the identity - // performing the request. - if !ce.ident.is_internal() { - security_info!(name = %ce.ident, "create initiator"); - } + // The create event is a raw, read only representation of the request + // that was made to us, including information about the identity + // performing the request. 
+ if !ce.ident.is_internal() { + security_info!(name = %ce.ident, "create initiator"); + } - // Log the request + // Log the request - // TODO #67: Do we need limits on number of creates, or do we constrain - based on request size in the frontend? + // TODO #67: Do we need limits on number of creates, or do we constrain + // based on request size in the frontend? - // Copy the entries to a writeable form, this involves assigning a - change id so we can track what's happening. - let candidates: Vec> = ce.entries.clone(); + // Copy the entries to a writeable form, this involves assigning a + // change id so we can track what's happening. + let candidates: Vec> = ce.entries.clone(); - // Do we have rights to perform these creates? - // create_allow_operation - let access = self.get_accesscontrols(); - let op_allow = access - .create_allow_operation(ce, &candidates) - .map_err(|e| { - admin_error!("Failed to check create access {:?}", e); - e - })?; - if !op_allow { - return Err(OperationError::AccessDenied); - } - - // Before we assign replication metadata, we need to assert these entries - // are valid to create within the set of replication transitions. This - // means they *can not* be recycled or tombstones! - if candidates.iter().any(|e| e.mask_recycled_ts().is_none()) { - admin_warn!("Refusing to create invalid entries that are attempting to bypass replication state machine."); - return Err(OperationError::AccessDenied); - } - - // Assign our replication metadata now, since we can proceed with this operation. - let mut candidates: Vec> = candidates - .into_iter() - .map(|e| e.assign_cid(self.cid.clone(), &self.schema)) - .collect(); - - // run any pre plugins, giving them the list of mutable candidates. - // pre-plugins are defined here in their correct order of calling! - // I have no intent to make these dynamic or configurable. - - Plugins::run_pre_create_transform(self, &mut candidates, ce).map_err(|e| { - admin_error!("Create operation failed (pre_transform plugin), {:?}", e); + // Do we have rights to perform these creates? + // create_allow_operation + let access = self.get_accesscontrols(); + let op_allow = access + .create_allow_operation(ce, &candidates) + .map_err(|e| { + admin_error!("Failed to check create access {:?}", e); e })?; + if !op_allow { + return Err(OperationError::AccessDenied); + } - // NOTE: This is how you map from Vec> to Result> - // remember, that you only get the first error and the iter terminates. + // Before we assign replication metadata, we need to assert these entries + // are valid to create within the set of replication transitions. This + // means they *can not* be recycled or tombstones! + if candidates.iter().any(|e| e.mask_recycled_ts().is_none()) { + admin_warn!("Refusing to create invalid entries that are attempting to bypass replication state machine."); + return Err(OperationError::AccessDenied); + } - // eprintln!("{:?}", candidates); + // Assign our replication metadata now, since we can proceed with this operation. + let mut candidates: Vec> = candidates + .into_iter() + .map(|e| e.assign_cid(self.cid.clone(), &self.schema)) + .collect(); - // Now, normalise AND validate! + // run any pre plugins, giving them the list of mutable candidates. + // pre-plugins are defined here in their correct order of calling! + // I have no intent to make these dynamic or configurable. 
- let res: Result>, OperationError> = candidates - .into_iter() - .map(|e| { - e.validate(&self.schema) - .map_err(|e| { - admin_error!("Schema Violation in create validate {:?}", e); - OperationError::SchemaViolation(e) - }) - .map(|e| { - // Then seal the changes? - e.seal(&self.schema) - }) - }) - .collect(); + Plugins::run_pre_create_transform(self, &mut candidates, ce).map_err(|e| { + admin_error!("Create operation failed (pre_transform plugin), {:?}", e); + e + })?; - let norm_cand: Vec> = res?; + // NOTE: This is how you map from Vec> to Result> + // remember, that you only get the first error and the iter terminates. - // Run any pre-create plugins now with schema validated entries. - // This is important for normalisation of certain types IE class - // or attributes for these checks. - Plugins::run_pre_create(self, &norm_cand, ce).map_err(|e| { - admin_error!("Create operation failed (plugin), {:?}", e); - e - })?; + // eprintln!("{:?}", candidates); - // We may change from ce.entries later to something else? - let commit_cand = self.be_txn.create(&self.cid, norm_cand).map_err(|e| { - admin_error!("betxn create failure {:?}", e); - e - })?; + // Now, normalise AND validate! - // Run any post plugins + let res: Result>, OperationError> = candidates + .into_iter() + .map(|e| { + e.validate(&self.schema) + .map_err(|e| { + admin_error!("Schema Violation in create validate {:?}", e); + OperationError::SchemaViolation(e) + }) + .map(|e| { + // Then seal the changes? + e.seal(&self.schema) + }) + }) + .collect(); - Plugins::run_post_create(self, &commit_cand, ce).map_err(|e| { - admin_error!("Create operation failed (post plugin), {:?}", e); - e - })?; + let norm_cand: Vec> = res?; - // We have finished all plugs and now have a successful operation - flag if - // schema or acp requires reload. - if !self.changed_schema.get() { - self.changed_schema.set(commit_cand.iter().any(|e| { - e.attribute_equality("class", &PVCLASS_CLASSTYPE) - || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE) - })) - } - if !self.changed_acp.get() { - self.changed_acp.set( - commit_cand - .iter() - .any(|e| e.attribute_equality("class", &PVCLASS_ACP)), - ) - } - if !self.changed_oauth2.get() { - self.changed_oauth2.set( - commit_cand - .iter() - .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)), - ) - } - if !self.changed_domain.get() { - self.changed_domain.set( - commit_cand - .iter() - .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)), - ) - } + // Run any pre-create plugins now with schema validated entries. + // This is important for normalisation of certain types IE class + // or attributes for these checks. + Plugins::run_pre_create(self, &norm_cand, ce).map_err(|e| { + admin_error!("Create operation failed (plugin), {:?}", e); + e + })?; - let cu = self.changed_uuid.as_ptr(); - unsafe { - (*cu).extend(commit_cand.iter().map(|e| e.get_uuid())); - } - trace!( - schema_reload = ?self.changed_schema, - acp_reload = ?self.changed_acp, - oauth2_reload = ?self.changed_oauth2, - domain_reload = ?self.changed_domain, - ); + // We may change from ce.entries later to something else? 
+ let commit_cand = self.be_txn.create(&self.cid, norm_cand).map_err(|e| { + admin_error!("betxn create failure {:?}", e); + e + })?; - // We are complete, finalise logging and return + // Run any post plugins - if ce.ident.is_internal() { - trace!("Create operation success"); - } else { - admin_info!("Create operation success"); - } - Ok(()) - }) + Plugins::run_post_create(self, &commit_cand, ce).map_err(|e| { + admin_error!("Create operation failed (post plugin), {:?}", e); + e + })?; + + // We have finished all plugs and now have a successful operation - flag if + // schema or acp requires reload. + if !self.changed_schema.get() { + self.changed_schema.set(commit_cand.iter().any(|e| { + e.attribute_equality("class", &PVCLASS_CLASSTYPE) + || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE) + })) + } + if !self.changed_acp.get() { + self.changed_acp.set( + commit_cand + .iter() + .any(|e| e.attribute_equality("class", &PVCLASS_ACP)), + ) + } + if !self.changed_oauth2.get() { + self.changed_oauth2.set( + commit_cand + .iter() + .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)), + ) + } + if !self.changed_domain.get() { + self.changed_domain.set( + commit_cand + .iter() + .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)), + ) + } + + let cu = self.changed_uuid.as_ptr(); + unsafe { + (*cu).extend(commit_cand.iter().map(|e| e.get_uuid())); + } + trace!( + schema_reload = ?self.changed_schema, + acp_reload = ?self.changed_acp, + oauth2_reload = ?self.changed_oauth2, + domain_reload = ?self.changed_domain, + ); + + // We are complete, finalise logging and return + + if ce.ident.is_internal() { + trace!("Create operation success"); + } else { + admin_info!("Create operation success"); + } + Ok(()) } #[allow(clippy::cognitive_complexity)] + #[instrument(level = "debug", skip_all)] pub fn delete(&self, de: &DeleteEvent) -> Result<(), OperationError> { - spanned!("server::delete", { - // Do you have access to view all the set members? Reduce based on your - // read permissions and attrs - // THIS IS PRETTY COMPLEX SEE THE DESIGN DOC - // In this case we need a search, but not INTERNAL to keep the same - // associated credentials. - // We only need to retrieve uuid though ... - if !de.ident.is_internal() { - security_info!(name = %de.ident, "delete initiator"); - } + // Do you have access to view all the set members? Reduce based on your + // read permissions and attrs + // THIS IS PRETTY COMPLEX SEE THE DESIGN DOC + // In this case we need a search, but not INTERNAL to keep the same + // associated credentials. + // We only need to retrieve uuid though ... + if !de.ident.is_internal() { + security_info!(name = %de.ident, "delete initiator"); + } - // Now, delete only what you can see - let pre_candidates = self - .impersonate_search_valid(de.filter.clone(), de.filter_orig.clone(), &de.ident) - .map_err(|e| { - admin_error!("delete: error in pre-candidate selection {:?}", e); - e - })?; - - // Apply access controls to reduce the set if required. - // delete_allow_operation - let access = self.get_accesscontrols(); - let op_allow = access - .delete_allow_operation(de, &pre_candidates) - .map_err(|e| { - admin_error!("Failed to check delete access {:?}", e); - e - })?; - if !op_allow { - return Err(OperationError::AccessDenied); - } - - // Is the candidate set empty? 
- if pre_candidates.is_empty() { - request_error!(filter = ?de.filter, "delete: no candidates match filter"); - return Err(OperationError::NoMatchingEntries); - }; - - if pre_candidates.iter().any(|e| e.mask_tombstone().is_none()) { - admin_warn!("Refusing to delete entries which may be an attempt to bypass replication state machine."); - return Err(OperationError::AccessDenied); - } - - let mut candidates: Vec> = pre_candidates - .iter() - // Invalidate and assign change id's - .map(|er| er.as_ref().clone().invalidate(self.cid.clone())) - .collect(); - - trace!(?candidates, "delete: candidates"); - - // Pre delete plugs - Plugins::run_pre_delete(self, &mut candidates, de).map_err(|e| { - admin_error!("Delete operation failed (plugin), {:?}", e); + // Now, delete only what you can see + let pre_candidates = self + .impersonate_search_valid(de.filter.clone(), de.filter_orig.clone(), &de.ident) + .map_err(|e| { + admin_error!("delete: error in pre-candidate selection {:?}", e); e })?; - trace!(?candidates, "delete: now marking candidates as recycled"); + // Apply access controls to reduce the set if required. + // delete_allow_operation + let access = self.get_accesscontrols(); + let op_allow = access + .delete_allow_operation(de, &pre_candidates) + .map_err(|e| { + admin_error!("Failed to check delete access {:?}", e); + e + })?; + if !op_allow { + return Err(OperationError::AccessDenied); + } - let res: Result>, OperationError> = candidates - .into_iter() - .map(|e| { - e.to_recycled() - .validate(&self.schema) - .map_err(|e| { - admin_error!(err = ?e, "Schema Violation in delete validate"); - OperationError::SchemaViolation(e) - }) - // seal if it worked. - .map(|e| e.seal(&self.schema)) - }) - .collect(); + // Is the candidate set empty? + if pre_candidates.is_empty() { + request_error!(filter = ?de.filter, "delete: no candidates match filter"); + return Err(OperationError::NoMatchingEntries); + }; - let del_cand: Vec> = res?; + if pre_candidates.iter().any(|e| e.mask_tombstone().is_none()) { + admin_warn!("Refusing to delete entries which may be an attempt to bypass replication state machine."); + return Err(OperationError::AccessDenied); + } - self.be_txn - .modify(&self.cid, &pre_candidates, &del_cand) - .map_err(|e| { - // be_txn is dropped, ie aborted here. - admin_error!("Delete operation failed (backend), {:?}", e); - e - })?; + let mut candidates: Vec> = pre_candidates + .iter() + // Invalidate and assign change id's + .map(|er| er.as_ref().clone().invalidate(self.cid.clone())) + .collect(); - // Post delete plugins - Plugins::run_post_delete(self, &del_cand, de).map_err(|e| { - admin_error!("Delete operation failed (plugin), {:?}", e); + trace!(?candidates, "delete: candidates"); + + // Pre delete plugs + Plugins::run_pre_delete(self, &mut candidates, de).map_err(|e| { + admin_error!("Delete operation failed (plugin), {:?}", e); + e + })?; + + trace!(?candidates, "delete: now marking candidates as recycled"); + + let res: Result>, OperationError> = candidates + .into_iter() + .map(|e| { + e.to_recycled() + .validate(&self.schema) + .map_err(|e| { + admin_error!(err = ?e, "Schema Violation in delete validate"); + OperationError::SchemaViolation(e) + }) + // seal if it worked. + .map(|e| e.seal(&self.schema)) + }) + .collect(); + + let del_cand: Vec> = res?; + + self.be_txn + .modify(&self.cid, &pre_candidates, &del_cand) + .map_err(|e| { + // be_txn is dropped, ie aborted here. 
+ admin_error!("Delete operation failed (backend), {:?}", e); e })?; - // We have finished all plugs and now have a successful operation - flag if - // schema or acp requires reload. - if !self.changed_schema.get() { - self.changed_schema.set(del_cand.iter().any(|e| { - e.attribute_equality("class", &PVCLASS_CLASSTYPE) - || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE) - })) - } - if !self.changed_acp.get() { - self.changed_acp.set( - del_cand - .iter() - .any(|e| e.attribute_equality("class", &PVCLASS_ACP)), - ) - } - if !self.changed_oauth2.get() { - self.changed_oauth2.set( - del_cand - .iter() - .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)), - ) - } - if !self.changed_domain.get() { - self.changed_domain.set( - del_cand - .iter() - .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)), - ) - } + // Post delete plugins + Plugins::run_post_delete(self, &del_cand, de).map_err(|e| { + admin_error!("Delete operation failed (plugin), {:?}", e); + e + })?; - let cu = self.changed_uuid.as_ptr(); - unsafe { - (*cu).extend(del_cand.iter().map(|e| e.get_uuid())); - } + // We have finished all plugs and now have a successful operation - flag if + // schema or acp requires reload. + if !self.changed_schema.get() { + self.changed_schema.set(del_cand.iter().any(|e| { + e.attribute_equality("class", &PVCLASS_CLASSTYPE) + || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE) + })) + } + if !self.changed_acp.get() { + self.changed_acp.set( + del_cand + .iter() + .any(|e| e.attribute_equality("class", &PVCLASS_ACP)), + ) + } + if !self.changed_oauth2.get() { + self.changed_oauth2.set( + del_cand + .iter() + .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)), + ) + } + if !self.changed_domain.get() { + self.changed_domain.set( + del_cand + .iter() + .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)), + ) + } - trace!( - schema_reload = ?self.changed_schema, - acp_reload = ?self.changed_acp, - oauth2_reload = ?self.changed_oauth2, - domain_reload = ?self.changed_domain, - ); + let cu = self.changed_uuid.as_ptr(); + unsafe { + (*cu).extend(del_cand.iter().map(|e| e.get_uuid())); + } - // Send result - if de.ident.is_internal() { - trace!("Delete operation success"); - } else { - admin_info!("Delete operation success"); - } - Ok(()) - }) + trace!( + schema_reload = ?self.changed_schema, + acp_reload = ?self.changed_acp, + oauth2_reload = ?self.changed_oauth2, + domain_reload = ?self.changed_domain, + ); + + // Send result + if de.ident.is_internal() { + trace!("Delete operation success"); + } else { + admin_info!("Delete operation success"); + } + Ok(()) } + #[instrument(level = "debug", skip_all)] pub fn purge_tombstones(&self) -> Result<(), OperationError> { - spanned!("server::purge_tombstones", { - // purge everything that is a tombstone. - let cid = self.cid.sub_secs(CHANGELOG_MAX_AGE).map_err(|e| { - admin_error!("Unable to generate search cid {:?}", e); - e - })?; + // purge everything that is a tombstone. + let cid = self.cid.sub_secs(CHANGELOG_MAX_AGE).map_err(|e| { + admin_error!("Unable to generate search cid {:?}", e); + e + })?; - // Delete them - this is a TRUE delete, no going back now! - self.be_txn - .reap_tombstones(&cid) - .map_err(|e| { - admin_error!(err = ?e, "Tombstone purge operation failed (backend)"); - e - }) - .map(|_| { - admin_info!("Tombstone purge operation success"); - }) - }) + // Delete them - this is a TRUE delete, no going back now! 
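The changed_schema / changed_acp / changed_oauth2 / changed_domain flags seen in both create and delete follow a sticky set-once pattern: the candidate set is only scanned while a flag is still false, and once true it stays true for the rest of the transaction. A small sketch under the assumption that the flags are Cell<bool>; the struct and method names are invented for illustration, and the unsafe raw-pointer extension of changed_uuid is deliberately left out:

use std::cell::Cell;

struct TxnFlags {
    changed_schema: Cell<bool>,
}

impl TxnFlags {
    // Sticky "dirty" flag: scan only while still false.
    fn note_changes(&self, classes: &[&str]) {
        if !self.changed_schema.get() {
            self.changed_schema
                .set(classes.iter().any(|c| *c == "classtype" || *c == "attributetype"));
        }
    }
}

fn main() {
    let f = TxnFlags { changed_schema: Cell::new(false) };
    f.note_changes(&["person"]);
    assert!(!f.changed_schema.get());
    f.note_changes(&["attributetype"]);
    f.note_changes(&["person"]); // stays true once set
    assert!(f.changed_schema.get());
}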
+ self.be_txn + .reap_tombstones(&cid) + .map_err(|e| { + admin_error!(err = ?e, "Tombstone purge operation failed (backend)"); + e + }) + .map(|_| { + admin_info!("Tombstone purge operation success"); + }) } + #[instrument(level = "debug", skip_all)] pub fn purge_recycled(&self) -> Result<(), OperationError> { - spanned!("server::purge_recycled", { - // Send everything that is recycled to tombstone - // Search all recycled - let cid = self.cid.sub_secs(RECYCLEBIN_MAX_AGE).map_err(|e| { - admin_error!(err = ?e, "Unable to generate search cid"); + // Send everything that is recycled to tombstone + // Search all recycled + let cid = self.cid.sub_secs(RECYCLEBIN_MAX_AGE).map_err(|e| { + admin_error!(err = ?e, "Unable to generate search cid"); + e + })?; + let rc = self.internal_search(filter_all!(f_and!([ + f_eq("class", PVCLASS_RECYCLED.clone()), + f_lt("last_modified_cid", PartialValue::new_cid(cid)), + ])))?; + + if rc.is_empty() { + admin_info!("No recycled present - purge operation success"); + return Ok(()); + } + + // Modify them to strip all avas except uuid + let tombstone_cand: Result, _> = rc + .iter() + .map(|e| { + e.to_tombstone(self.cid.clone()) + .validate(&self.schema) + .map_err(|e| { + admin_error!("Schema Violation in purge_recycled validate: {:?}", e); + OperationError::SchemaViolation(e) + }) + // seal if it worked. + .map(|e| e.seal(&self.schema)) + }) + .collect(); + + let tombstone_cand = tombstone_cand?; + + // Backend Modify + self.be_txn + .modify(&self.cid, &rc, &tombstone_cand) + .map_err(|e| { + admin_error!("Purge recycled operation failed (backend), {:?}", e); + e - })?; - let rc = self.internal_search(filter_all!(f_and!([ - f_eq("class", PVCLASS_RECYCLED.clone()), - f_lt("last_modified_cid", PartialValue::new_cid(cid)), - ])))?; - - if rc.is_empty() { - admin_info!("No recycled present - purge operation success"); - return Ok(()); - } - - // Modify them to strip all avas except uuid - let tombstone_cand: Result, _> = rc - .iter() - .map(|e| { - e.to_tombstone(self.cid.clone()) - .validate(&self.schema) - .map_err(|e| { - admin_error!("Schema Violation in purge_recycled validate: {:?}", e); - OperationError::SchemaViolation(e) - }) - // seal if it worked. - .map(|e| e.seal(&self.schema)) - }) - .collect(); - - let tombstone_cand = tombstone_cand?; - - // Backend Modify - self.be_txn - .modify(&self.cid, &rc, &tombstone_cand) - .map_err(|e| { - admin_error!("Purge recycled operation failed (backend), {:?}", e); - e - }) - .map(|_| { - admin_info!("Purge recycled operation success"); - }) - }) + }) + .map(|_| { + admin_info!("Purge recycled operation success"); + }) } - // Should this take a revive event? + #[instrument(level = "debug", skip_all)] pub fn revive_recycled(&self, re: &ReviveRecycledEvent) -> Result<(), OperationError> { - spanned!("server::revive_recycled", { - // Revive an entry to live. This is a specialised function, and draws a lot of - // inspiration from modify. - // - // Access is granted by the ability to ability to search the class=recycled - // and the ability modify + remove that class from the object. - if !re.ident.is_internal() { - security_info!(name = %re.ident, "revive initiator"); + // Revive an entry to live. This is a specialised function, and draws a lot of + // inspiration from modify. + // + // Access is granted by the ability to search the class=recycled + // and the ability to modify + remove that class from the object.
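Taken together, delete, purge_recycled and purge_tombstones implement a three-stage lifecycle: a delete recycles entries, recycled entries older than RECYCLEBIN_MAX_AGE are rewritten as tombstones, and tombstones older than CHANGELOG_MAX_AGE are reaped for real. A toy state machine capturing just that ordering; the enum is an illustration, not the server's actual representation:

#[derive(Debug)]
enum EntryState {
    Live,
    Recycled,
    Tombstone,
}

fn step(s: &EntryState) -> Option<EntryState> {
    match s {
        EntryState::Live => Some(EntryState::Recycled),      // delete()
        EntryState::Recycled => Some(EntryState::Tombstone), // purge_recycled()
        EntryState::Tombstone => None,                       // purge_tombstones() reaps
    }
}

fn main() {
    let mut state = EntryState::Live;
    while let Some(next) = step(&state) {
        println!("{:?} -> {:?}", state, next);
        state = next;
    }
}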
+ if !re.ident.is_internal() { + security_info!(name = %re.ident, "revive initiator"); + } + + // Get the list of pre_candidates, using impersonate search. + let pre_candidates = + self.impersonate_search_valid(re.filter.clone(), re.filter.clone(), &re.ident)?; + + // Is the list empty? + if pre_candidates.is_empty() { + if re.ident.is_internal() { + trace!( + "revive: no candidates match filter ... continuing {:?}", + re.filter + ); + return Ok(()); + } else { + request_error!( + "revive: no candidates match filter, failure {:?}", + re.filter + ); + return Err(OperationError::NoMatchingEntries); } + }; - // Get the list of pre_candidates, using impersonate search. - let pre_candidates = - self.impersonate_search_valid(re.filter.clone(), re.filter.clone(), &re.ident)?; + trace!("revive: pre_candidates -> {:?}", pre_candidates); - // Is the list empty? - if pre_candidates.is_empty() { - if re.ident.is_internal() { - trace!( - "revive: no candidates match filter ... continuing {:?}", - re.filter - ); - return Ok(()); - } else { - request_error!( - "revive: no candidates match filter, failure {:?}", - re.filter - ); - return Err(OperationError::NoMatchingEntries); - } - }; + // Check access against a "fake" modify. + let modlist = ModifyList::new_list(vec![Modify::Removed( + AttrString::from("class"), + PVCLASS_RECYCLED.clone(), + )]); - trace!("revive: pre_candidates -> {:?}", pre_candidates); + let m_valid = modlist.validate(self.get_schema()).map_err(|e| { + admin_error!("revive recycled modlist Schema Violation {:?}", e); + OperationError::SchemaViolation(e) + })?; - // Check access against a "fake" modify. - let modlist = ModifyList::new_list(vec![Modify::Removed( - AttrString::from("class"), - PVCLASS_RECYCLED.clone(), - )]); + let me = + ModifyEvent::new_impersonate(&re.ident, re.filter.clone(), re.filter.clone(), m_valid); - let m_valid = modlist.validate(self.get_schema()).map_err(|e| { - admin_error!("revive recycled modlist Schema Violation {:?}", e); - OperationError::SchemaViolation(e) - })?; - - let me = ModifyEvent::new_impersonate( - &re.ident, - re.filter.clone(), - re.filter.clone(), - m_valid, - ); - - let access = self.get_accesscontrols(); - let op_allow = access - .modify_allow_operation(&me, &pre_candidates) - .map_err(|e| { - admin_error!("Unable to check modify access {:?}", e); - e - })?; - if !op_allow { - return Err(OperationError::AccessDenied); - } - - // Are all of the entries actually recycled? - if pre_candidates.iter().all(|e| e.mask_recycled().is_some()) { - admin_warn!("Refusing to revive entries that are already live!"); - return Err(OperationError::AccessDenied); - } - - // Build the list of mods from directmo, to revive memberships. - let mut dm_mods: HashMap> = - HashMap::with_capacity(pre_candidates.len()); - - for e in &pre_candidates { - // Get this entries uuid. - let u: Uuid = e.get_uuid(); - - if let Some(riter) = e.get_ava_as_refuuid("directmemberof") { - for g_uuid in riter { - dm_mods - .entry(g_uuid) - .and_modify(|mlist| { - let m = Modify::Present( - AttrString::from("member"), - Value::new_refer_r(&u), - ); - mlist.push_mod(m); - }) - .or_insert({ - let m = Modify::Present( - AttrString::from("member"), - Value::new_refer_r(&u), - ); - ModifyList::new_list(vec![m]) - }); - } - } - } - - // clone the writeable entries. - let mut candidates: Vec> = pre_candidates - .iter() - .map(|er| er.as_ref().clone().invalidate(self.cid.clone())) - // Mutate to apply the revive. - .map(|er| er.to_revived()) - .collect(); - - // Are they all revived? 
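The dm_mods accumulation above is the standard HashMap entry-API idiom: append to an existing per-group list via and_modify, or create the list via or_insert. Reduced to a standalone form with invented keys and values:

use std::collections::HashMap;

fn main() {
    // (group, member) pairs, standing in for directmemberof references.
    let pairs = [("group_a", "u1"), ("group_b", "u2"), ("group_a", "u3")];
    let mut dm_mods: HashMap<&str, Vec<&str>> = HashMap::new();
    for (g, u) in pairs {
        dm_mods
            .entry(g)
            .and_modify(|mods| mods.push(u)) // key exists: extend the list
            .or_insert_with(|| vec![u]);     // first sighting: fresh list
    }
    assert_eq!(dm_mods["group_a"], vec!["u1", "u3"]);
}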
- if candidates.iter().all(|e| e.mask_recycled().is_none()) { - admin_error!("Not all candidates were correctly revived, unable to proceed"); - return Err(OperationError::InvalidEntryState); - } - - // Do we need to apply pre-mod? - // Very likely, incase domain has renamed etc. - Plugins::run_pre_modify(self, &mut candidates, &me).map_err(|e| { - admin_error!("Revive operation failed (plugin), {:?}", e); + let access = self.get_accesscontrols(); + let op_allow = access + .modify_allow_operation(&me, &pre_candidates) + .map_err(|e| { + admin_error!("Unable to check modify access {:?}", e); e })?; + if !op_allow { + return Err(OperationError::AccessDenied); + } - // Schema validate - let res: Result>, OperationError> = candidates - .into_iter() - .map(|e| { - e.validate(&self.schema) - .map_err(|e| { - admin_error!("Schema Violation {:?}", e); - OperationError::SchemaViolation(e) + // Are all of the entries actually recycled? + if pre_candidates.iter().all(|e| e.mask_recycled().is_some()) { + admin_warn!("Refusing to revive entries that are already live!"); + return Err(OperationError::AccessDenied); + } + + // Build the list of mods from directmo, to revive memberships. + let mut dm_mods: HashMap> = + HashMap::with_capacity(pre_candidates.len()); + + for e in &pre_candidates { + // Get this entries uuid. + let u: Uuid = e.get_uuid(); + + if let Some(riter) = e.get_ava_as_refuuid("directmemberof") { + for g_uuid in riter { + dm_mods + .entry(g_uuid) + .and_modify(|mlist| { + let m = + Modify::Present(AttrString::from("member"), Value::new_refer_r(&u)); + mlist.push_mod(m); }) - .map(|e| e.seal(&self.schema)) - }) - .collect(); - - let norm_cand: Vec> = res?; - - // build the mod partial - let mp = ModifyPartial { - norm_cand, - pre_candidates, - me: &me, - }; - - // Call modify_apply - self.modify_apply(mp)?; - - // If and only if that succeeds, apply the direct membership modifications - // if possible. - for (g, mods) in dm_mods { - // I think the filter/filter_all shouldn't matter here because the only - // valid direct memberships should be still valid/live references, as refint - // removes anything that was deleted even from recycled entries. - let f = filter_all!(f_eq("uuid", PartialValue::new_uuid(g))); - self.internal_modify(&f, &mods)?; - } - - Ok(()) - }) - } - - // Should this take a revive event? - pub fn revive_recycled_legacy(&self, re: &ReviveRecycledEvent) -> Result<(), OperationError> { - spanned!("server::revive_recycled", { - // Revive an entry to live. This is a specialised function, and draws a lot of - // inspiration from modify. - // - // - // Access is granted by the ability to ability to search the class=recycled - // and the ability modify + remove that class from the object. - - // create the modify for access testing. - // tl;dr, remove the class=recycled - let modlist = ModifyList::new_list(vec![Modify::Removed( - AttrString::from("class"), - PVCLASS_RECYCLED.clone(), - )]); - - let m_valid = modlist.validate(self.get_schema()).map_err(|e| { - admin_error!( - "Schema Violation in revive recycled modlist validate: {:?}", - e - ); - OperationError::SchemaViolation(e) - })?; - - // Get the entries we are about to revive. - // we make a set of per-entry mod lists. A list of lists even ... - let revive_cands = - self.impersonate_search_valid(re.filter.clone(), re.filter.clone(), &re.ident)?; - - let mut dm_mods: HashMap> = - HashMap::with_capacity(revive_cands.len()); - - for e in revive_cands { - // Get this entries uuid. 
- let u: Uuid = e.get_uuid(); - - if let Some(riter) = e.get_ava_as_refuuid("directmemberof") { - for g_uuid in riter { - dm_mods - .entry(g_uuid) - .and_modify(|mlist| { - let m = Modify::Present( - AttrString::from("member"), - Value::new_refer_r(&u), - ); - mlist.push_mod(m); - }) - .or_insert({ - let m = Modify::Present( - AttrString::from("member"), - Value::new_refer_r(&u), - ); - ModifyList::new_list(vec![m]) - }); - } + .or_insert({ + let m = + Modify::Present(AttrString::from("member"), Value::new_refer_r(&u)); + ModifyList::new_list(vec![m]) + }); } } + } - // Now impersonate the modify - self.impersonate_modify_valid( - re.filter.clone(), - re.filter.clone(), - m_valid, - &re.ident, - )?; - // If and only if that succeeds, apply the direct membership modifications - // if possible. - for (g, mods) in dm_mods { - // I think the filter/filter_all shouldn't matter here because the only - // valid direct memberships should be still valid/live references. - let f = filter_all!(f_eq("uuid", PartialValue::new_uuid(g))); - self.internal_modify(&f, &mods)?; + // clone the writeable entries. + let mut candidates: Vec> = pre_candidates + .iter() + .map(|er| er.as_ref().clone().invalidate(self.cid.clone())) + // Mutate to apply the revive. + .map(|er| er.to_revived()) + .collect(); + + // Are they all revived? + if candidates.iter().all(|e| e.mask_recycled().is_none()) { + admin_error!("Not all candidates were correctly revived, unable to proceed"); + return Err(OperationError::InvalidEntryState); + } + + // Do we need to apply pre-mod? + // Very likely, incase domain has renamed etc. + Plugins::run_pre_modify(self, &mut candidates, &me).map_err(|e| { + admin_error!("Revive operation failed (plugin), {:?}", e); + e + })?; + + // Schema validate + let res: Result>, OperationError> = candidates + .into_iter() + .map(|e| { + e.validate(&self.schema) + .map_err(|e| { + admin_error!("Schema Violation {:?}", e); + OperationError::SchemaViolation(e) + }) + .map(|e| e.seal(&self.schema)) + }) + .collect(); + + let norm_cand: Vec> = res?; + + // build the mod partial + let mp = ModifyPartial { + norm_cand, + pre_candidates, + me: &me, + }; + + // Call modify_apply + self.modify_apply(mp)?; + + // If and only if that succeeds, apply the direct membership modifications + // if possible. + for (g, mods) in dm_mods { + // I think the filter/filter_all shouldn't matter here because the only + // valid direct memberships should be still valid/live references, as refint + // removes anything that was deleted even from recycled entries. + let f = filter_all!(f_eq("uuid", PartialValue::new_uuid(g))); + self.internal_modify(&f, &mods)?; + } + + Ok(()) + } + + #[instrument(level = "debug", skip_all)] + pub fn revive_recycled_legacy(&self, re: &ReviveRecycledEvent) -> Result<(), OperationError> { + // Revive an entry to live. This is a specialised function, and draws a lot of + // inspiration from modify. + // + // + // Access is granted by the ability to ability to search the class=recycled + // and the ability modify + remove that class from the object. + + // create the modify for access testing. + // tl;dr, remove the class=recycled + let modlist = ModifyList::new_list(vec![Modify::Removed( + AttrString::from("class"), + PVCLASS_RECYCLED.clone(), + )]); + + let m_valid = modlist.validate(self.get_schema()).map_err(|e| { + admin_error!( + "Schema Violation in revive recycled modlist validate: {:?}", + e + ); + OperationError::SchemaViolation(e) + })?; + + // Get the entries we are about to revive. 
+ // we make a set of per-entry mod lists. A list of lists even ... + let revive_cands = + self.impersonate_search_valid(re.filter.clone(), re.filter.clone(), &re.ident)?; + + let mut dm_mods: HashMap> = + HashMap::with_capacity(revive_cands.len()); + + for e in revive_cands { + // Get this entries uuid. + let u: Uuid = e.get_uuid(); + + if let Some(riter) = e.get_ava_as_refuuid("directmemberof") { + for g_uuid in riter { + dm_mods + .entry(g_uuid) + .and_modify(|mlist| { + let m = + Modify::Present(AttrString::from("member"), Value::new_refer_r(&u)); + mlist.push_mod(m); + }) + .or_insert({ + let m = + Modify::Present(AttrString::from("member"), Value::new_refer_r(&u)); + ModifyList::new_list(vec![m]) + }); + } } - Ok(()) - }) + } + + // Now impersonate the modify + self.impersonate_modify_valid(re.filter.clone(), re.filter.clone(), m_valid, &re.ident)?; + // If and only if that succeeds, apply the direct membership modifications + // if possible. + for (g, mods) in dm_mods { + // I think the filter/filter_all shouldn't matter here because the only + // valid direct memberships should be still valid/live references. + let f = filter_all!(f_eq("uuid", PartialValue::new_uuid(g))); + self.internal_modify(&f, &mods)?; + } + Ok(()) } /// Unsafety: This is unsafe because you need to be careful about how you handle and check /// the Ok(None) case which occurs during internal operations, and that you DO NOT re-order /// and call multiple pre-applies at the same time, else you can cause DB corruption. + #[instrument(level = "debug", skip_all)] pub(crate) unsafe fn modify_pre_apply<'x>( &self, me: &'x ModifyEvent, ) -> Result>, OperationError> { - spanned!("server::modify_pre_apply", { - // Get the candidates. - // Modify applies a modlist to a filter, so we need to internal search - // then apply. - if !me.ident.is_internal() { - security_info!(name = %me.ident, "modify initiator"); - } + // Get the candidates. + // Modify applies a modlist to a filter, so we need to internal search + // then apply. + if !me.ident.is_internal() { + security_info!(name = %me.ident, "modify initiator"); + } - // Validate input. + // Validate input. - // Is the modlist non zero? - if me.modlist.is_empty() { - request_error!("modify: empty modify request"); - return Err(OperationError::EmptyRequest); - } + // Is the modlist non zero? + if me.modlist.is_empty() { + request_error!("modify: empty modify request"); + return Err(OperationError::EmptyRequest); + } - // Is the modlist valid? - // This is now done in the event transform + // Is the modlist valid? + // This is now done in the event transform - // Is the filter invalid to schema? - // This is now done in the event transform + // Is the filter invalid to schema? + // This is now done in the event transform - // This also checks access controls due to use of the impersonation. - let pre_candidates = self - .impersonate_search_valid(me.filter.clone(), me.filter_orig.clone(), &me.ident) - .map_err(|e| { - admin_error!("modify: error in pre-candidate selection {:?}", e); - e - })?; - - if pre_candidates.is_empty() { - if me.ident.is_internal() { - trace!( - "modify: no candidates match filter ... continuing {:?}", - me.filter - ); - return Ok(None); - } else { - request_error!( - "modify: no candidates match filter, failure {:?}", - me.filter - ); - return Err(OperationError::NoMatchingEntries); - } - }; - - trace!("modify: pre_candidates -> {:?}", pre_candidates); - trace!("modify: modlist -> {:?}", me.modlist); - - // Are we allowed to make the changes we want to? 
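Both revive paths and the modify path gate on an access check that returns a boolean, with a refusal mapped onto a uniform AccessDenied. A compressed sketch of that control flow; the error enum, the is_admin stand-in, and the function names are all illustrative:

#[derive(Debug)]
enum OpError {
    AccessDenied,
}

fn modify_allow_operation(is_admin: bool) -> Result<bool, OpError> {
    Ok(is_admin) // stand-in for the real ACP evaluation
}

fn gated_modify(is_admin: bool) -> Result<(), OpError> {
    let op_allow = modify_allow_operation(is_admin)?;
    if !op_allow {
        return Err(OpError::AccessDenied);
    }
    // ... proceed to mutate candidates ...
    Ok(())
}

fn main() {
    assert!(gated_modify(true).is_ok());
    assert!(gated_modify(false).is_err());
}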
- // modify_allow_operation - let access = self.get_accesscontrols(); - let op_allow = access - .modify_allow_operation(me, &pre_candidates) - .map_err(|e| { - admin_error!("Unable to check modify access {:?}", e); - e - })?; - if !op_allow { - return Err(OperationError::AccessDenied); - } - - // Clone a set of writeables. - // Apply the modlist -> Remember, we have a set of origs - // and the new modified ents. - let mut candidates: Vec> = pre_candidates - .iter() - .map(|er| er.as_ref().clone().invalidate(self.cid.clone())) - .collect(); - - candidates - .iter_mut() - .for_each(|er| er.apply_modlist(&me.modlist)); - - trace!("modify: candidates -> {:?}", candidates); - - // Did any of the candidates now become masked? - if candidates.iter().any(|e| e.mask_recycled_ts().is_none()) { - admin_warn!("Refusing to apply modifications that are attempting to bypass replication state machine."); - return Err(OperationError::AccessDenied); - } - - // Pre mod plugins - // We should probably supply the pre-post cands here. - Plugins::run_pre_modify(self, &mut candidates, me).map_err(|e| { - admin_error!("Pre-Modify operation failed (plugin), {:?}", e); + // This also checks access controls due to use of the impersonation. + let pre_candidates = self + .impersonate_search_valid(me.filter.clone(), me.filter_orig.clone(), &me.ident) + .map_err(|e| { + admin_error!("modify: error in pre-candidate selection {:?}", e); e })?; - // NOTE: There is a potential optimisation here, where if - // candidates == pre-candidates, then we don't need to store anything - // because we effectively just did an assert. However, like all - // optimisations, this could be premature - so we for now, just - // do the CORRECT thing and recommit as we may find later we always - // want to add CSN's or other. - - let res: Result>, OperationError> = candidates - .into_iter() - .map(|entry| { - entry - .validate(&self.schema) - .map_err(|e| { - admin_error!( - "Schema Violation in validation of modify_pre_apply {:?}", - e - ); - OperationError::SchemaViolation(e) - }) - .map(|entry| entry.seal(&self.schema)) - }) - .collect(); - - let norm_cand: Vec> = res?; - - Ok(Some(ModifyPartial { - norm_cand, - pre_candidates, - me, - })) - }) - } - - pub(crate) fn modify_apply(&self, mp: ModifyPartial<'_>) -> Result<(), OperationError> { - spanned!("server::modify_apply", { - let ModifyPartial { - norm_cand, - pre_candidates, - me, - } = mp; - - // Backend Modify - self.be_txn - .modify(&self.cid, &pre_candidates, &norm_cand) - .map_err(|e| { - admin_error!("Modify operation failed (backend), {:?}", e); - e - })?; - - // Post Plugins - // - // memberOf actually wants the pre cand list and the norm_cand list to see what - // changed. Could be optimised, but this is correct still ... - Plugins::run_post_modify(self, &pre_candidates, &norm_cand, me).map_err(|e| { - admin_error!("Post-Modify operation failed (plugin), {:?}", e); - e - })?; - - // We have finished all plugs and now have a successful operation - flag if - // schema or acp requires reload. Remember, this is a modify, so we need to check - // pre and post cands. 
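The modify path keeps the untouched pre_candidates alongside the mutated clones so the backend can be handed matched before/after pairs. A stripped-down sketch of that clone-then-mutate shape; the Entry struct here is a stand-in, not the real type:

#[derive(Clone, Debug, PartialEq)]
struct Entry {
    attrs: Vec<(String, String)>,
}

fn main() {
    let pre: Vec<Entry> = vec![Entry {
        attrs: vec![("name".into(), "a".into())],
    }];
    // Clone, then apply the modifications to the copies only.
    let mut post: Vec<Entry> = pre.iter().cloned().collect();
    for e in post.iter_mut() {
        e.attrs.push(("displayname".into(), "A".into())); // apply_modlist stand-in
    }
    // pre is preserved so the backend can diff old vs new.
    assert_ne!(pre[0], post[0]);
}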
- if !self.changed_schema.get() { - self.changed_schema.set( - norm_cand - .iter() - .chain(pre_candidates.iter().map(|e| e.as_ref())) - .any(|e| { - e.attribute_equality("class", &PVCLASS_CLASSTYPE) - || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE) - }), - ) - } - if !self.changed_acp.get() { - self.changed_acp.set( - norm_cand - .iter() - .chain(pre_candidates.iter().map(|e| e.as_ref())) - .any(|e| e.attribute_equality("class", &PVCLASS_ACP)), - ) - } - if !self.changed_oauth2.get() { - self.changed_oauth2.set( - norm_cand - .iter() - .chain(pre_candidates.iter().map(|e| e.as_ref())) - .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)), - ) - } - if !self.changed_domain.get() { - self.changed_domain.set( - norm_cand - .iter() - .chain(pre_candidates.iter().map(|e| e.as_ref())) - .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)), - ) - } - - let cu = self.changed_uuid.as_ptr(); - unsafe { - (*cu).extend( - norm_cand - .iter() - .map(|e| e.get_uuid()) - .chain(pre_candidates.iter().map(|e| e.get_uuid())), - ); - } - - trace!( - schema_reload = ?self.changed_schema, - acp_reload = ?self.changed_acp, - oauth2_reload = ?self.changed_oauth2, - domain_reload = ?self.changed_domain, - ); - - // return + if pre_candidates.is_empty() { if me.ident.is_internal() { - trace!("Modify operation success"); + trace!( + "modify: no candidates match filter ... continuing {:?}", + me.filter + ); + return Ok(None); } else { - admin_info!("Modify operation success"); + request_error!( + "modify: no candidates match filter, failure {:?}", + me.filter + ); + return Err(OperationError::NoMatchingEntries); } - Ok(()) - }) + }; + + trace!("modify: pre_candidates -> {:?}", pre_candidates); + trace!("modify: modlist -> {:?}", me.modlist); + + // Are we allowed to make the changes we want to? + // modify_allow_operation + let access = self.get_accesscontrols(); + let op_allow = access + .modify_allow_operation(me, &pre_candidates) + .map_err(|e| { + admin_error!("Unable to check modify access {:?}", e); + e + })?; + if !op_allow { + return Err(OperationError::AccessDenied); + } + + // Clone a set of writeables. + // Apply the modlist -> Remember, we have a set of origs + // and the new modified ents. + let mut candidates: Vec> = pre_candidates + .iter() + .map(|er| er.as_ref().clone().invalidate(self.cid.clone())) + .collect(); + + candidates + .iter_mut() + .for_each(|er| er.apply_modlist(&me.modlist)); + + trace!("modify: candidates -> {:?}", candidates); + + // Did any of the candidates now become masked? + if candidates.iter().any(|e| e.mask_recycled_ts().is_none()) { + admin_warn!("Refusing to apply modifications that are attempting to bypass replication state machine."); + return Err(OperationError::AccessDenied); + } + + // Pre mod plugins + // We should probably supply the pre-post cands here. + Plugins::run_pre_modify(self, &mut candidates, me).map_err(|e| { + admin_error!("Pre-Modify operation failed (plugin), {:?}", e); + e + })?; + + // NOTE: There is a potential optimisation here, where if + // candidates == pre-candidates, then we don't need to store anything + // because we effectively just did an assert. However, like all + // optimisations, this could be premature - so we for now, just + // do the CORRECT thing and recommit as we may find later we always + // want to add CSN's or other. 
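Because a modify can move an entry either into or out of a schema- or ACP-relevant class, the reload flags in modify_apply scan the pre and post candidate sets as one stream via chain. In miniature, with class names borrowed from the checks above:

fn main() {
    let pre = vec!["person", "group"];
    let post = vec!["person", "attributetype"];
    let needs_schema_reload = post
        .iter()
        .chain(pre.iter()) // walk both sets as one iterator
        .any(|c| *c == "classtype" || *c == "attributetype");
    assert!(needs_schema_reload);
}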
+ + let res: Result>, OperationError> = candidates + .into_iter() + .map(|entry| { + entry + .validate(&self.schema) + .map_err(|e| { + admin_error!("Schema Violation in validation of modify_pre_apply {:?}", e); + OperationError::SchemaViolation(e) + }) + .map(|entry| entry.seal(&self.schema)) + }) + .collect(); + + let norm_cand: Vec> = res?; + + Ok(Some(ModifyPartial { + norm_cand, + pre_candidates, + me, + })) } + #[instrument(level = "debug", skip_all)] + pub(crate) fn modify_apply(&self, mp: ModifyPartial<'_>) -> Result<(), OperationError> { + let ModifyPartial { + norm_cand, + pre_candidates, + me, + } = mp; + + // Backend Modify + self.be_txn + .modify(&self.cid, &pre_candidates, &norm_cand) + .map_err(|e| { + admin_error!("Modify operation failed (backend), {:?}", e); + e + })?; + + // Post Plugins + // + // memberOf actually wants the pre cand list and the norm_cand list to see what + // changed. Could be optimised, but this is correct still ... + Plugins::run_post_modify(self, &pre_candidates, &norm_cand, me).map_err(|e| { + admin_error!("Post-Modify operation failed (plugin), {:?}", e); + e + })?; + + // We have finished all plugs and now have a successful operation - flag if + // schema or acp requires reload. Remember, this is a modify, so we need to check + // pre and post cands. + if !self.changed_schema.get() { + self.changed_schema.set( + norm_cand + .iter() + .chain(pre_candidates.iter().map(|e| e.as_ref())) + .any(|e| { + e.attribute_equality("class", &PVCLASS_CLASSTYPE) + || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE) + }), + ) + } + if !self.changed_acp.get() { + self.changed_acp.set( + norm_cand + .iter() + .chain(pre_candidates.iter().map(|e| e.as_ref())) + .any(|e| e.attribute_equality("class", &PVCLASS_ACP)), + ) + } + if !self.changed_oauth2.get() { + self.changed_oauth2.set( + norm_cand + .iter() + .chain(pre_candidates.iter().map(|e| e.as_ref())) + .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)), + ) + } + if !self.changed_domain.get() { + self.changed_domain.set( + norm_cand + .iter() + .chain(pre_candidates.iter().map(|e| e.as_ref())) + .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)), + ) + } + + let cu = self.changed_uuid.as_ptr(); + unsafe { + (*cu).extend( + norm_cand + .iter() + .map(|e| e.get_uuid()) + .chain(pre_candidates.iter().map(|e| e.get_uuid())), + ); + } + + trace!( + schema_reload = ?self.changed_schema, + acp_reload = ?self.changed_acp, + oauth2_reload = ?self.changed_oauth2, + domain_reload = ?self.changed_domain, + ); + + // return + if me.ident.is_internal() { + trace!("Modify operation success"); + } else { + admin_info!("Modify operation success"); + } + Ok(()) + } + + #[instrument(level = "debug", skip_all)] pub fn modify(&self, me: &ModifyEvent) -> Result<(), OperationError> { - spanned!("server::modify", { - let mp = unsafe { self.modify_pre_apply(me)? }; - if let Some(mp) = mp { - self.modify_apply(mp) - } else { - // No action to apply, the pre-apply said nothing to be done. - Ok(()) - } - }) + let mp = unsafe { self.modify_pre_apply(me)? }; + if let Some(mp) = mp { + self.modify_apply(mp) + } else { + // No action to apply, the pre-apply said nothing to be done. 
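The modify_pre_apply / modify_apply pair is a two-phase shape: a fallible prepare step that may legitimately conclude there is nothing to do (the Ok(None) case for internal callers), and an apply step that consumes the prepared state. A minimal sketch with invented types:

struct Prepared(Vec<String>);

fn pre_apply(input: &[String]) -> Result<Option<Prepared>, String> {
    if input.is_empty() {
        return Ok(None); // nothing matched; not an error for internal callers
    }
    Ok(Some(Prepared(input.to_vec())))
}

fn apply(p: Prepared) -> Result<(), String> {
    println!("committing {} entries", p.0.len());
    Ok(())
}

fn main() -> Result<(), String> {
    match pre_apply(&["entry1".into()])? {
        Some(p) => apply(p), // work to commit
        None => Ok(()),      // pre-apply said nothing to be done
    }
}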
+ Ok(()) + } } /// Used in conjunction with internal_batch_modify, to get a pre/post /// pair, where post is pre-configured with metadata to allow /// modificiation before submit back to internal_batch_modify + #[instrument(level = "debug", skip_all)] pub(crate) fn internal_search_writeable( &self, filter: &Filter, ) -> Result, OperationError> { - spanned!("server::internal_search_writeable", { - let f_valid = filter - .validate(self.get_schema()) - .map_err(OperationError::SchemaViolation)?; - let se = SearchEvent::new_internal(f_valid); - self.search(&se).map(|vs| { - vs.into_iter() - .map(|e| { - let writeable = e.as_ref().clone().invalidate(self.cid.clone()); - (e, writeable) - }) - .collect() - }) + let f_valid = filter + .validate(self.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let se = SearchEvent::new_internal(f_valid); + self.search(&se).map(|vs| { + vs.into_iter() + .map(|e| { + let writeable = e.as_ref().clone().invalidate(self.cid.clone()); + (e, writeable) + }) + .collect() }) } @@ -2083,113 +2034,112 @@ impl<'a> QueryServerWriteTransaction<'a> { /// uphold all other plugin and state rules that are important. You /// probably want modify instead. #[allow(clippy::needless_pass_by_value)] + #[instrument(level = "debug", skip_all)] pub(crate) fn internal_batch_modify( &self, pre_candidates: Vec>, candidates: Vec>, ) -> Result<(), OperationError> { - spanned!("server::internal_batch_modify", { - if pre_candidates.is_empty() && candidates.is_empty() { - // No action needed. - return Ok(()); - } + if pre_candidates.is_empty() && candidates.is_empty() { + // No action needed. + return Ok(()); + } - if pre_candidates.len() != candidates.len() { - admin_error!("internal_batch_modify - cand lengths differ"); - return Err(OperationError::InvalidRequestState); - } + if pre_candidates.len() != candidates.len() { + admin_error!("internal_batch_modify - cand lengths differ"); + return Err(OperationError::InvalidRequestState); + } - let res: Result>, OperationError> = candidates - .into_iter() - .map(|e| { - e.validate(&self.schema) - .map_err(|e| { - admin_error!( - "Schema Violation in internal_batch_modify validate: {:?}", - e - ); - OperationError::SchemaViolation(e) - }) - .map(|e| e.seal(&self.schema)) - }) - .collect(); + let res: Result>, OperationError> = candidates + .into_iter() + .map(|e| { + e.validate(&self.schema) + .map_err(|e| { + admin_error!( + "Schema Violation in internal_batch_modify validate: {:?}", + e + ); + OperationError::SchemaViolation(e) + }) + .map(|e| e.seal(&self.schema)) + }) + .collect(); - let norm_cand: Vec> = res?; + let norm_cand: Vec> = res?; - if cfg!(debug_assertions) { - pre_candidates - .iter() - .zip(norm_cand.iter()) - .try_for_each(|(pre, post)| { - if pre.get_uuid() == post.get_uuid() { - Ok(()) - } else { - admin_error!("modify - cand sets not correctly aligned"); - Err(OperationError::InvalidRequestState) - } - })?; - } - - // Backend Modify - self.be_txn - .modify(&self.cid, &pre_candidates, &norm_cand) - .map_err(|e| { - admin_error!("Modify operation failed (backend), {:?}", e); - e + if cfg!(debug_assertions) { + pre_candidates + .iter() + .zip(norm_cand.iter()) + .try_for_each(|(pre, post)| { + if pre.get_uuid() == post.get_uuid() { + Ok(()) + } else { + admin_error!("modify - cand sets not correctly aligned"); + Err(OperationError::InvalidRequestState) + } })?; + } - if !self.changed_schema.get() { - self.changed_schema.set( - norm_cand - .iter() - .chain(pre_candidates.iter().map(|e| e.as_ref())) - .any(|e| { - 
e.attribute_equality("class", &PVCLASS_CLASSTYPE) - || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE) - }), - ) - } - if !self.changed_acp.get() { - self.changed_acp.set( - norm_cand - .iter() - .chain(pre_candidates.iter().map(|e| e.as_ref())) - .any(|e| e.attribute_equality("class", &PVCLASS_ACP)), - ) - } - if !self.changed_oauth2.get() { - self.changed_oauth2.set( - norm_cand - .iter() - .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)), - ) - } - if !self.changed_domain.get() { - self.changed_domain.set( - norm_cand - .iter() - .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)), - ) - } - let cu = self.changed_uuid.as_ptr(); - unsafe { - (*cu).extend( - norm_cand - .iter() - .map(|e| e.get_uuid()) - .chain(pre_candidates.iter().map(|e| e.get_uuid())), - ); - } - trace!( - schema_reload = ?self.changed_schema, - acp_reload = ?self.changed_acp, - oauth2_reload = ?self.changed_oauth2, - domain_reload = ?self.changed_domain, + // Backend Modify + self.be_txn + .modify(&self.cid, &pre_candidates, &norm_cand) + .map_err(|e| { + admin_error!("Modify operation failed (backend), {:?}", e); + e + })?; + + if !self.changed_schema.get() { + self.changed_schema.set( + norm_cand + .iter() + .chain(pre_candidates.iter().map(|e| e.as_ref())) + .any(|e| { + e.attribute_equality("class", &PVCLASS_CLASSTYPE) + || e.attribute_equality("class", &PVCLASS_ATTRIBUTETYPE) + }), + ) + } + if !self.changed_acp.get() { + self.changed_acp.set( + norm_cand + .iter() + .chain(pre_candidates.iter().map(|e| e.as_ref())) + .any(|e| e.attribute_equality("class", &PVCLASS_ACP)), + ) + } + if !self.changed_oauth2.get() { + self.changed_oauth2.set( + norm_cand + .iter() + .any(|e| e.attribute_equality("class", &PVCLASS_OAUTH2_RS)), + ) + } + if !self.changed_domain.get() { + self.changed_domain.set( + norm_cand + .iter() + .any(|e| e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)), + ) + } + let cu = self.changed_uuid.as_ptr(); + unsafe { + (*cu).extend( + norm_cand + .iter() + .map(|e| e.get_uuid()) + .chain(pre_candidates.iter().map(|e| e.get_uuid())), ); + } + trace!( + schema_reload = ?self.changed_schema, + acp_reload = ?self.changed_acp, + oauth2_reload = ?self.changed_oauth2, + domain_reload = ?self.changed_domain, + ); - trace!("Modify operation success"); - Ok(()) - }) + trace!("Modify operation success"); + Ok(()) } pub(crate) fn get_dyngroup_cache(&self) -> &mut DynGroupCache { @@ -2200,149 +2150,143 @@ impl<'a> QueryServerWriteTransaction<'a> { } /// Migrate 2 to 3 changes the name, domain_name types from iutf8 to iname. + #[instrument(level = "debug", skip_all)] pub fn migrate_2_to_3(&self) -> Result<(), OperationError> { - spanned!("server::migrate_2_to_3", { - admin_warn!("starting 2 to 3 migration. THIS MAY TAKE A LONG TIME!"); - // Get all entries where pres name or domain_name. INCLUDE TS + RECYCLE. + admin_warn!("starting 2 to 3 migration. THIS MAY TAKE A LONG TIME!"); + // Get all entries where pres name or domain_name. INCLUDE TS + RECYCLE. - let filt = filter_all!(f_or!([f_pres("name"), f_pres("domain_name"),])); + let filt = filter_all!(f_or!([f_pres("name"), f_pres("domain_name"),])); - let pre_candidates = self.internal_search(filt).map_err(|e| { - admin_error!(err = ?e, "migrate_2_to_3 internal search failure"); - e - })?; + let pre_candidates = self.internal_search(filt).map_err(|e| { + admin_error!(err = ?e, "migrate_2_to_3 internal search failure"); + e + })?; - // If there is nothing, we donn't need to do anything. 
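The cfg!(debug_assertions) block in internal_batch_modify pairs the pre and post sets positionally and rejects any misalignment before the backend write. The same check in isolation, over plain integers instead of entries (uuid comparison stands in as equality):

fn check_aligned(pre: &[u32], post: &[u32]) -> Result<(), String> {
    if cfg!(debug_assertions) {
        pre.iter().zip(post.iter()).try_for_each(|(a, b)| {
            if a == b {
                Ok(())
            } else {
                Err(format!("cand sets not aligned: {} vs {}", a, b))
            }
        })?;
    }
    Ok(())
}

fn main() {
    assert!(check_aligned(&[1, 2], &[1, 2]).is_ok());
    assert!(check_aligned(&[1, 2], &[2, 1]).is_err());
}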
- if pre_candidates.is_empty() { - admin_info!("migrate_2_to_3 no entries to migrate, complete"); - return Ok(()); + // If there is nothing, we don't need to do anything. + if pre_candidates.is_empty() { + admin_info!("migrate_2_to_3 no entries to migrate, complete"); + return Ok(()); + } + + // Change the value type. + let mut candidates: Vec> = pre_candidates + .iter() + .map(|er| er.as_ref().clone().invalidate(self.cid.clone())) + .collect(); + + candidates.iter_mut().try_for_each(|er| { + let nvs = if let Some(vs) = er.get_ava_set("name") { + vs.migrate_iutf8_iname()? + } else { + None + }; + if let Some(nvs) = nvs { + er.set_ava_set("name", nvs) + } - // Change the value type. - let mut candidates: Vec> = pre_candidates - .iter() - .map(|er| er.as_ref().clone().invalidate(self.cid.clone())) - .collect(); - - candidates.iter_mut().try_for_each(|er| { - let nvs = if let Some(vs) = er.get_ava_set("name") { - vs.migrate_iutf8_iname()? - } else { - None - }; - if let Some(nvs) = nvs { - er.set_ava_set("name", nvs) - } - - let nvs = if let Some(vs) = er.get_ava_set("domain_name") { - vs.migrate_iutf8_iname()? - } else { - None - }; - if let Some(nvs) = nvs { - er.set_ava_set("domain_name", nvs) - } - - Ok(()) - })?; - - // Schema check all. - let res: Result>, SchemaError> = candidates - .into_iter() - .map(|e| e.validate(&self.schema).map(|e| e.seal(&self.schema))) - .collect(); - - let norm_cand: Vec> = match res { - Ok(v) => v, - Err(e) => { - admin_error!("migrate_2_to_3 schema error -> {:?}", e); - return Err(OperationError::SchemaViolation(e)); - } + let nvs = if let Some(vs) = er.get_ava_set("domain_name") { + vs.migrate_iutf8_iname()? + } else { + None }; + if let Some(nvs) = nvs { + er.set_ava_set("domain_name", nvs) + } - // Write them back. - self.be_txn - .modify(&self.cid, &pre_candidates, &norm_cand) - .map_err(|e| { - admin_error!("migrate_2_to_3 modification failure -> {:?}", e); - e - }) - // Complete - }) + Ok(()) + })?; + + // Schema check all. + let res: Result>, SchemaError> = candidates + .into_iter() + .map(|e| e.validate(&self.schema).map(|e| e.seal(&self.schema))) + .collect(); + + let norm_cand: Vec> = match res { + Ok(v) => v, + Err(e) => { + admin_error!("migrate_2_to_3 schema error -> {:?}", e); + return Err(OperationError::SchemaViolation(e)); + } + }; + + // Write them back. + self.be_txn + .modify(&self.cid, &pre_candidates, &norm_cand) + .map_err(|e| { + admin_error!("migrate_2_to_3 modification failure -> {:?}", e); + e + }) + // Complete } /// Migrate 3 to 4 - this triggers a regen of the domain's security token /// as we previously did not have it in the entry. + #[instrument(level = "debug", skip_all)] pub fn migrate_3_to_4(&self) -> Result<(), OperationError> { - spanned!("server::migrate_3_to_4", { - admin_warn!("starting 3 to 4 migration."); - let filter = filter!(f_eq("uuid", (*PVUUID_DOMAIN_INFO).clone())); - let modlist = ModifyList::new_purge("domain_token_key"); - self.internal_modify(&filter, &modlist) - // Complete - }) + admin_warn!("starting 3 to 4 migration."); + let filter = filter!(f_eq("uuid", (*PVUUID_DOMAIN_INFO).clone())); + let modlist = ModifyList::new_purge("domain_token_key"); + self.internal_modify(&filter, &modlist) + // Complete } /// Migrate 4 to 5 - this triggers a regen of all oauth2 RS es256 der keys /// as we previously did not generate them on entry creation.
+ #[instrument(level = "debug", skip_all)] pub fn migrate_4_to_5(&self) -> Result<(), OperationError> { - spanned!("server::migrate_4_to_5", { - admin_warn!("starting 4 to 5 migration."); - let filter = filter!(f_and!([ - f_eq("class", (*PVCLASS_OAUTH2_RS).clone()), - f_andnot(f_pres("es256_private_key_der")), - ])); - let modlist = ModifyList::new_purge("es256_private_key_der"); - self.internal_modify(&filter, &modlist) - // Complete - }) + admin_warn!("starting 4 to 5 migration."); + let filter = filter!(f_and!([ + f_eq("class", (*PVCLASS_OAUTH2_RS).clone()), + f_andnot(f_pres("es256_private_key_der")), + ])); + let modlist = ModifyList::new_purge("es256_private_key_der"); + self.internal_modify(&filter, &modlist) + // Complete } /// Migrate 5 to 6 - This updates the domain info item to reset the token /// keys based on the new encryption types. + #[instrument(level = "debug", skip_all)] pub fn migrate_5_to_6(&self) -> Result<(), OperationError> { - spanned!("server::migrate_5_to_6", { - admin_warn!("starting 5 to 6 migration."); - let filter = filter!(f_eq("uuid", (*PVUUID_DOMAIN_INFO).clone())); - let mut modlist = ModifyList::new_purge("domain_token_key"); - // We need to also push the version here so that we pass schema. - modlist.push_mod(Modify::Present( - AttrString::from("version"), - Value::Uint32(0), - )); - self.internal_modify(&filter, &modlist) - // Complete - }) + admin_warn!("starting 5 to 6 migration."); + let filter = filter!(f_eq("uuid", (*PVUUID_DOMAIN_INFO).clone())); + let mut modlist = ModifyList::new_purge("domain_token_key"); + // We need to also push the version here so that we pass schema. + modlist.push_mod(Modify::Present( + AttrString::from("version"), + Value::Uint32(0), + )); + self.internal_modify(&filter, &modlist) + // Complete } /// Migrate 6 to 7 /// /// Modify accounts that are not persons, to be service accounts so that the extension /// rules remain valid. 
+ #[instrument(level = "debug", skip_all)] pub fn migrate_6_to_7(&self) -> Result<(), OperationError> { - spanned!("server::migrate_6_to_7", { - admin_warn!("starting 6 to 7 migration."); - let filter = filter!(f_and!([ - f_eq("class", (*PVCLASS_ACCOUNT).clone()), - f_andnot(f_eq("class", (*PVCLASS_PERSON).clone())), - ])); - let modlist = ModifyList::new_append("class", Value::new_class("service_account")); - self.internal_modify(&filter, &modlist) - // Complete - }) + admin_warn!("starting 6 to 7 migration."); + let filter = filter!(f_and!([ + f_eq("class", (*PVCLASS_ACCOUNT).clone()), + f_andnot(f_eq("class", (*PVCLASS_PERSON).clone())), + ])); + let modlist = ModifyList::new_append("class", Value::new_class("service_account")); + self.internal_modify(&filter, &modlist) + // Complete } /// Migrate 7 to 8 /// /// Touch all service accounts to trigger a regen of their es256 jws keys for api tokens + #[instrument(level = "debug", skip_all)] pub fn migrate_7_to_8(&self) -> Result<(), OperationError> { - spanned!("server::migrate_7_to_8", { - admin_warn!("starting 7 to 8 migration."); - let filter = filter!(f_eq("class", (*PVCLASS_SERVICE_ACCOUNT).clone())); - let modlist = ModifyList::new_append("class", Value::new_class("service_account")); - self.internal_modify(&filter, &modlist) - // Complete - }) + admin_warn!("starting 7 to 8 migration."); + let filter = filter!(f_eq("class", (*PVCLASS_SERVICE_ACCOUNT).clone())); + let modlist = ModifyList::new_append("class", Value::new_class("service_account")); + self.internal_modify(&filter, &modlist) + // Complete } // These are where searches and other actions are actually implemented. This @@ -2367,21 +2311,20 @@ impl<'a> QueryServerWriteTransaction<'a> { self.delete(&de) } + #[instrument(level = "debug", skip_all)] pub fn internal_modify( &self, filter: &Filter, modlist: &ModifyList, ) -> Result<(), OperationError> { - spanned!("server::internal_modify", { - let f_valid = filter - .validate(self.get_schema()) - .map_err(OperationError::SchemaViolation)?; - let m_valid = modlist - .validate(self.get_schema()) - .map_err(OperationError::SchemaViolation)?; - let me = ModifyEvent::new_internal(f_valid, m_valid); - self.modify(&me) - }) + let f_valid = filter + .validate(self.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let m_valid = modlist + .validate(self.get_schema()) + .map_err(OperationError::SchemaViolation)?; + let me = ModifyEvent::new_internal(f_valid, m_valid); + self.modify(&me) } pub fn impersonate_modify_valid( @@ -2461,18 +2404,17 @@ impl<'a> QueryServerWriteTransaction<'a> { } */ + #[instrument(level = "debug", skip_all)] pub fn internal_migrate_or_create_str(&self, e_str: &str) -> Result<(), OperationError> { - let res = spanned!("server::internal_migrate_or_create_str", { - Entry::from_proto_entry_str(e_str, self) - /* - .and_then(|e: Entry| { - let schema = self.get_schema(); - e.validate(schema).map_err(OperationError::SchemaViolation) - }) - */ - .and_then(|e: Entry| self.internal_migrate_or_create(e)) - }); - trace!(?res, "internal_migrate_or_create_str -> result"); + let res = Entry::from_proto_entry_str(e_str, self) + /* + .and_then(|e: Entry| { + let schema = self.get_schema(); + e.validate(schema).map_err(OperationError::SchemaViolation) + }) + */ + .and_then(|e: Entry| self.internal_migrate_or_create(e)); + trace!(?res); debug_assert!(res.is_ok()); res } @@ -3024,25 +2966,24 @@ impl<'a> QueryServerWriteTransaction<'a> { } /// Pulls the domain name from the database and updates the DomainInfo data in 
memory + #[instrument(level = "debug", skip_all)] fn reload_domain_info(&mut self) -> Result<(), OperationError> { - spanned!("server::reload_domain_info", { - let domain_name = self.get_db_domain_name()?; - let display_name = self.get_db_domain_display_name()?; - let mut_d_info = self.d_info.get_mut(); - if mut_d_info.d_name != domain_name { - admin_warn!( - "Using domain name from the database {} - was {} in memory", - domain_name, - mut_d_info.d_name, - ); - admin_warn!( + let domain_name = self.get_db_domain_name()?; + let display_name = self.get_db_domain_display_name()?; + let mut_d_info = self.d_info.get_mut(); + if mut_d_info.d_name != domain_name { + admin_warn!( + "Using domain name from the database {} - was {} in memory", + domain_name, + mut_d_info.d_name, + ); + admin_warn!( "If you think this is an error, see https://kanidm.github.io/kanidm/stable/administrivia.html#rename-the-domain" ); - mut_d_info.d_name = domain_name; - } - mut_d_info.d_display = display_name; - Ok(()) - }) + mut_d_info.d_name = domain_name; + } + mut_d_info.d_display = display_name; + Ok(()) } /// Initiate a domain display name change process. This isn't particularly scary @@ -3183,13 +3124,15 @@ impl<'a> QueryServerWriteTransaction<'a> { #[cfg(test)] mod tests { + use std::sync::Arc; + use std::time::Duration; + + use kanidm_proto::v1::SchemaError; + use crate::credential::policy::CryptoPolicy; use crate::credential::Credential; use crate::event::{CreateEvent, DeleteEvent, ModifyEvent, ReviveRecycledEvent, SearchEvent}; use crate::prelude::*; - use kanidm_proto::v1::SchemaError; - use std::sync::Arc; - use std::time::Duration; #[test] fn test_qs_create_user() { diff --git a/kanidmd/idm/src/status.rs b/kanidmd/idm/src/status.rs index 2f700813f..9ea46dd41 100644 --- a/kanidmd/idm/src/status.rs +++ b/kanidmd/idm/src/status.rs @@ -1,8 +1,9 @@ //! An actor that shows the servers current status and statistics. (TODO). 
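The hunks from here to the end of the patch are mechanical import reordering: std imports first, external crates second, the local crate last, with braces merged per module. This layout is consistent with rustfmt's nightly group_imports = "StdExternalCrate" option. A compilable sketch of the target grouping; the module names are illustrative, and group two is shown as a comment since a standalone file has no external dependencies:

// Group 1: std.
use std::collections::BTreeSet;

// Group 2: external crates would sit here (e.g. `use smolset::SmolSet;`).

// Group 3: the local crate.
use crate::helpers::double;

mod helpers {
    pub fn double(x: u32) -> u32 {
        x * 2
    }
}

fn main() {
    let mut set = BTreeSet::new();
    set.insert(double(2));
    assert!(set.contains(&4));
}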
-use crate::prelude::*; use uuid::Uuid; +use crate::prelude::*; + pub struct StatusRequestEvent { pub eventid: Uuid, } diff --git a/kanidmd/idm/src/utils.rs b/kanidmd/idm/src/utils.rs index 70922a4ff..d28ceb75c 100644 --- a/kanidmd/idm/src/utils.rs +++ b/kanidmd/idm/src/utils.rs @@ -1,26 +1,23 @@ -use hashbrown::HashSet; -use std::io::ErrorKind; -use std::path::PathBuf; -use std::time::{Duration, SystemTime}; - -use filetime::FileTime; -use touch::file as touch_file; -use uuid::{Builder, Uuid}; - -use rand::distributions::Distribution; -use rand::{thread_rng, Rng}; - #[cfg(not(target_family = "windows"))] use std::fs::Metadata; +use std::io::ErrorKind; #[cfg(target_os = "linux")] use std::os::linux::fs::MetadataExt; #[cfg(target_os = "macos")] use std::os::macos::fs::MetadataExt; +use std::path::PathBuf; +use std::time::{Duration, SystemTime}; + +use filetime::FileTime; +use hashbrown::HashSet; +use rand::distributions::Distribution; +use rand::{thread_rng, Rng}; +use touch::file as touch_file; // #[cfg(target_os = "windows")] // use std::os::windows::fs::MetadataExt; - #[cfg(target_family = "unix")] use users::{get_current_gid, get_current_uid}; +use uuid::{Builder, Uuid}; #[derive(Debug)] pub struct DistinctAlpha; @@ -188,10 +185,12 @@ pub fn file_permissions_readonly(meta: &Metadata) -> bool { #[cfg(test)] mod tests { - use crate::utils::{uuid_from_duration, uuid_to_gid_u32}; use std::time::Duration; + use uuid::Uuid; + use crate::utils::{uuid_from_duration, uuid_to_gid_u32}; + #[test] fn test_utils_uuid_from_duration() { let u1 = uuid_from_duration(Duration::from_secs(1), [0xff; 4]); diff --git a/kanidmd/idm/src/value.rs b/kanidmd/idm/src/value.rs index 883f0b398..258a64672 100644 --- a/kanidmd/idm/src/value.rs +++ b/kanidmd/idm/src/value.rs @@ -3,29 +3,26 @@ //! typed values, allows their comparison, filtering and more. It also has the code for serialising //! these into a form for the backend that can be persistent into the [`Backend`](crate::be::Backend). -use crate::be::dbentry::DbIdentSpn; -use crate::credential::Credential; -use crate::identity::IdentityId; -use crate::repl::cid::Cid; -use kanidm_proto::v1::Filter as ProtoFilter; - -use compact_jwt::JwsSigner; -use serde::{Deserialize, Serialize}; use std::collections::BTreeSet; use std::convert::TryFrom; use std::fmt; use std::str::FromStr; use std::time::Duration; + +use compact_jwt::JwsSigner; +use kanidm_proto::v1::Filter as ProtoFilter; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use sshkeys::PublicKey as SshPublicKey; use time::OffsetDateTime; use url::Url; use uuid::Uuid; +use webauthn_rs::prelude::{DeviceKey as DeviceKeyV4, Passkey as PasskeyV4}; -use sshkeys::PublicKey as SshPublicKey; - -use regex::Regex; - -use webauthn_rs::prelude::DeviceKey as DeviceKeyV4; -use webauthn_rs::prelude::Passkey as PasskeyV4; +use crate::be::dbentry::DbIdentSpn; +use crate::credential::Credential; +use crate::identity::IdentityId; +use crate::repl::cid::Cid; lazy_static! 
{ pub static ref SPN_RE: Regex = { diff --git a/kanidmd/idm/src/valueset/address.rs b/kanidmd/idm/src/valueset/address.rs index 7c0db4f7a..74c01c37d 100644 --- a/kanidmd/idm/src/valueset/address.rs +++ b/kanidmd/idm/src/valueset/address.rs @@ -1,12 +1,12 @@ +use std::collections::BTreeSet; + +use smolset::SmolSet; + use crate::be::dbvalue::DbValueAddressV1; use crate::prelude::*; use crate::schema::SchemaAttribute; use crate::value::Address; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use smolset::SmolSet; - -use std::collections::BTreeSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetAddress { diff --git a/kanidmd/idm/src/valueset/binary.rs b/kanidmd/idm/src/valueset/binary.rs index 2b2d65fec..8e31ad374 100644 --- a/kanidmd/idm/src/valueset/binary.rs +++ b/kanidmd/idm/src/valueset/binary.rs @@ -1,12 +1,12 @@ -use crate::prelude::*; -use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use smolset::SmolSet; - use std::collections::btree_map::Entry as BTreeEntry; use std::collections::BTreeMap; +use smolset::SmolSet; + +use crate::prelude::*; +use crate::schema::SchemaAttribute; +use crate::valueset::{DbValueSetV2, ValueSet}; + #[derive(Debug, Clone)] pub struct ValueSetPrivateBinary { set: SmolSet<[Vec; 1]>, diff --git a/kanidmd/idm/src/valueset/bool.rs b/kanidmd/idm/src/valueset/bool.rs index 3f1efda3c..b5e8ba7a5 100644 --- a/kanidmd/idm/src/valueset/bool.rs +++ b/kanidmd/idm/src/valueset/bool.rs @@ -1,8 +1,8 @@ +use smolset::SmolSet; + use crate::prelude::*; use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use smolset::SmolSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetBool { diff --git a/kanidmd/idm/src/valueset/cid.rs b/kanidmd/idm/src/valueset/cid.rs index a864cbede..36aedacc9 100644 --- a/kanidmd/idm/src/valueset/cid.rs +++ b/kanidmd/idm/src/valueset/cid.rs @@ -1,11 +1,10 @@ +use smolset::SmolSet; + use crate::be::dbvalue::DbCidV1; use crate::prelude::*; use crate::repl::cid::Cid; use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; - -use smolset::SmolSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetCid { diff --git a/kanidmd/idm/src/valueset/cred.rs b/kanidmd/idm/src/valueset/cred.rs index 366149430..a22f0b108 100644 --- a/kanidmd/idm/src/valueset/cred.rs +++ b/kanidmd/idm/src/valueset/cred.rs @@ -1,18 +1,15 @@ -use crate::prelude::*; -use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; use std::collections::btree_map::Entry as BTreeEntry; use std::collections::BTreeMap; +use webauthn_rs::prelude::{DeviceKey as DeviceKeyV4, Passkey as PasskeyV4}; + use crate::be::dbvalue::{ DbValueCredV1, DbValueDeviceKeyV1, DbValueIntentTokenStateV1, DbValuePasskeyV1, }; use crate::credential::Credential; -use crate::valueset::IntentTokenState; - -use webauthn_rs::prelude::DeviceKey as DeviceKeyV4; -use webauthn_rs::prelude::Passkey as PasskeyV4; +use crate::prelude::*; +use crate::schema::SchemaAttribute; +use crate::valueset::{DbValueSetV2, IntentTokenState, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetCredential { diff --git a/kanidmd/idm/src/valueset/datetime.rs b/kanidmd/idm/src/valueset/datetime.rs index 8ea8ab6ee..4741fdf02 100644 --- a/kanidmd/idm/src/valueset/datetime.rs +++ 
b/kanidmd/idm/src/valueset/datetime.rs @@ -1,10 +1,10 @@ -use crate::prelude::*; -use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; use smolset::SmolSet; use time::OffsetDateTime; +use crate::prelude::*; +use crate::schema::SchemaAttribute; +use crate::valueset::{DbValueSetV2, ValueSet}; + #[derive(Debug, Clone)] pub struct ValueSetDateTime { set: SmolSet<[OffsetDateTime; 1]>, diff --git a/kanidmd/idm/src/valueset/iname.rs b/kanidmd/idm/src/valueset/iname.rs index b44308a97..a9d35b99e 100644 --- a/kanidmd/idm/src/valueset/iname.rs +++ b/kanidmd/idm/src/valueset/iname.rs @@ -1,10 +1,9 @@ -use crate::prelude::*; -use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; use std::collections::BTreeSet; +use crate::prelude::*; +use crate::schema::SchemaAttribute; use crate::value::INAME_RE; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetIname { diff --git a/kanidmd/idm/src/valueset/index.rs b/kanidmd/idm/src/valueset/index.rs index 1969b0b7e..7aebcb66a 100644 --- a/kanidmd/idm/src/valueset/index.rs +++ b/kanidmd/idm/src/valueset/index.rs @@ -1,8 +1,8 @@ +use smolset::SmolSet; + use crate::prelude::*; use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use smolset::SmolSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetIndex { diff --git a/kanidmd/idm/src/valueset/iutf8.rs b/kanidmd/idm/src/valueset/iutf8.rs index d6b4da6d8..8fd50d56e 100644 --- a/kanidmd/idm/src/valueset/iutf8.rs +++ b/kanidmd/idm/src/valueset/iutf8.rs @@ -1,10 +1,9 @@ -use crate::prelude::*; -use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; use std::collections::BTreeSet; use super::iname::ValueSetIname; +use crate::prelude::*; +use crate::schema::SchemaAttribute; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetIutf8 { diff --git a/kanidmd/idm/src/valueset/json.rs b/kanidmd/idm/src/valueset/json.rs index 6f234892f..09449f302 100644 --- a/kanidmd/idm/src/valueset/json.rs +++ b/kanidmd/idm/src/valueset/json.rs @@ -1,10 +1,10 @@ -use crate::prelude::*; -use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; use kanidm_proto::v1::Filter as ProtoFilter; use smolset::SmolSet; +use crate::prelude::*; +use crate::schema::SchemaAttribute; +use crate::valueset::{DbValueSetV2, ValueSet}; + #[derive(Debug, Clone)] pub struct ValueSetJsonFilter { set: SmolSet<[ProtoFilter; 1]>, diff --git a/kanidmd/idm/src/valueset/jws.rs b/kanidmd/idm/src/valueset/jws.rs index 3d85ffefd..063099050 100644 --- a/kanidmd/idm/src/valueset/jws.rs +++ b/kanidmd/idm/src/valueset/jws.rs @@ -1,10 +1,9 @@ -use crate::prelude::*; -use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; +use compact_jwt::{JwaAlg, JwsSigner}; use hashbrown::HashSet; -use compact_jwt::{JwaAlg, JwsSigner}; +use crate::prelude::*; +use crate::schema::SchemaAttribute; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetJwsKeyEs256 { diff --git a/kanidmd/idm/src/valueset/mod.rs b/kanidmd/idm/src/valueset/mod.rs index de9e054c0..65ce03f6f 100644 --- a/kanidmd/idm/src/valueset/mod.rs +++ b/kanidmd/idm/src/valueset/mod.rs @@ -1,27 +1,21 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use compact_jwt::JwsSigner; +use 
dyn_clone::DynClone; +use hashbrown::HashSet; +use kanidm_proto::v1::Filter as ProtoFilter; +use smolset::SmolSet; +use time::OffsetDateTime; +// use std::fmt::Debug; +use webauthn_rs::prelude::DeviceKey as DeviceKeyV4; +use webauthn_rs::prelude::Passkey as PasskeyV4; + +use crate::be::dbvalue::DbValueSetV2; use crate::credential::Credential; use crate::prelude::*; use crate::repl::cid::Cid; use crate::schema::SchemaAttribute; - -use crate::be::dbvalue::DbValueSetV2; -use crate::value::Address; -use crate::value::IntentTokenState; -use crate::value::Session; -use compact_jwt::JwsSigner; - -use kanidm_proto::v1::Filter as ProtoFilter; - -use std::collections::{BTreeMap, BTreeSet}; - -use dyn_clone::DynClone; -use hashbrown::HashSet; -use smolset::SmolSet; -// use std::fmt::Debug; - -use webauthn_rs::prelude::DeviceKey as DeviceKeyV4; -use webauthn_rs::prelude::Passkey as PasskeyV4; - -use time::OffsetDateTime; +use crate::value::{Address, IntentTokenState, Session}; mod address; mod binary; @@ -69,8 +63,7 @@ pub use self::syntax::ValueSetSyntax; pub use self::uint32::ValueSetUint32; pub use self::url::ValueSetUrl; pub use self::utf8::ValueSetUtf8; -pub use self::uuid::ValueSetRefer; -pub use self::uuid::ValueSetUuid; +pub use self::uuid::{ValueSetRefer, ValueSetUuid}; pub type ValueSet = Box; diff --git a/kanidmd/idm/src/valueset/nsuniqueid.rs b/kanidmd/idm/src/valueset/nsuniqueid.rs index ee1e69984..b2f06d403 100644 --- a/kanidmd/idm/src/valueset/nsuniqueid.rs +++ b/kanidmd/idm/src/valueset/nsuniqueid.rs @@ -1,9 +1,9 @@ +use smolset::SmolSet; + use crate::prelude::*; use crate::schema::SchemaAttribute; use crate::value::NSUNIQUEID_RE; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use smolset::SmolSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetNsUniqueId { diff --git a/kanidmd/idm/src/valueset/oauth.rs b/kanidmd/idm/src/valueset/oauth.rs index 10f7f6c7c..e6af6d08c 100644 --- a/kanidmd/idm/src/valueset/oauth.rs +++ b/kanidmd/idm/src/valueset/oauth.rs @@ -1,15 +1,11 @@ -use crate::prelude::*; -use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use std::collections::BTreeSet; +use std::collections::btree_map::Entry as BTreeEntry; +use std::collections::{BTreeMap, BTreeSet}; use crate::be::dbvalue::DbValueOauthScopeMapV1; -use crate::valueset::uuid_to_proto_string; -use std::collections::btree_map::Entry as BTreeEntry; -use std::collections::BTreeMap; - +use crate::prelude::*; +use crate::schema::SchemaAttribute; use crate::value::OAUTHSCOPE_RE; +use crate::valueset::{uuid_to_proto_string, DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetOauthScope { diff --git a/kanidmd/idm/src/valueset/restricted.rs b/kanidmd/idm/src/valueset/restricted.rs index 9a12bdc18..3f680977d 100644 --- a/kanidmd/idm/src/valueset/restricted.rs +++ b/kanidmd/idm/src/valueset/restricted.rs @@ -1,8 +1,8 @@ +use std::collections::BTreeSet; + use crate::prelude::*; use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use std::collections::BTreeSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetRestricted { diff --git a/kanidmd/idm/src/valueset/secret.rs b/kanidmd/idm/src/valueset/secret.rs index 2482b8b32..c906eeab2 100644 --- a/kanidmd/idm/src/valueset/secret.rs +++ b/kanidmd/idm/src/valueset/secret.rs @@ -1,8 +1,8 @@ +use smolset::SmolSet; + use crate::prelude::*; use 
crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use smolset::SmolSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetSecret { diff --git a/kanidmd/idm/src/valueset/session.rs b/kanidmd/idm/src/valueset/session.rs index 0cd6353c3..49a26db99 100644 --- a/kanidmd/idm/src/valueset/session.rs +++ b/kanidmd/idm/src/valueset/session.rs @@ -1,15 +1,14 @@ +use std::collections::btree_map::Entry as BTreeEntry; +use std::collections::BTreeMap; + +use time::OffsetDateTime; + use crate::be::dbvalue::{DbValueIdentityId, DbValueSession}; use crate::identity::IdentityId; use crate::prelude::*; use crate::schema::SchemaAttribute; use crate::value::Session; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use std::collections::btree_map::Entry as BTreeEntry; -use std::collections::BTreeMap; -use time::OffsetDateTime; - -use crate::valueset::uuid_to_proto_string; +use crate::valueset::{uuid_to_proto_string, DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetSession { diff --git a/kanidmd/idm/src/valueset/spn.rs b/kanidmd/idm/src/valueset/spn.rs index 489e27cd5..6b001a71b 100644 --- a/kanidmd/idm/src/valueset/spn.rs +++ b/kanidmd/idm/src/valueset/spn.rs @@ -1,9 +1,8 @@ +use smolset::SmolSet; + use crate::prelude::*; use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; - -use smolset::SmolSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetSpn { diff --git a/kanidmd/idm/src/valueset/ssh.rs b/kanidmd/idm/src/valueset/ssh.rs index f9d449ce9..eab58aad8 100644 --- a/kanidmd/idm/src/valueset/ssh.rs +++ b/kanidmd/idm/src/valueset/ssh.rs @@ -1,11 +1,10 @@ -use crate::prelude::*; -use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; use std::collections::btree_map::Entry as BTreeEntry; use std::collections::BTreeMap; use crate::be::dbvalue::DbValueTaggedStringV1; +use crate::prelude::*; +use crate::schema::SchemaAttribute; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetSshKey { diff --git a/kanidmd/idm/src/valueset/syntax.rs b/kanidmd/idm/src/valueset/syntax.rs index 098211442..c094db468 100644 --- a/kanidmd/idm/src/valueset/syntax.rs +++ b/kanidmd/idm/src/valueset/syntax.rs @@ -1,8 +1,8 @@ +use smolset::SmolSet; + use crate::prelude::*; use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use smolset::SmolSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetSyntax { diff --git a/kanidmd/idm/src/valueset/uint32.rs b/kanidmd/idm/src/valueset/uint32.rs index f978de79b..f82a31b28 100644 --- a/kanidmd/idm/src/valueset/uint32.rs +++ b/kanidmd/idm/src/valueset/uint32.rs @@ -1,8 +1,8 @@ +use smolset::SmolSet; + use crate::prelude::*; use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use smolset::SmolSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetUint32 { diff --git a/kanidmd/idm/src/valueset/url.rs b/kanidmd/idm/src/valueset/url.rs index ec3993edd..b011425f8 100644 --- a/kanidmd/idm/src/valueset/url.rs +++ b/kanidmd/idm/src/valueset/url.rs @@ -1,8 +1,8 @@ +use smolset::SmolSet; + use crate::prelude::*; use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use 
smolset::SmolSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetUrl { diff --git a/kanidmd/idm/src/valueset/utf8.rs b/kanidmd/idm/src/valueset/utf8.rs index 04e18c4e2..6d04ae262 100644 --- a/kanidmd/idm/src/valueset/utf8.rs +++ b/kanidmd/idm/src/valueset/utf8.rs @@ -1,8 +1,8 @@ +use std::collections::BTreeSet; + use crate::prelude::*; use crate::schema::SchemaAttribute; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; -use std::collections::BTreeSet; +use crate::valueset::{DbValueSetV2, ValueSet}; #[derive(Debug, Clone)] pub struct ValueSetUtf8 { diff --git a/kanidmd/idm/src/valueset/uuid.rs b/kanidmd/idm/src/valueset/uuid.rs index dbdcac214..bac7933a2 100644 --- a/kanidmd/idm/src/valueset/uuid.rs +++ b/kanidmd/idm/src/valueset/uuid.rs @@ -1,12 +1,11 @@ -use crate::prelude::*; -use crate::schema::SchemaAttribute; -use crate::valueset::uuid_to_proto_string; -use crate::valueset::DbValueSetV2; -use crate::valueset::ValueSet; use std::collections::BTreeSet; use smolset::SmolSet; +use crate::prelude::*; +use crate::schema::SchemaAttribute; +use crate::valueset::{uuid_to_proto_string, DbValueSetV2, ValueSet}; + #[derive(Debug, Clone)] pub struct ValueSetUuid { set: SmolSet<[Uuid; 1]>, diff --git a/kanidmd/score/Cargo.toml b/kanidmd/score/Cargo.toml index 3743627a2..ff5160d11 100644 --- a/kanidmd/score/Cargo.toml +++ b/kanidmd/score/Cargo.toml @@ -1,52 +1,52 @@ [package] name = "score" -version = "1.1.0-alpha.9" -authors = ["William Brown "] -rust-version = "1.59" -edition = "2021" -license = "MPL-2.0" description = "Kanidm Server Library and Binary" documentation = "https://docs.rs/kanidm/latest/kanidm/" -homepage = "https://github.com/kanidm/kanidm/" -repository = "https://github.com/kanidm/kanidm/" + +version.workspace = true +authors.workspace = true +rust-version.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -async-std = { version = "^1.12.0", features = ["tokio1"] } -async-trait = "^0.1.57" -compact_jwt = "^0.2.3" -futures-util = "^0.3.21" -http-types = "^2.12.0" -kanidm = { path = "../idm" } -kanidm_proto = { path = "../../kanidm_proto" } -ldap3_proto = "^0.2.3" -libc = "^0.2.127" -openssl = "^0.10.41" -regex = "1.5.6" -serde = { version = "^1.0.142", features = ["derive"] } -serde_json = "^1.0.83" -sketching = { path = "../../sketching" } -tide = "^0.16.0" +async-std = { workspace = true, features = ["tokio1"] } +async-trait.workspace = true +compact_jwt.workspace = true +futures-util.workspace = true +http-types.workspace = true +kanidm.workspace = true +kanidm_proto.workspace = true +ldap3_proto.workspace = true +libc.workspace = true +openssl.workspace = true +regex.workspace = true +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +sketching.workspace = true +tide.workspace = true # I tried including brotli and it didn't work, including "default" pulls a mime-type list from the internet on build -tide-compress = { version = "0.10.6", default-features = false, features = [ "deflate", "gzip", "regex-check" ] } -tide-openssl = "^0.1.1" -tokio = { version = "^1.21.1", features = ["net", "sync", "io-util", "macros"] } -tokio-openssl = "^0.6.3" -tokio-util = { version = "^0.7.4", features = ["codec"] } -tracing = { version = "^0.1.35", features = ["attributes"] } -uuid = { version = "^1.1.2", features = 
["serde", "v4" ] } +tide-compress = { workspace = true, default-features = false, features = [ "deflate", "gzip", "regex-check" ] } +tide-openssl.workspace = true +tokio = { workspace = true, features = ["net", "sync", "io-util", "macros"] } +tokio-openssl.workspace = true +tokio-util = { workspace = true, features = ["codec"] } +tracing = { workspace = true, features = ["attributes"] } +uuid = { workspace = true, features = ["serde", "v4" ] } [build-dependencies] -profiles = { path = "../../profiles" } +profiles.workspace = true [dev-dependencies] -kanidm_client = { path = "../../kanidm_client" } -futures = "^0.3.21" +kanidm_client.workspace = true +futures.workspace = true webauthn-authenticator-rs.workspace = true -oauth2_ext = { package = "oauth2", version = "^4.1.0", default-features = false } -base64 = "^0.13.0" +oauth2_ext = { workspace = true, default-features = false } -url = { version = "^2.3.1", features = ["serde"] } -reqwest = { version = "0.11.11", features=["cookies", "json", "native-tls"] } +url = { workspace = true, features = ["serde"] } +reqwest = { workspace = true, features=["cookies", "json", "native-tls"] } diff --git a/kanidmd/score/src/https/manifest.rs b/kanidmd/score/src/https/manifest.rs index c76b348b6..d1634165d 100644 --- a/kanidmd/score/src/https/manifest.rs +++ b/kanidmd/score/src/https/manifest.rs @@ -1,7 +1,8 @@ +use serde::{Deserialize, Serialize}; + ///! Builds a Progressive Web App Manifest page. // Thanks to the webmanifest crate for a lot of this code use crate::https::{AppState, RequestExtensions}; -use serde::{Deserialize, Serialize}; /// The MIME type for `.webmanifest` files. const MIME_TYPE_MANIFEST: &str = "application/manifest+json;charset=utf-8"; diff --git a/kanidmd/score/src/https/middleware.rs b/kanidmd/score/src/https/middleware.rs index fc163768a..27c3b5114 100644 --- a/kanidmd/score/src/https/middleware.rs +++ b/kanidmd/score/src/https/middleware.rs @@ -1,6 +1,7 @@ +use regex::Regex; + ///! Custom tide middleware for Kanidm use crate::https::JavaScriptFile; -use regex::Regex; /// This is for the tide_compression middleware so that we only compress certain content types. 
/// diff --git a/kanidmd/score/src/https/mod.rs b/kanidmd/score/src/https/mod.rs index bc935bb03..c4a831220 100644 --- a/kanidmd/score/src/https/mod.rs +++ b/kanidmd/score/src/https/mod.rs @@ -4,11 +4,9 @@ mod oauth2; mod routemaps; mod v1; -use self::manifest::manifest; -use self::middleware::*; -use self::oauth2::*; -use self::routemaps::{RouteMap, RouteMaps}; -use self::v1::*; +use std::fs::canonicalize; +use std::path::PathBuf; +use std::str::FromStr; use compact_jwt::{Jws, JwsSigner, JwsUnverified, JwsValidator}; use kanidm::actors::v1_read::QueryServerReadV1; @@ -17,14 +15,17 @@ use kanidm::config::{ServerRole, TlsConfiguration}; use kanidm::prelude::*; use kanidm::status::StatusActor; use serde::Serialize; -use std::fs::canonicalize; -use std::path::PathBuf; -use std::str::FromStr; use tide_compress::CompressMiddleware; use tide_openssl::TlsListener; use tracing::{error, info}; use uuid::Uuid; +use self::manifest::manifest; +use self::middleware::*; +use self::oauth2::*; +use self::routemaps::{RouteMap, RouteMaps}; +use self::v1::*; + #[derive(Clone)] pub struct JavaScriptFile { // Relative to the pkg/ dir diff --git a/kanidmd/score/src/https/oauth2.rs b/kanidmd/score/src/https/oauth2.rs index 85270b4ff..e40d65524 100644 --- a/kanidmd/score/src/https/oauth2.rs +++ b/kanidmd/score/src/https/oauth2.rs @@ -1,5 +1,3 @@ -use super::v1::{json_rest_event_get, json_rest_event_post}; -use super::{to_tide_response, AppState, RequestExtensions}; use kanidm::idm::oauth2::{ AccessTokenIntrospectRequest, AccessTokenRequest, AuthorisationRequest, AuthorisePermitSuccess, AuthoriseResponse, ErrorResponse, Oauth2Error, @@ -9,6 +7,9 @@ use kanidm_proto::oauth2::AuthorisationResponse; use kanidm_proto::v1::Entry as ProtoEntry; use serde::{Deserialize, Serialize}; +use super::v1::{json_rest_event_get, json_rest_event_post}; +use super::{to_tide_response, AppState, RequestExtensions}; + // == Oauth2 Configuration Endpoints == pub async fn oauth2_get(req: tide::Request<AppState>) -> tide::Result { diff --git a/kanidmd/score/src/https/routemaps.rs b/kanidmd/score/src/https/routemaps.rs index cace10f13..8befd96c5 100644 --- a/kanidmd/score/src/https/routemaps.rs +++ b/kanidmd/score/src/https/routemaps.rs @@ -1,11 +1,11 @@ -use crate::https::AppState; ///! Route-mapping magic for tide /// /// Instead of adding routes with (for example) the .post method you add them with .mapped_post, passing an instance of [RouteMap] and it'll do the rest...
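To make the pattern described in that doc comment concrete, here is a minimal, self-contained sketch of how such an extension method can record a route while registering it. The `RouteMap`, `RouteInfo`, and `mapped_*` names mirror the diff; the `Route` and `Method` types below are simplified stand-ins for the tide and http_types types, so treat this as an illustration of the technique rather than the real server code.

```rust
// Hypothetical stand-ins for tide's Route and http_types::Method; the real
// trait in this diff extends tide::Route<'_, AppState>.
#[derive(Debug, Clone, Copy)]
enum Method {
    Get,
    Post,
}

#[derive(Debug)]
struct RouteInfo {
    path: String,
    method: Method,
}

#[derive(Debug, Default)]
struct RouteMap {
    routelist: Vec<RouteInfo>,
}

struct Route {
    path: String,
}

impl Route {
    // Record the route in the RouteMap, then register it as usual.
    // (The real code forwards to tide's `self.method(method, ep)`.)
    fn mapped_method(&mut self, routemap: &mut RouteMap, method: Method) -> &mut Self {
        routemap.routelist.push(RouteInfo {
            path: self.path.clone(),
            method,
        });
        self
    }

    fn mapped_get(&mut self, routemap: &mut RouteMap) -> &mut Self {
        self.mapped_method(routemap, Method::Get)
    }

    fn mapped_post(&mut self, routemap: &mut RouteMap) -> &mut Self {
        self.mapped_method(routemap, Method::Post)
    }
}

fn main() {
    let mut routemap = RouteMap::default();
    let mut route = Route { path: "/v1/auth".to_string() };
    route.mapped_get(&mut routemap).mapped_post(&mut routemap);
    // The real RouteMap::do_map serialises this list as pretty-printed JSON.
    println!("{:?}", routemap.routelist);
}
```

The payoff of the pattern is that every registered endpoint is also captured in one list, which the `do_map` method in the hunk below dumps as JSON.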
-/// use serde::{Deserialize, Serialize}; use tide::{Endpoint, Route}; +use crate::https::AppState; + // Extends the tide::Route for RouteMaps, this would really be nice if it was generic :( pub trait RouteMaps { fn mapped_method( @@ -43,21 +43,27 @@ impl RouteMaps for Route<'_, AppState> { routemap.routelist.push(RouteInfo { path, method }); self.method(method, ep) } + fn mapped_delete(&mut self, routemap: &mut RouteMap, ep: impl Endpoint) -> &mut Self { self.mapped_method(routemap, http_types::Method::Delete, ep) } + fn mapped_get(&mut self, routemap: &mut RouteMap, ep: impl Endpoint) -> &mut Self { self.mapped_method(routemap, http_types::Method::Get, ep) } + fn mapped_patch(&mut self, routemap: &mut RouteMap, ep: impl Endpoint) -> &mut Self { self.mapped_method(routemap, http_types::Method::Patch, ep) } + fn mapped_post(&mut self, routemap: &mut RouteMap, ep: impl Endpoint) -> &mut Self { self.mapped_method(routemap, http_types::Method::Post, ep) } + fn mapped_put(&mut self, routemap: &mut RouteMap, ep: impl Endpoint) -> &mut Self { self.mapped_method(routemap, http_types::Method::Put, ep) } + fn mapped_update(&mut self, routemap: &mut RouteMap, ep: impl Endpoint) -> &mut Self { self.mapped_method(routemap, http_types::Method::Update, ep) } @@ -88,6 +94,7 @@ impl RouteMap { pub fn do_map(&self) -> String { serde_json::to_string_pretty(self).unwrap() } + // Inject the route for the routemap endpoint pub fn push_self(&mut self, path: String, method: http_types::Method) { self.routelist.push(RouteInfo { path, method }); diff --git a/kanidmd/score/src/https/v1.rs b/kanidmd/score/src/https/v1.rs index 464d8adba..3dbf6d482 100644 --- a/kanidmd/score/src/https/v1.rs +++ b/kanidmd/score/src/https/v1.rs @@ -1,23 +1,21 @@ +use std::str::FromStr; +use std::time::Duration; + +use async_std::task; +use compact_jwt::Jws; use kanidm::event::AuthResult; use kanidm::filter::{Filter, FilterInvalid}; use kanidm::idm::AuthState; use kanidm::prelude::*; use kanidm::status::StatusRequestEvent; - -use kanidm_proto::v1::Entry as ProtoEntry; use kanidm_proto::v1::{ AccountUnixExtend, ApiTokenGenerate, AuthRequest, AuthResponse, AuthState as ProtoAuthState, - CUIntentToken, CURequest, CUSessionToken, CreateRequest, DeleteRequest, GroupUnixExtend, - ModifyRequest, OperationError, SearchRequest, SingleStringRequest, + CUIntentToken, CURequest, CUSessionToken, CreateRequest, DeleteRequest, Entry as ProtoEntry, + GroupUnixExtend, ModifyRequest, OperationError, SearchRequest, SingleStringRequest, }; +use serde::{Deserialize, Serialize}; use super::{to_tide_response, AppState, RequestExtensions, RouteMap}; -use async_std::task; -use compact_jwt::Jws; -use std::str::FromStr; -use std::time::Duration; - -use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, Clone)] pub(crate) struct SessionId { diff --git a/kanidmd/score/src/ldaps.rs b/kanidmd/score/src/ldaps.rs index 823a27b8f..90bb3408f 100644 --- a/kanidmd/score/src/ldaps.rs +++ b/kanidmd/score/src/ldaps.rs @@ -1,18 +1,19 @@ -use kanidm::actors::v1_read::QueryServerReadV1; -use kanidm::ldap::{LdapBoundToken, LdapResponseState}; -use kanidm::prelude::*; -use openssl::ssl::{Ssl, SslAcceptor, SslAcceptorBuilder}; +use std::marker::Unpin; +use std::net; use std::pin::Pin; -use tokio_openssl::SslStream; +use std::str::FromStr; use futures_util::sink::SinkExt; use futures_util::stream::StreamExt; -use ldap3_proto::{proto::LdapMsg, LdapCodec}; -use std::marker::Unpin; -use std::net; -use std::str::FromStr; +use 
kanidm::actors::v1_read::QueryServerReadV1; +use kanidm::ldap::{LdapBoundToken, LdapResponseState}; +use kanidm::prelude::*; +use ldap3_proto::proto::LdapMsg; +use ldap3_proto::LdapCodec; +use openssl::ssl::{Ssl, SslAcceptor, SslAcceptorBuilder}; use tokio::io::{AsyncRead, AsyncWrite}; use tokio::net::TcpListener; +use tokio_openssl::SslStream; use tokio_util::codec::{FramedRead, FramedWrite}; struct LdapSession { diff --git a/kanidmd/score/src/lib.rs b/kanidmd/score/src/lib.rs index c521864fe..fe381eb47 100644 --- a/kanidmd/score/src/lib.rs +++ b/kanidmd/score/src/lib.rs @@ -29,12 +29,10 @@ pub mod https; mod ldaps; // use crossbeam::channel::unbounded; +use std::sync::Arc; + use async_std::task; use compact_jwt::JwsSigner; -use kanidm::prelude::*; -#[cfg(not(target_family = "windows"))] -use libc::umask; - use kanidm::actors::v1_read::QueryServerReadV1; use kanidm::actors::v1_write::QueryServerWriteV1; use kanidm::be::{Backend, BackendConfig, BackendTransaction, FsType}; @@ -43,13 +41,14 @@ use kanidm::crypto::setup_tls; use kanidm::idm::server::{IdmServer, IdmServerDelayed}; use kanidm::interval::IntervalActor; use kanidm::ldap::LdapServer; +use kanidm::prelude::*; use kanidm::schema::Schema; use kanidm::status::StatusActor; use kanidm::utils::{duration_from_epoch_now, touch_file_or_quit}; use kanidm_proto::messages::{AccountChangeMessage, MessageStatus}; use kanidm_proto::v1::OperationError; - -use std::sync::Arc; +#[cfg(not(target_family = "windows"))] +use libc::umask; // === internal setup helpers diff --git a/kanidmd/score/tests/https_middleware.rs b/kanidmd/score/tests/https_middleware.rs index d81f0f5ad..708015734 100644 --- a/kanidmd/score/tests/https_middleware.rs +++ b/kanidmd/score/tests/https_middleware.rs @@ -1,14 +1,12 @@ use std::sync::atomic::Ordering; mod common; -use crate::common::{ADMIN_TEST_PASSWORD, ADMIN_TEST_USER, PORT_ALLOC}; - use kanidm::audit::LogLevel; use kanidm::config::{Configuration, IntegrationTestConfig, ServerRole}; use score::create_server_core; use tokio::task; -use crate::common::is_free_port; +use crate::common::{is_free_port, ADMIN_TEST_PASSWORD, ADMIN_TEST_USER, PORT_ALLOC}; #[tokio::test] async fn test_https_middleware_headers() { diff --git a/kanidmd/score/tests/oauth2_test.rs b/kanidmd/score/tests/oauth2_test.rs index 88eb322fc..f43b3f305 100644 --- a/kanidmd/score/tests/oauth2_test.rs +++ b/kanidmd/score/tests/oauth2_test.rs @@ -1,6 +1,8 @@ #![deny(warnings)] mod common; -use crate::common::{setup_async_test, ADMIN_TEST_PASSWORD}; +use std::collections::HashMap; +use std::convert::TryFrom; +use std::str::FromStr; use compact_jwt::{JwkKeySet, JwsValidator, OidcToken, OidcUnverified}; use kanidm_proto::oauth2::{ @@ -8,11 +10,10 @@ use kanidm_proto::oauth2::{ AccessTokenResponse, AuthorisationResponse, OidcDiscoveryResponse, }; use oauth2_ext::PkceCodeChallenge; -use std::collections::HashMap; -use std::convert::TryFrom; -use std::str::FromStr; use url::Url; +use crate::common::{setup_async_test, ADMIN_TEST_PASSWORD}; + macro_rules! assert_no_cache { ($response:expr) => {{ // Check we have correct nocache headers. 
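The body of the `assert_no_cache!` macro is cut off by the hunk above, so the following is only a hedged reconstruction of what such a test helper typically does: pull the `Cache-Control` header off a response and assert that it forbids caching. The `Response` type here is a hand-rolled stand-in (the real tests use an HTTP client's response type), and the exact header values checked are an assumption.

```rust
use std::collections::HashMap;

// Stand-in for an HTTP client response; only the part the macro needs.
struct Response {
    headers: HashMap<String, String>,
}

// Sketch of a no-cache assertion macro like the one named in the diff above.
macro_rules! assert_no_cache {
    ($response:expr) => {{
        // Check we have correct nocache headers.
        let cache_control = $response
            .headers
            .get("cache-control")
            .expect("missing cache-control header");
        assert!(
            cache_control.contains("no-store") || cache_control.contains("no-cache"),
            "response is cacheable: {}",
            cache_control
        );
    }};
}

fn main() {
    let mut headers = HashMap::new();
    headers.insert("cache-control".to_string(), "no-store, max-age=0".to_string());
    let response = Response { headers };
    assert_no_cache!(response);
}
```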
diff --git a/kanidmd/score/tests/proto_v1_test.rs b/kanidmd/score/tests/proto_v1_test.rs index f9c8e8f88..8ef10f64f 100644 --- a/kanidmd/score/tests/proto_v1_test.rs +++ b/kanidmd/score/tests/proto_v1_test.rs @@ -1,19 +1,20 @@ #![deny(warnings)] use std::time::SystemTime; -use tracing::debug; - use kanidm::credential::totp::Totp; use kanidm_proto::v1::{ ApiToken, CURegState, CredentialDetailType, Entry, Filter, Modify, ModifyList, }; +use tracing::debug; mod common; -use crate::common::{setup_async_test, ADMIN_TEST_PASSWORD}; -use compact_jwt::JwsUnverified; use std::str::FromStr; -use webauthn_authenticator_rs::{softpasskey::SoftPasskey, WebauthnAuthenticator}; +use compact_jwt::JwsUnverified; +use webauthn_authenticator_rs::softpasskey::SoftPasskey; +use webauthn_authenticator_rs::WebauthnAuthenticator; + +use crate::common::{setup_async_test, ADMIN_TEST_PASSWORD}; const UNIX_TEST_PASSWORD: &str = "unix test user password"; diff --git a/kanidmd_web_ui/Cargo.toml b/kanidmd_web_ui/Cargo.toml index 7acb584a0..9a4845e34 100644 --- a/kanidmd_web_ui/Cargo.toml +++ b/kanidmd_web_ui/Cargo.toml @@ -1,47 +1,43 @@ [package] name = "kanidmd_web_ui" -version = "1.1.0-alpha.9" -authors = [ - "William Brown ", - "James Hodgkinson ", - ] -rust-version = "1.64" -edition = "2021" -license = "MPL-2.0" description = "Kanidm Server Web User Interface" documentation = "https://docs.rs/kanidm/latest/kanidm/" -homepage = "https://github.com/kanidm/kanidm/" -repository = "https://github.com/kanidm/kanidm/" + +version.workspace = true +authors.workspace = true +rust-version.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true # These are ignored because the crate is in a workspace #[profile.release] # less code to include into binary - [lib] crate-type = ["cdylib", "rlib"] [dependencies] -compact_jwt = { version = "^0.2.3", default-features = false, features = ["unsafe_release_without_verify"] } -# compact_jwt = { path = "../../compact_jwt" , default-features = false, features = ["unsafe_release_without_verify"] } -gloo = "^0.8.0" -gloo-net = "0.2.4" -js-sys = "^0.3.58" -kanidm_proto = { path = "../kanidm_proto", features = ["wasm"] } -qrcode = { version = "^0.12.0", default-features = false, features = ["svg"] } -serde = { version = "^1.0.142", features = ["derive"] } -serde_json = "^1.0.83" -serde-wasm-bindgen = "0.4" -uuid = "^1.1.2" -wasm-bindgen = { version = "^0.2.81" } -wasm-bindgen-futures = { version = "^0.4.30" } -wasm-bindgen-test = "0.3.33" -yew = "^0.19.3" -yew-agent = "^0.1.0" -yew-router = "^0.16.0" +compact_jwt = { workspace = true, default-features = false, features = ["unsafe_release_without_verify"] } +gloo.workspace = true +gloo-net.workspace = true +js-sys.workspace = true +kanidm_proto = { workspace = true, features = ["wasm"] } +qrcode = { workspace = true, default-features = false, features = ["svg"] } +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +serde-wasm-bindgen.workspace = true +uuid.workspace = true +wasm-bindgen.workspace = true +wasm-bindgen-futures.workspace = true +wasm-bindgen-test.workspace = true +yew.workspace = true +yew-agent.workspace = true +yew-router.workspace = true [dependencies.web-sys] -version = "^0.3.60" +workspace = true features = [ "AuthenticationExtensionsClientOutputs", "AuthenticatorResponse", diff --git a/kanidmd_web_ui/pkg/kanidmd_web_ui.js b/kanidmd_web_ui/pkg/kanidmd_web_ui.js deleted file mode 100644 index 36fc947f2..000000000 --- 
a/kanidmd_web_ui/pkg/kanidmd_web_ui.js +++ /dev/null @@ -1,1093 +0,0 @@ -import { modal_hide_by_id } from '/pkg/wasmloader.js'; - -let wasm; - -const heap = new Array(32).fill(undefined); - -heap.push(undefined, null, true, false); - -function getObject(idx) { return heap[idx]; } - -function isLikeNone(x) { - return x === undefined || x === null; -} - -let cachedFloat64Memory0 = new Float64Array(); - -function getFloat64Memory0() { - if (cachedFloat64Memory0.byteLength === 0) { - cachedFloat64Memory0 = new Float64Array(wasm.memory.buffer); - } - return cachedFloat64Memory0; -} - -let cachedInt32Memory0 = new Int32Array(); - -function getInt32Memory0() { - if (cachedInt32Memory0.byteLength === 0) { - cachedInt32Memory0 = new Int32Array(wasm.memory.buffer); - } - return cachedInt32Memory0; -} - -let WASM_VECTOR_LEN = 0; - -let cachedUint8Memory0 = new Uint8Array(); - -function getUint8Memory0() { - if (cachedUint8Memory0.byteLength === 0) { - cachedUint8Memory0 = new Uint8Array(wasm.memory.buffer); - } - return cachedUint8Memory0; -} - -const cachedTextEncoder = new TextEncoder('utf-8'); - -const encodeString = (typeof cachedTextEncoder.encodeInto === 'function' - ? function (arg, view) { - return cachedTextEncoder.encodeInto(arg, view); -} - : function (arg, view) { - const buf = cachedTextEncoder.encode(arg); - view.set(buf); - return { - read: arg.length, - written: buf.length - }; -}); - -function passStringToWasm0(arg, malloc, realloc) { - - if (realloc === undefined) { - const buf = cachedTextEncoder.encode(arg); - const ptr = malloc(buf.length); - getUint8Memory0().subarray(ptr, ptr + buf.length).set(buf); - WASM_VECTOR_LEN = buf.length; - return ptr; - } - - let len = arg.length; - let ptr = malloc(len); - - const mem = getUint8Memory0(); - - let offset = 0; - - for (; offset < len; offset++) { - const code = arg.charCodeAt(offset); - if (code > 0x7F) break; - mem[ptr + offset] = code; - } - - if (offset !== len) { - if (offset !== 0) { - arg = arg.slice(offset); - } - ptr = realloc(ptr, len, len = offset + arg.length * 3); - const view = getUint8Memory0().subarray(ptr + offset, ptr + len); - const ret = encodeString(arg, view); - - offset += ret.written; - } - - WASM_VECTOR_LEN = offset; - return ptr; -} - -let heap_next = heap.length; - -function addHeapObject(obj) { - if (heap_next === heap.length) heap.push(heap.length + 1); - const idx = heap_next; - heap_next = heap[idx]; - - heap[idx] = obj; - return idx; -} - -const cachedTextDecoder = new TextDecoder('utf-8', { ignoreBOM: true, fatal: true }); - -cachedTextDecoder.decode(); - -function getStringFromWasm0(ptr, len) { - return cachedTextDecoder.decode(getUint8Memory0().subarray(ptr, ptr + len)); -} - -function dropObject(idx) { - if (idx < 36) return; - heap[idx] = heap_next; - heap_next = idx; -} - -function takeObject(idx) { - const ret = getObject(idx); - dropObject(idx); - return ret; -} - -function debugString(val) { - // primitive types - const type = typeof val; - if (type == 'number' || type == 'boolean' || val == null) { - return `${val}`; - } - if (type == 'string') { - return `"${val}"`; - } - if (type == 'symbol') { - const description = val.description; - if (description == null) { - return 'Symbol'; - } else { - return `Symbol(${description})`; - } - } - if (type == 'function') { - const name = val.name; - if (typeof name == 'string' && name.length > 0) { - return `Function(${name})`; - } else { - return 'Function'; - } - } - // objects - if (Array.isArray(val)) { - const length = val.length; - let debug = '['; - 
if (length > 0) { - debug += debugString(val[0]); - } - for(let i = 1; i < length; i++) { - debug += ', ' + debugString(val[i]); - } - debug += ']'; - return debug; - } - // Test for built-in - const builtInMatches = /\[object ([^\]]+)\]/.exec(toString.call(val)); - let className; - if (builtInMatches.length > 1) { - className = builtInMatches[1]; - } else { - // Failed to match the standard '[object ClassName]' - return toString.call(val); - } - if (className == 'Object') { - // we're a user defined class or Object - // JSON.stringify avoids problems with cycles, and is generally much - // easier than looping through ownProperties of `val`. - try { - return 'Object(' + JSON.stringify(val) + ')'; - } catch (_) { - return 'Object'; - } - } - // errors - if (val instanceof Error) { - return `${val.name}: ${val.message}\n${val.stack}`; - } - // TODO we could test for more things here, like `Set`s and `Map`s. - return className; -} - -function makeMutClosure(arg0, arg1, dtor, f) { - const state = { a: arg0, b: arg1, cnt: 1, dtor }; - const real = (...args) => { - // First up with a closure we increment the internal reference - // count. This ensures that the Rust closure environment won't - // be deallocated while we're invoking it. - state.cnt++; - const a = state.a; - state.a = 0; - try { - return f(a, state.b, ...args); - } finally { - if (--state.cnt === 0) { - wasm.__wbindgen_export_2.get(state.dtor)(a, state.b); - - } else { - state.a = a; - } - } - }; - real.original = state; - - return real; -} - -let stack_pointer = 32; - -function addBorrowedObject(obj) { - if (stack_pointer == 1) throw new Error('out of js stack'); - heap[--stack_pointer] = obj; - return stack_pointer; -} -function __wbg_adapter_36(arg0, arg1, arg2) { - try { - wasm._dyn_core__ops__function__FnMut___A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__hbcc7dfc6c2687f89(arg0, arg1, addBorrowedObject(arg2)); - } finally { - heap[stack_pointer++] = undefined; - } -} - -function makeClosure(arg0, arg1, dtor, f) { - const state = { a: arg0, b: arg1, cnt: 1, dtor }; - const real = (...args) => { - // First up with a closure we increment the internal reference - // count. This ensures that the Rust closure environment won't - // be deallocated while we're invoking it. 
- state.cnt++; - try { - return f(state.a, state.b, ...args); - } finally { - if (--state.cnt === 0) { - wasm.__wbindgen_export_2.get(state.dtor)(state.a, state.b); - state.a = 0; - - } - } - }; - real.original = state; - - return real; -} -function __wbg_adapter_39(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__Fn__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h6d98df24b306b11b(arg0, arg1, addHeapObject(arg2)); -} - -function __wbg_adapter_42(arg0, arg1, arg2) { - wasm._dyn_core__ops__function__FnMut__A____Output___R_as_wasm_bindgen__closure__WasmClosure___describe__invoke__h93ba8e63e7e4f60b(arg0, arg1, addHeapObject(arg2)); -} - -/** -*/ -export function run_app() { - try { - const retptr = wasm.__wbindgen_add_to_stack_pointer(-16); - wasm.run_app(retptr); - var r0 = getInt32Memory0()[retptr / 4 + 0]; - var r1 = getInt32Memory0()[retptr / 4 + 1]; - if (r1) { - throw takeObject(r0); - } - } finally { - wasm.__wbindgen_add_to_stack_pointer(16); - } -} - -let cachedUint32Memory0 = new Uint32Array(); - -function getUint32Memory0() { - if (cachedUint32Memory0.byteLength === 0) { - cachedUint32Memory0 = new Uint32Array(wasm.memory.buffer); - } - return cachedUint32Memory0; -} - -function getArrayJsValueFromWasm0(ptr, len) { - const mem = getUint32Memory0(); - const slice = mem.subarray(ptr / 4, ptr / 4 + len); - const result = []; - for (let i = 0; i < slice.length; i++) { - result.push(takeObject(slice[i])); - } - return result; -} - -function handleError(f, args) { - try { - return f.apply(this, args); - } catch (e) { - wasm.__wbindgen_exn_store(addHeapObject(e)); - } -} - -async function load(module, imports) { - if (typeof Response === 'function' && module instanceof Response) { - if (typeof WebAssembly.instantiateStreaming === 'function') { - try { - return await WebAssembly.instantiateStreaming(module, imports); - - } catch (e) { - if (module.headers.get('Content-Type') != 'application/wasm') { - console.warn("`WebAssembly.instantiateStreaming` failed because your server does not serve wasm with `application/wasm` MIME type. Falling back to `WebAssembly.instantiate` which is slower. Original error:\n", e); - - } else { - throw e; - } - } - } - - const bytes = await module.arrayBuffer(); - return await WebAssembly.instantiate(bytes, imports); - - } else { - const instance = await WebAssembly.instantiate(module, imports); - - if (instance instanceof WebAssembly.Instance) { - return { instance, module }; - - } else { - return instance; - } - } -} - -function getImports() { - const imports = {}; - imports.wbg = {}; - imports.wbg.__wbindgen_is_bigint = function(arg0) { - const ret = typeof(getObject(arg0)) === 'bigint'; - return ret; - }; - imports.wbg.__wbindgen_is_undefined = function(arg0) { - const ret = getObject(arg0) === undefined; - return ret; - }; - imports.wbg.__wbindgen_number_get = function(arg0, arg1) { - const obj = getObject(arg1); - const ret = typeof(obj) === 'number' ? obj : undefined; - getFloat64Memory0()[arg0 / 8 + 1] = isLikeNone(ret) ? 0 : ret; - getInt32Memory0()[arg0 / 4 + 0] = !isLikeNone(ret); - }; - imports.wbg.__wbindgen_boolean_get = function(arg0) { - const v = getObject(arg0); - const ret = typeof(v) === 'boolean' ? (v ? 1 : 0) : 2; - return ret; - }; - imports.wbg.__wbindgen_string_get = function(arg0, arg1) { - const obj = getObject(arg1); - const ret = typeof(obj) === 'string' ? obj : undefined; - var ptr0 = isLikeNone(ret) ? 
0 : passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - var len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }; - imports.wbg.__wbindgen_is_object = function(arg0) { - const val = getObject(arg0); - const ret = typeof(val) === 'object' && val !== null; - return ret; - }; - imports.wbg.__wbindgen_is_string = function(arg0) { - const ret = typeof(getObject(arg0)) === 'string'; - return ret; - }; - imports.wbg.__wbindgen_object_clone_ref = function(arg0) { - const ret = getObject(arg0); - return addHeapObject(ret); - }; - imports.wbg.__wbindgen_string_new = function(arg0, arg1) { - const ret = getStringFromWasm0(arg0, arg1); - return addHeapObject(ret); - }; - imports.wbg.__wbindgen_object_drop_ref = function(arg0) { - takeObject(arg0); - }; - imports.wbg.__wbindgen_cb_drop = function(arg0) { - const obj = takeObject(arg0).original; - if (obj.cnt-- == 1) { - obj.a = 0; - return true; - } - const ret = false; - return ret; - }; - imports.wbg.__wbg_modalhidebyid_3090e1f0ff737387 = function(arg0, arg1) { - modal_hide_by_id(getStringFromWasm0(arg0, arg1)); - }; - imports.wbg.__wbg_BigInt_d0c7d465bfa30d3b = function(arg0) { - const ret = BigInt(arg0); - return addHeapObject(ret); - }; - imports.wbg.__wbindgen_number_new = function(arg0) { - const ret = arg0; - return addHeapObject(ret); - }; - imports.wbg.__wbg_BigInt_1fab4952b6c4a499 = function(arg0) { - const ret = BigInt(BigInt.asUintN(64, arg0)); - return addHeapObject(ret); - }; - imports.wbg.__wbindgen_is_null = function(arg0) { - const ret = getObject(arg0) === null; - return ret; - }; - imports.wbg.__wbg_BigInt_06819bca5a5bedef = function(arg0) { - const ret = BigInt(getObject(arg0)); - return ret; - }; - imports.wbg.__wbg_BigInt_67359e71cae1c6c9 = function(arg0) { - const ret = BigInt(getObject(arg0)); - return ret; - }; - imports.wbg.__wbg_get_2268d91a19a98b92 = function(arg0, arg1) { - const ret = getObject(arg0)[takeObject(arg1)]; - return addHeapObject(ret); - }; - imports.wbg.__wbg_set_c943d600fa71e4dd = function(arg0, arg1, arg2) { - getObject(arg0)[takeObject(arg1)] = takeObject(arg2); - }; - imports.wbg.__wbg_new_abda76e883ba8a5f = function() { - const ret = new Error(); - return addHeapObject(ret); - }; - imports.wbg.__wbg_stack_658279fe44541cf6 = function(arg0, arg1) { - const ret = getObject(arg1).stack; - const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }; - imports.wbg.__wbg_error_f851667af71bcfc6 = function(arg0, arg1) { - try { - console.error(getStringFromWasm0(arg0, arg1)); - } finally { - wasm.__wbindgen_free(arg0, arg1); - } - }; - imports.wbg.__wbg_debug_783a3d4910bc24c7 = function(arg0, arg1) { - var v0 = getArrayJsValueFromWasm0(arg0, arg1).slice(); - wasm.__wbindgen_free(arg0, arg1 * 4); - console.debug(...v0); - }; - imports.wbg.__wbg_error_71d6845bf00a930f = function(arg0, arg1) { - var v0 = getArrayJsValueFromWasm0(arg0, arg1).slice(); - wasm.__wbindgen_free(arg0, arg1 * 4); - console.error(...v0); - }; - imports.wbg.__wbg_log_1f7f93998ab961f7 = function(arg0, arg1) { - var v0 = getArrayJsValueFromWasm0(arg0, arg1).slice(); - wasm.__wbindgen_free(arg0, arg1 * 4); - console.log(...v0); - }; - imports.wbg.__wbg_warn_0b90a269a514ae1d = function(arg0, arg1) { - var v0 = getArrayJsValueFromWasm0(arg0, arg1).slice(); - wasm.__wbindgen_free(arg0, arg1 * 4); - console.warn(...v0); - }; 
- imports.wbg.__wbg_instanceof_Window_acc97ff9f5d2c7b4 = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof Window; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_document_3ead31dbcad65886 = function(arg0) { - const ret = getObject(arg0).document; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_location_8cc8ccf27e342c0a = function(arg0) { - const ret = getObject(arg0).location; - return addHeapObject(ret); - }; - imports.wbg.__wbg_history_2a104346a1208269 = function() { return handleError(function (arg0) { - const ret = getObject(arg0).history; - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_navigator_d1dcf282b97e2495 = function(arg0) { - const ret = getObject(arg0).navigator; - return addHeapObject(ret); - }; - imports.wbg.__wbg_localStorage_753b6d15a844c3dc = function() { return handleError(function (arg0) { - const ret = getObject(arg0).localStorage; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_sessionStorage_4ab60c7f3cb9633b = function() { return handleError(function (arg0) { - const ret = getObject(arg0).sessionStorage; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_fetch_0fe04905cccfc2aa = function(arg0, arg1) { - const ret = getObject(arg0).fetch(getObject(arg1)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_body_3cb4b4042b9a632b = function(arg0) { - const ret = getObject(arg0).body; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_createElement_976dbb84fe1661b5 = function() { return handleError(function (arg0, arg1, arg2) { - const ret = getObject(arg0).createElement(getStringFromWasm0(arg1, arg2)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_createElementNS_1561aca8ee3693c0 = function() { return handleError(function (arg0, arg1, arg2, arg3, arg4) { - const ret = getObject(arg0).createElementNS(arg1 === 0 ? undefined : getStringFromWasm0(arg1, arg2), getStringFromWasm0(arg3, arg4)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_createTextNode_300f845fab76642f = function(arg0, arg1, arg2) { - const ret = getObject(arg0).createTextNode(getStringFromWasm0(arg1, arg2)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_getElementById_3a708b83e4f034d7 = function(arg0, arg1, arg2) { - const ret = getObject(arg0).getElementById(getStringFromWasm0(arg1, arg2)); - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_querySelector_3628dc2c3319e7e0 = function() { return handleError(function (arg0, arg1, arg2) { - const ret = getObject(arg0).querySelector(getStringFromWasm0(arg1, arg2)); - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_getItem_845e475f85f593e4 = function() { return handleError(function (arg0, arg1, arg2, arg3) { - const ret = getObject(arg1).getItem(getStringFromWasm0(arg2, arg3)); - var ptr0 = isLikeNone(ret) ? 
0 : passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - var len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }, arguments) }; - imports.wbg.__wbg_removeItem_9da69ede4eea3326 = function() { return handleError(function (arg0, arg1, arg2) { - getObject(arg0).removeItem(getStringFromWasm0(arg1, arg2)); - }, arguments) }; - imports.wbg.__wbg_setItem_9c469d634d0c321c = function() { return handleError(function (arg0, arg1, arg2, arg3, arg4) { - getObject(arg0).setItem(getStringFromWasm0(arg1, arg2), getStringFromWasm0(arg3, arg4)); - }, arguments) }; - imports.wbg.__wbg_pathname_78a642e573bf8169 = function(arg0, arg1) { - const ret = getObject(arg1).pathname; - const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }; - imports.wbg.__wbg_search_afb25c63fe262036 = function(arg0, arg1) { - const ret = getObject(arg1).search; - const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }; - imports.wbg.__wbg_setsearch_40007c2a91333011 = function(arg0, arg1, arg2) { - getObject(arg0).search = getStringFromWasm0(arg1, arg2); - }; - imports.wbg.__wbg_new_7d95b89914e4d377 = function() { return handleError(function (arg0, arg1) { - const ret = new URL(getStringFromWasm0(arg0, arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_new_ca4d3a3eca340210 = function() { return handleError(function () { - const ret = new URLSearchParams(); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_new_2d0053ee81e4dd2a = function() { return handleError(function () { - const ret = new Headers(); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_get_31b57952dfc2c6cc = function() { return handleError(function (arg0, arg1, arg2, arg3) { - const ret = getObject(arg1).get(getStringFromWasm0(arg2, arg3)); - var ptr0 = isLikeNone(ret) ? 
0 : passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - var len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }, arguments) }; - imports.wbg.__wbg_set_992c1d31586b2957 = function() { return handleError(function (arg0, arg1, arg2, arg3, arg4) { - getObject(arg0).set(getStringFromWasm0(arg1, arg2), getStringFromWasm0(arg3, arg4)); - }, arguments) }; - imports.wbg.__wbg_value_ccb32485ee1b3928 = function(arg0, arg1) { - const ret = getObject(arg1).value; - const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }; - imports.wbg.__wbg_setvalue_df64bc6794c098f2 = function(arg0, arg1, arg2) { - getObject(arg0).value = getStringFromWasm0(arg1, arg2); - }; - imports.wbg.__wbg_url_1c013f0875e97715 = function(arg0, arg1) { - const ret = getObject(arg1).url; - const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }; - imports.wbg.__wbg_headers_85824e993aa739bf = function(arg0) { - const ret = getObject(arg0).headers; - return addHeapObject(ret); - }; - imports.wbg.__wbg_newwithstr_fdce36db91ec5f92 = function() { return handleError(function (arg0, arg1) { - const ret = new Request(getStringFromWasm0(arg0, arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_newwithstrandinit_05d7180788420c40 = function() { return handleError(function (arg0, arg1, arg2) { - const ret = new Request(getStringFromWasm0(arg0, arg1), getObject(arg2)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_add_89a4f3b0846cf0aa = function() { return handleError(function (arg0, arg1, arg2) { - getObject(arg0).add(getStringFromWasm0(arg1, arg2)); - }, arguments) }; - imports.wbg.__wbg_remove_1a26eb5d822902ed = function() { return handleError(function (arg0, arg1, arg2) { - getObject(arg0).remove(getStringFromWasm0(arg1, arg2)); - }, arguments) }; - imports.wbg.__wbg_instanceof_HtmlFormElement_1c489ff7e99e43d3 = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof HTMLFormElement; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_instanceof_HtmlInputElement_970e4026de0fccff = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof HTMLInputElement; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_setchecked_f1e1f3e62cdca8e7 = function(arg0, arg1) { - getObject(arg0).checked = arg1 !== 0; - }; - imports.wbg.__wbg_value_b2a620d34c663701 = function(arg0, arg1) { - const ret = getObject(arg1).value; - const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }; - imports.wbg.__wbg_setvalue_e5b519cca37d82a7 = function(arg0, arg1, arg2) { - getObject(arg0).value = getStringFromWasm0(arg1, arg2); - }; - imports.wbg.__wbg_instanceof_Element_33bd126d58f2021b = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof Element; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_namespaceURI_e19c7be2c60e5b5c = function(arg0, arg1) { - const ret = getObject(arg1).namespaceURI; - var ptr0 = 
isLikeNone(ret) ? 0 : passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - var len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }; - imports.wbg.__wbg_classList_8a97f5e2e1bc3fa9 = function(arg0) { - const ret = getObject(arg0).classList; - return addHeapObject(ret); - }; - imports.wbg.__wbg_setinnerHTML_32081d8a164e6dc4 = function(arg0, arg1, arg2) { - getObject(arg0).innerHTML = getStringFromWasm0(arg1, arg2); - }; - imports.wbg.__wbg_removeAttribute_beaed7727852af78 = function() { return handleError(function (arg0, arg1, arg2) { - getObject(arg0).removeAttribute(getStringFromWasm0(arg1, arg2)); - }, arguments) }; - imports.wbg.__wbg_setAttribute_d8436c14a59ab1af = function() { return handleError(function (arg0, arg1, arg2, arg3, arg4) { - getObject(arg0).setAttribute(getStringFromWasm0(arg1, arg2), getStringFromWasm0(arg3, arg4)); - }, arguments) }; - imports.wbg.__wbg_instanceof_HtmlElement_eff00d16af7bd6e7 = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof HTMLElement; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_focus_adfe4cc61e2c09bc = function() { return handleError(function (arg0) { - getObject(arg0).focus(); - }, arguments) }; - imports.wbg.__wbg_create_53c6ddb068a22172 = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg0).create(getObject(arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_get_da97585bbb5a63bb = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg0).get(getObject(arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_href_90ff36b5040e3b76 = function(arg0, arg1) { - const ret = getObject(arg1).href; - const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }; - imports.wbg.__wbg_credentials_eab5c0bffc3e9cc5 = function(arg0) { - const ret = getObject(arg0).credentials; - return addHeapObject(ret); - }; - imports.wbg.__wbg_getClientExtensionResults_0381c2792f96b9fa = function(arg0) { - const ret = getObject(arg0).getClientExtensionResults(); - return addHeapObject(ret); - }; - imports.wbg.__wbg_instanceof_Event_1009dd203d9055ee = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof Event; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_target_bf704b7db7ad1387 = function(arg0) { - const ret = getObject(arg0).target; - return isLikeNone(ret) ? 
0 : addHeapObject(ret); - }; - imports.wbg.__wbg_cancelBubble_8c0bdf21c08f1717 = function(arg0) { - const ret = getObject(arg0).cancelBubble; - return ret; - }; - imports.wbg.__wbg_preventDefault_3209279b490de583 = function(arg0) { - getObject(arg0).preventDefault(); - }; - imports.wbg.__wbg_addEventListener_1fc744729ac6dc27 = function() { return handleError(function (arg0, arg1, arg2, arg3, arg4) { - getObject(arg0).addEventListener(getStringFromWasm0(arg1, arg2), getObject(arg3), getObject(arg4)); - }, arguments) }; - imports.wbg.__wbg_removeEventListener_b10f1a66647f3aa0 = function() { return handleError(function (arg0, arg1, arg2, arg3, arg4) { - getObject(arg0).removeEventListener(getStringFromWasm0(arg1, arg2), getObject(arg3), arg4 !== 0); - }, arguments) }; - imports.wbg.__wbg_newwithform_6b545e9ddaccc455 = function() { return handleError(function (arg0) { - const ret = new FormData(getObject(arg0)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_get_f1d748260e3dfd1f = function(arg0, arg1, arg2) { - const ret = getObject(arg0).get(getStringFromWasm0(arg1, arg2)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_parentElement_0cffb3ceb0f107bd = function(arg0) { - const ret = getObject(arg0).parentElement; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_lastChild_a2f5ed739809bb31 = function(arg0) { - const ret = getObject(arg0).lastChild; - return isLikeNone(ret) ? 0 : addHeapObject(ret); - }; - imports.wbg.__wbg_setnodeValue_4077cafeefd0725e = function(arg0, arg1, arg2) { - getObject(arg0).nodeValue = arg1 === 0 ? undefined : getStringFromWasm0(arg1, arg2); - }; - imports.wbg.__wbg_appendChild_e513ef0e5098dfdd = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg0).appendChild(getObject(arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_insertBefore_9f2d2defb9471006 = function() { return handleError(function (arg0, arg1, arg2) { - const ret = getObject(arg0).insertBefore(getObject(arg1), getObject(arg2)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_removeChild_6751e9ca5d9aaf00 = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg0).removeChild(getObject(arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_pushState_38917fb88b4add30 = function() { return handleError(function (arg0, arg1, arg2, arg3, arg4, arg5) { - getObject(arg0).pushState(getObject(arg1), getStringFromWasm0(arg2, arg3), arg4 === 0 ? 
undefined : getStringFromWasm0(arg4, arg5)); - }, arguments) }; - imports.wbg.__wbg_pathname_4441d4d8fc4aba51 = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg1).pathname; - const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }, arguments) }; - imports.wbg.__wbg_search_4aac147f005678e5 = function() { return handleError(function (arg0, arg1) { - const ret = getObject(arg1).search; - const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc); - const len0 = WASM_VECTOR_LEN; - getInt32Memory0()[arg0 / 4 + 1] = len0; - getInt32Memory0()[arg0 / 4 + 0] = ptr0; - }, arguments) }; - imports.wbg.__wbg_replace_ab0ff56e84982ad2 = function() { return handleError(function (arg0, arg1, arg2) { - getObject(arg0).replace(getStringFromWasm0(arg1, arg2)); - }, arguments) }; - imports.wbg.__wbg_instanceof_Response_eaa426220848a39e = function(arg0) { - let result; - try { - result = getObject(arg0) instanceof Response; - } catch { - result = false; - } - const ret = result; - return ret; - }; - imports.wbg.__wbg_status_c4ef3dd591e63435 = function(arg0) { - const ret = getObject(arg0).status; - return ret; - }; - imports.wbg.__wbg_headers_fd64ad685cf22e5d = function(arg0) { - const ret = getObject(arg0).headers; - return addHeapObject(ret); - }; - imports.wbg.__wbg_json_eb16b12f372e850c = function() { return handleError(function (arg0) { - const ret = getObject(arg0).json(); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_text_1169d752cc697903 = function() { return handleError(function (arg0) { - const ret = getObject(arg0).text(); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_get_57245cc7d7c7619d = function(arg0, arg1) { - const ret = getObject(arg0)[arg1 >>> 0]; - return addHeapObject(ret); - }; - imports.wbg.__wbg_length_6e3bbe7c8bd4dbd8 = function(arg0) { - const ret = getObject(arg0).length; - return ret; - }; - imports.wbg.__wbg_new_1d9a920c6bfc44a8 = function() { - const ret = new Array(); - return addHeapObject(ret); - }; - imports.wbg.__wbindgen_is_function = function(arg0) { - const ret = typeof(getObject(arg0)) === 'function'; - return ret; - }; - imports.wbg.__wbg_newnoargs_b5b063fc6c2f0376 = function(arg0, arg1) { - const ret = new Function(getStringFromWasm0(arg0, arg1)); - return addHeapObject(ret); - }; - imports.wbg.__wbg_new_268f7b7dd3430798 = function() { - const ret = new Map(); - return addHeapObject(ret); - }; - imports.wbg.__wbg_next_579e583d33566a86 = function(arg0) { - const ret = getObject(arg0).next; - return addHeapObject(ret); - }; - imports.wbg.__wbg_next_aaef7c8aa5e212ac = function() { return handleError(function (arg0) { - const ret = getObject(arg0).next(); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_done_1b73b0672e15f234 = function(arg0) { - const ret = getObject(arg0).done; - return ret; - }; - imports.wbg.__wbg_value_1ccc36bc03462d71 = function(arg0) { - const ret = getObject(arg0).value; - return addHeapObject(ret); - }; - imports.wbg.__wbg_iterator_6f9d4f28845f426c = function() { - const ret = Symbol.iterator; - return addHeapObject(ret); - }; - imports.wbg.__wbg_get_765201544a2b6869 = function() { return handleError(function (arg0, arg1) { - const ret = Reflect.get(getObject(arg0), getObject(arg1)); - return addHeapObject(ret); - }, arguments) }; - imports.wbg.__wbg_call_97ae9d8645dc388b = 
-    imports.wbg.__wbg_call_97ae9d8645dc388b = function() { return handleError(function (arg0, arg1) {
-        const ret = getObject(arg0).call(getObject(arg1));
-        return addHeapObject(ret);
-    }, arguments) };
-    imports.wbg.__wbg_new_0b9bfdd97583284e = function() {
-        const ret = new Object();
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_self_6d479506f72c6a71 = function() { return handleError(function () {
-        const ret = self.self;
-        return addHeapObject(ret);
-    }, arguments) };
-    imports.wbg.__wbg_window_f2557cc78490aceb = function() { return handleError(function () {
-        const ret = window.window;
-        return addHeapObject(ret);
-    }, arguments) };
-    imports.wbg.__wbg_globalThis_7f206bda628d5286 = function() { return handleError(function () {
-        const ret = globalThis.globalThis;
-        return addHeapObject(ret);
-    }, arguments) };
-    imports.wbg.__wbg_global_ba75c50d1cf384f4 = function() { return handleError(function () {
-        const ret = global.global;
-        return addHeapObject(ret);
-    }, arguments) };
-    imports.wbg.__wbg_set_a68214f35c417fa9 = function(arg0, arg1, arg2) {
-        getObject(arg0)[arg1 >>> 0] = takeObject(arg2);
-    };
-    imports.wbg.__wbg_isArray_27c46c67f498e15d = function(arg0) {
-        const ret = Array.isArray(getObject(arg0));
-        return ret;
-    };
-    imports.wbg.__wbg_push_740e4b286702d964 = function(arg0, arg1) {
-        const ret = getObject(arg0).push(getObject(arg1));
-        return ret;
-    };
-    imports.wbg.__wbg_instanceof_ArrayBuffer_e5e48f4762c5610b = function(arg0) {
-        let result;
-        try {
-            result = getObject(arg0) instanceof ArrayBuffer;
-        } catch {
-            result = false;
-        }
-        const ret = result;
-        return ret;
-    };
-    imports.wbg.__wbg_instanceof_Error_56b496a10a56de66 = function(arg0) {
-        let result;
-        try {
-            result = getObject(arg0) instanceof Error;
-        } catch {
-            result = false;
-        }
-        const ret = result;
-        return ret;
-    };
-    imports.wbg.__wbg_new_8d2af00bc1e329ee = function(arg0, arg1) {
-        const ret = new Error(getStringFromWasm0(arg0, arg1));
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_message_fe2af63ccc8985bc = function(arg0) {
-        const ret = getObject(arg0).message;
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_name_48eda3ae6aa697ca = function(arg0) {
-        const ret = getObject(arg0).name;
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_toString_73c9b562dccf34bd = function(arg0) {
-        const ret = getObject(arg0).toString();
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_set_933729cf5b66ac11 = function(arg0, arg1, arg2) {
-        const ret = getObject(arg0).set(getObject(arg1), getObject(arg2));
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_isSafeInteger_dfa0593e8d7ac35a = function(arg0) {
-        const ret = Number.isSafeInteger(getObject(arg0));
-        return ret;
-    };
-    imports.wbg.__wbg_valueOf_6b6effad03e5c546 = function(arg0) {
-        const ret = getObject(arg0).valueOf();
-        return ret;
-    };
-    imports.wbg.__wbg_entries_65a76a413fc91037 = function(arg0) {
-        const ret = Object.entries(getObject(arg0));
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_is_40a66842732708e7 = function(arg0, arg1) {
-        const ret = Object.is(getObject(arg0), getObject(arg1));
-        return ret;
-    };
-    imports.wbg.__wbg_toString_7be108a12ef03bc2 = function(arg0) {
-        const ret = getObject(arg0).toString();
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_resolve_99fe17964f31ffc0 = function(arg0) {
-        const ret = Promise.resolve(getObject(arg0));
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_then_11f7a54d67b4bfad = function(arg0, arg1) {
-        const ret = getObject(arg0).then(getObject(arg1));
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_then_cedad20fbbd9418a = function(arg0, arg1, arg2) {
-        const ret = getObject(arg0).then(getObject(arg1), getObject(arg2));
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_buffer_3f3d764d4747d564 = function(arg0) {
-        const ret = getObject(arg0).buffer;
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_newwithbyteoffsetandlength_d9aa266703cb98be = function(arg0, arg1, arg2) {
-        const ret = new Uint8Array(getObject(arg0), arg1 >>> 0, arg2 >>> 0);
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_new_8c3f0052272a457a = function(arg0) {
-        const ret = new Uint8Array(getObject(arg0));
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbg_set_83db9690f9353e79 = function(arg0, arg1, arg2) {
-        getObject(arg0).set(getObject(arg1), arg2 >>> 0);
-    };
-    imports.wbg.__wbg_length_9e1ae1900cb0fbd5 = function(arg0) {
-        const ret = getObject(arg0).length;
-        return ret;
-    };
-    imports.wbg.__wbg_instanceof_Uint8Array_971eeda69eb75003 = function(arg0) {
-        let result;
-        try {
-            result = getObject(arg0) instanceof Uint8Array;
-        } catch {
-            result = false;
-        }
-        const ret = result;
-        return ret;
-    };
-    imports.wbg.__wbg_has_8359f114ce042f5a = function() { return handleError(function (arg0, arg1) {
-        const ret = Reflect.has(getObject(arg0), getObject(arg1));
-        return ret;
-    }, arguments) };
-    imports.wbg.__wbg_set_bf3f89b92d5a34bf = function() { return handleError(function (arg0, arg1, arg2) {
-        const ret = Reflect.set(getObject(arg0), getObject(arg1), getObject(arg2));
-        return ret;
-    }, arguments) };
-    imports.wbg.__wbg_stringify_d6471d300ded9b68 = function() { return handleError(function (arg0) {
-        const ret = JSON.stringify(getObject(arg0));
-        return addHeapObject(ret);
-    }, arguments) };
-    imports.wbg.__wbindgen_debug_string = function(arg0, arg1) {
-        const ret = debugString(getObject(arg1));
-        const ptr0 = passStringToWasm0(ret, wasm.__wbindgen_malloc, wasm.__wbindgen_realloc);
-        const len0 = WASM_VECTOR_LEN;
-        getInt32Memory0()[arg0 / 4 + 1] = len0;
-        getInt32Memory0()[arg0 / 4 + 0] = ptr0;
-    };
-    imports.wbg.__wbindgen_throw = function(arg0, arg1) {
-        throw new Error(getStringFromWasm0(arg0, arg1));
-    };
-    imports.wbg.__wbindgen_memory = function() {
-        const ret = wasm.memory;
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbindgen_closure_wrapper6332 = function(arg0, arg1, arg2) {
-        const ret = makeMutClosure(arg0, arg1, 1615, __wbg_adapter_36);
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbindgen_closure_wrapper6493 = function(arg0, arg1, arg2) {
-        const ret = makeClosure(arg0, arg1, 1650, __wbg_adapter_39);
-        return addHeapObject(ret);
-    };
-    imports.wbg.__wbindgen_closure_wrapper6732 = function(arg0, arg1, arg2) {
-        const ret = makeMutClosure(arg0, arg1, 1711, __wbg_adapter_42);
-        return addHeapObject(ret);
-    };
-
-    return imports;
-}
-
-function initMemory(imports, maybe_memory) {
-
-}
-
-function finalizeInit(instance, module) {
-    wasm = instance.exports;
-    init.__wbindgen_wasm_module = module;
-    cachedFloat64Memory0 = new Float64Array();
-    cachedInt32Memory0 = new Int32Array();
-    cachedUint32Memory0 = new Uint32Array();
-    cachedUint8Memory0 = new Uint8Array();
-
-
-    return wasm;
-}
-
-function initSync(module) {
-    const imports = getImports();
-
-    initMemory(imports);
-
-    if (!(module instanceof WebAssembly.Module)) {
-        module = new WebAssembly.Module(module);
-    }
-
-    const instance = new WebAssembly.Instance(module, imports);
-
-    return finalizeInit(instance, module);
-}
-
-async function init(input) {
-    if (typeof input === 'undefined') {
-        input = new URL('kanidmd_web_ui_bg.wasm', import.meta.url);
-    }
-    const imports = getImports();
-
-    if (typeof input === 'string' || (typeof Request === 'function' && input instanceof Request) || (typeof URL === 'function' && input instanceof URL)) {
-        input = fetch(input);
-    }
-
-    initMemory(imports);
-
-    const { instance, module } = await load(await input, imports);
-
-    return finalizeInit(instance, module);
-}
-
-export { initSync }
-export default init;
diff --git a/kanidmd_web_ui/pkg/kanidmd_web_ui_bg.wasm b/kanidmd_web_ui/pkg/kanidmd_web_ui_bg.wasm
deleted file mode 100644
index 6f144858d..000000000
Binary files a/kanidmd_web_ui/pkg/kanidmd_web_ui_bg.wasm and /dev/null differ
diff --git a/kanidmd_web_ui/src/components/admin_accounts.rs b/kanidmd_web_ui/src/components/admin_accounts.rs
index 1b6b0c8b7..85fd14f82 100644
--- a/kanidmd_web_ui/src/components/admin_accounts.rs
+++ b/kanidmd_web_ui/src/components/admin_accounts.rs
@@ -1,3 +1,9 @@
+use std::collections::BTreeMap;
+
+use gloo::console;
+use yew::{html, Component, Context, Html, Properties};
+use yew_router::prelude::Link;
+
 use crate::components::adminmenu::{Entity, EntityType, GetError};
 use crate::components::alpha_warning_banner;
 use crate::constants::{
@@ -6,10 +12,6 @@ use crate::constants::{
 use crate::models;
 use crate::utils::{do_alert_error, do_page_header, init_request};
 use crate::views::AdminRoute;
-use gloo::console;
-use std::collections::BTreeMap;
-use yew::{html, Component, Context, Html, Properties};
-use yew_router::prelude::Link;
 
 impl From<GetError> for AdminListAccountsMsg {
     fn from(ge: GetError) -> Self {
@@ -476,6 +478,7 @@ pub enum AdminViewServiceAccountMsg {
 impl Component for AdminViewServiceAccount {
     type Message = AdminViewServiceAccountMsg;
     type Properties = AdminViewAccountProps;
+
     fn create(ctx: &Context<Self>) -> Self {
         let token = match models::get_bearer_token() {
             Some(value) => value,
diff --git a/kanidmd_web_ui/src/components/admin_groups.rs b/kanidmd_web_ui/src/components/admin_groups.rs
index 5949d8b67..951ea055b 100644
--- a/kanidmd_web_ui/src/components/admin_groups.rs
+++ b/kanidmd_web_ui/src/components/admin_groups.rs
@@ -1,14 +1,15 @@
+use std::collections::BTreeMap;
+
+use gloo::console;
+use yew::{html, Component, Context, Html, Properties};
+use yew_router::prelude::Link;
+
 use crate::components::adminmenu::{Entity, EntityType, GetError};
 use crate::components::alpha_warning_banner;
-use crate::constants::{CSS_BREADCRUMB_ITEM, CSS_BREADCRUMB_ITEM_ACTIVE};
-use crate::constants::{CSS_CELL, CSS_TABLE};
+use crate::constants::{CSS_BREADCRUMB_ITEM, CSS_BREADCRUMB_ITEM_ACTIVE, CSS_CELL, CSS_TABLE};
 use crate::models;
 use crate::utils::{do_alert_error, do_page_header, init_request};
 use crate::views::AdminRoute;
-use gloo::console;
-use std::collections::BTreeMap;
-use yew::{html, Component, Context, Html, Properties};
-use yew_router::prelude::Link;
 
 impl From<GetError> for AdminListGroupsMsg {
     fn from(ge: GetError) -> Self {
@@ -282,7 +283,6 @@ pub struct AdminViewGroup {
 
 impl Component for AdminViewGroup {
     type Message = AdminViewGroupMsg;
-
     type Properties = AdminViewGroupProps;
 
     fn create(ctx: &Context<Self>) -> Self {
diff --git a/kanidmd_web_ui/src/components/admin_oauth2.rs b/kanidmd_web_ui/src/components/admin_oauth2.rs
index fb13f1aaa..c57a90525 100644
--- a/kanidmd_web_ui/src/components/admin_oauth2.rs
+++ b/kanidmd_web_ui/src/components/admin_oauth2.rs
@@ -1,14 +1,15 @@
+use std::collections::BTreeMap;
+
+use gloo::console;
+use yew::{html, Component, Context, Html, Properties};
+use yew_router::prelude::Link;
+
 use crate::components::adminmenu::{Entity, EntityType, GetError};
 use crate::components::alpha_warning_banner;
-use crate::constants::{CSS_BREADCRUMB_ITEM, CSS_BREADCRUMB_ITEM_ACTIVE};
-use crate::constants::{CSS_CELL, CSS_TABLE};
+use crate::constants::{CSS_BREADCRUMB_ITEM, CSS_BREADCRUMB_ITEM_ACTIVE, CSS_CELL, CSS_TABLE};
 use crate::models;
 use crate::utils::{do_alert_error, do_page_header, init_request};
 use crate::views::AdminRoute;
-use gloo::console;
-use std::collections::BTreeMap;
-use yew::{html, Component, Context, Html, Properties};
-use yew_router::prelude::Link;
 
 impl From<GetError> for AdminListOAuth2Msg {
     fn from(ge: GetError) -> Self {
diff --git a/kanidmd_web_ui/src/components/adminmenu.rs b/kanidmd_web_ui/src/components/adminmenu.rs
index 61f84bbf6..aee95b581 100644
--- a/kanidmd_web_ui/src/components/adminmenu.rs
+++ b/kanidmd_web_ui/src/components/adminmenu.rs
@@ -1,13 +1,12 @@
+use serde::{Deserialize, Serialize};
+use yew::{html, Component, Context, Html, Properties};
+use yew_router::prelude::Link;
+
 use crate::components::alpha_warning_banner;
 use crate::constants::{CSS_LINK_DARK_STRETCHED, CSS_PAGE_HEADER};
 // use crate::error::FetchError;
 use crate::views::AdminRoute;
-use serde::{Deserialize, Serialize};
-
-use yew::{html, Component, Context, Html, Properties};
-use yew_router::prelude::Link;
-
 const CSS_CARD: &str = "card text-center";
 const CSS_CARD_BODY: &str = "card-body text-center";
diff --git a/kanidmd_web_ui/src/components/change_unix_password.rs b/kanidmd_web_ui/src/components/change_unix_password.rs
index 61a00de26..2ac1bea12 100644
--- a/kanidmd_web_ui/src/components/change_unix_password.rs
+++ b/kanidmd_web_ui/src/components/change_unix_password.rs
@@ -1,11 +1,10 @@
+use std::str::FromStr;
+
 use compact_jwt::{Jws, JwsUnverified};
 use kanidm_proto::v1::{SingleStringRequest, UserAuthToken};
-use std::str::FromStr;
 use wasm_bindgen::{JsCast, JsValue, UnwrapThrowExt};
 use wasm_bindgen_futures::JsFuture;
-use web_sys::{FormData, HtmlFormElement};
-
-use web_sys::{Request, RequestInit, RequestMode, Response};
+use web_sys::{FormData, HtmlFormElement, Request, RequestInit, RequestMode, Response};
 use yew::prelude::*;
 
 use crate::error::*;
@@ -85,6 +84,7 @@ impl Component for ChangeUnixPassword {
             pw_check_val: "".to_string(),
         }
     }
+
     fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
         match msg {
             Msg::Submit(data) => {
@@ -303,6 +303,7 @@
             Ok(Msg::Error { emsg, kopid })
         }
     }
+
     fn reset(&mut self) {
         self.pw_val = "".to_string();
         self.pw_check_val = "".to_string();
diff --git a/kanidmd_web_ui/src/credential/delete.rs b/kanidmd_web_ui/src/credential/delete.rs
index caaf0142f..fc5d07fd5 100644
--- a/kanidmd_web_ui/src/credential/delete.rs
+++ b/kanidmd_web_ui/src/credential/delete.rs
@@ -1,19 +1,16 @@
-use crate::error::*;
-use crate::utils;
-
-use super::eventbus::{EventBus, EventBusMsg};
-use super::reset::ModalProps;
-
 #[cfg(debug)]
 use gloo::console;
-use yew::prelude::*;
-use yew_agent::Dispatched;
-
+use kanidm_proto::v1::{CURequest, CUSessionToken, CUStatus};
 use wasm_bindgen::{JsCast, JsValue, UnwrapThrowExt};
 use wasm_bindgen_futures::JsFuture;
 use web_sys::{Request, RequestInit, RequestMode, Response};
+use yew::prelude::*;
+use yew_agent::Dispatched;
 
-use kanidm_proto::v1::{CURequest, CUSessionToken, CUStatus};
+use super::eventbus::{EventBus, EventBusMsg};
+use super::reset::ModalProps;
+use crate::error::*;
+use crate::utils;
 
 enum State {
     Init,
diff --git a/kanidmd_web_ui/src/credential/eventbus.rs b/kanidmd_web_ui/src/credential/eventbus.rs
index 1f07c85b2..ac40c14ba 100644
--- a/kanidmd_web_ui/src/credential/eventbus.rs
+++ b/kanidmd_web_ui/src/credential/eventbus.rs
@@ -1,9 +1,8 @@
-use serde::{Deserialize, Serialize};
 use std::collections::HashSet;
-use yew_agent::{Agent, AgentLink, Context, HandlerId};
-
 use kanidm_proto::v1::CUStatus;
+use serde::{Deserialize, Serialize};
+use yew_agent::{Agent, AgentLink, Context, HandlerId};
 
 #[derive(Serialize, Deserialize, Debug, Clone)]
 #[allow(clippy::large_enum_variant)]
@@ -18,10 +17,10 @@ pub struct EventBus {
 }
 
 impl Agent for EventBus {
-    type Reach = Context<Self>;
-    type Message = ();
     type Input = EventBusMsg;
+    type Message = ();
     type Output = EventBusMsg;
+    type Reach = Context<Self>;
 
     fn create(link: AgentLink<Self>) -> Self {
         Self {
diff --git a/kanidmd_web_ui/src/credential/passkey.rs b/kanidmd_web_ui/src/credential/passkey.rs
index d8cca8066..f87908c31 100644
--- a/kanidmd_web_ui/src/credential/passkey.rs
+++ b/kanidmd_web_ui/src/credential/passkey.rs
@@ -1,19 +1,16 @@
-use crate::error::*;
-use crate::utils;
-
-use super::eventbus::{EventBus, EventBusMsg};
-use super::reset::ModalProps;
-
 use gloo::console;
-use yew::prelude::*;
-use yew_agent::Dispatched;
-
+use kanidm_proto::v1::{CURegState, CURequest, CUSessionToken, CUStatus};
+use kanidm_proto::webauthn::{CreationChallengeResponse, RegisterPublicKeyCredential};
 use wasm_bindgen::{JsCast, JsValue, UnwrapThrowExt};
 use wasm_bindgen_futures::JsFuture;
 use web_sys::{Request, RequestInit, RequestMode, Response};
+use yew::prelude::*;
+use yew_agent::Dispatched;
 
-use kanidm_proto::v1::{CURegState, CURequest, CUSessionToken, CUStatus};
-use kanidm_proto::webauthn::{CreationChallengeResponse, RegisterPublicKeyCredential};
+use super::eventbus::{EventBus, EventBusMsg};
+use super::reset::ModalProps;
+use crate::error::*;
+use crate::utils;
 
 pub struct PasskeyModalApp {
     state: State,
diff --git a/kanidmd_web_ui/src/credential/passkeyremove.rs b/kanidmd_web_ui/src/credential/passkeyremove.rs
index 4aab0b9be..292e418b6 100644
--- a/kanidmd_web_ui/src/credential/passkeyremove.rs
+++ b/kanidmd_web_ui/src/credential/passkeyremove.rs
@@ -1,21 +1,17 @@
-use crate::error::*;
-use crate::utils;
-
-use super::eventbus::{EventBus, EventBusMsg};
-use super::reset::PasskeyRemoveModalProps;
-
 #[cfg(debug)]
 use gloo::console;
-use yew::prelude::*;
-use yew_agent::Dispatched;
-
+use kanidm_proto::v1::{CURegState, CURequest, CUSessionToken, CUStatus};
+use uuid::Uuid;
 use wasm_bindgen::{JsCast, JsValue, UnwrapThrowExt};
 use wasm_bindgen_futures::JsFuture;
 use web_sys::{Request, RequestInit, RequestMode, Response};
+use yew::prelude::*;
+use yew_agent::Dispatched;
 
-use uuid::Uuid;
-
-use kanidm_proto::v1::{CURegState, CURequest, CUSessionToken, CUStatus};
+use super::eventbus::{EventBus, EventBusMsg};
+use super::reset::PasskeyRemoveModalProps;
+use crate::error::*;
+use crate::utils;
 
 pub struct PasskeyRemoveModalApp {
     state: State,
diff --git a/kanidmd_web_ui/src/credential/pwmodal.rs b/kanidmd_web_ui/src/credential/pwmodal.rs
index 9e42be8de..5aaa808b7 100644
--- a/kanidmd_web_ui/src/credential/pwmodal.rs
+++ b/kanidmd_web_ui/src/credential/pwmodal.rs
@@ -1,18 +1,15 @@
-use crate::error::*;
-use crate::utils;
-
-use super::eventbus::{EventBus, EventBusMsg};
-use super::reset::ModalProps;
-
 use gloo::console;
-use yew::prelude::*;
-use yew_agent::Dispatched;
-
+use kanidm_proto::v1::{CURequest, CUSessionToken, CUStatus, OperationError, PasswordFeedback};
 use wasm_bindgen::{JsCast, JsValue, UnwrapThrowExt};
 use wasm_bindgen_futures::JsFuture;
 use web_sys::{Request, RequestInit, RequestMode, Response};
+use yew::prelude::*;
+use yew_agent::Dispatched;
 
-use kanidm_proto::v1::{CURequest, CUSessionToken, CUStatus, OperationError, PasswordFeedback};
+use super::eventbus::{EventBus, EventBusMsg};
+use super::reset::ModalProps;
+use crate::error::*;
+use crate::utils;
 
 enum PwState {
     Init,
diff --git a/kanidmd_web_ui/src/credential/reset.rs b/kanidmd_web_ui/src/credential/reset.rs
index c1f1b1c15..28f1cacde 100644
--- a/kanidmd_web_ui/src/credential/reset.rs
+++ b/kanidmd_web_ui/src/credential/reset.rs
@@ -1,19 +1,14 @@
-use crate::error::*;
-use crate::models;
-use crate::utils;
-
 use gloo::console;
-use yew::prelude::*;
-use yew_agent::{Bridge, Bridged};
-use yew_router::prelude::*;
-
 use kanidm_proto::v1::{
     CUIntentToken, CUSessionToken, CUStatus, CredentialDetail, CredentialDetailType,
 };
-
+use uuid::Uuid;
 use wasm_bindgen::{JsCast, JsValue, UnwrapThrowExt};
 use wasm_bindgen_futures::JsFuture;
 use web_sys::{Request, RequestInit, RequestMode, Response};
+use yew::prelude::*;
+use yew_agent::{Bridge, Bridged};
+use yew_router::prelude::*;
 
 use super::delete::DeleteApp;
 use super::eventbus::{EventBus, EventBusMsg};
@@ -21,8 +16,8 @@ use super::passkey::PasskeyModalApp;
 use super::passkeyremove::PasskeyRemoveModalApp;
 use super::pwmodal::PwModalApp;
 use super::totpmodal::TotpModalApp;
-
-use uuid::Uuid;
+use crate::error::*;
+use crate::{models, utils};
 
 #[derive(PartialEq, Eq, Properties)]
 pub struct ModalProps {
diff --git a/kanidmd_web_ui/src/credential/totpmodal.rs b/kanidmd_web_ui/src/credential/totpmodal.rs
index 0ad6920fa..3616ea04c 100644
--- a/kanidmd_web_ui/src/credential/totpmodal.rs
+++ b/kanidmd_web_ui/src/credential/totpmodal.rs
@@ -1,21 +1,18 @@
-use crate::error::*;
-use crate::utils;
-
-use super::eventbus::{EventBus, EventBusMsg};
-use super::reset::ModalProps;
-
 #[cfg(debug)]
 use gloo::console;
-use web_sys::Node;
+use kanidm_proto::v1::{CURegState, CURequest, CUSessionToken, CUStatus, TotpSecret};
+use qrcode::render::svg;
+use qrcode::QrCode;
+use wasm_bindgen::{JsCast, JsValue, UnwrapThrowExt};
+use wasm_bindgen_futures::JsFuture;
+use web_sys::{Node, Request, RequestInit, RequestMode, Response};
 use yew::prelude::*;
 use yew_agent::Dispatched;
-use wasm_bindgen::{JsCast, JsValue, UnwrapThrowExt};
-use wasm_bindgen_futures::JsFuture;
-use web_sys::{Request, RequestInit, RequestMode, Response};
-
-use kanidm_proto::v1::{CURegState, CURequest, CUSessionToken, CUStatus, TotpSecret};
-use qrcode::{render::svg, QrCode};
+use super::eventbus::{EventBus, EventBusMsg};
+use super::reset::ModalProps;
+use crate::error::*;
+use crate::utils;
 
 enum TotpState {
     Init,
diff --git a/kanidmd_web_ui/src/error.rs b/kanidmd_web_ui/src/error.rs
index 538c6284c..f24725c32 100644
--- a/kanidmd_web_ui/src/error.rs
+++ b/kanidmd_web_ui/src/error.rs
@@ -1,5 +1,6 @@
 use std::error::Error;
 use std::fmt;
+
 use wasm_bindgen::JsValue;
 
 #[derive(Debug, Clone, PartialEq)]
diff --git a/kanidmd_web_ui/src/login.rs b/kanidmd_web_ui/src/login.rs
index 974eae65b..681cd831f 100644
--- a/kanidmd_web_ui/src/login.rs
+++ b/kanidmd_web_ui/src/login.rs
@@ -1,5 +1,9 @@
 // use anyhow::Error;
 use gloo::console;
+use kanidm_proto::v1::{
+    AuthAllowed, AuthCredential, AuthMech, AuthRequest, AuthResponse, AuthState, AuthStep,
+};
+use kanidm_proto::webauthn::PublicKeyCredential;
 use wasm_bindgen::prelude::*;
 use wasm_bindgen::JsCast;
 use wasm_bindgen_futures::{spawn_local, JsFuture};
@@ -10,13 +14,7 @@ use yew_router::prelude::*;
 use crate::constants::{CLASS_BUTTON_DARK, CLASS_DIV_LOGIN_BUTTON, CLASS_DIV_LOGIN_FIELD};
 use crate::error::FetchError;
-use crate::models;
-use crate::utils;
-
-use kanidm_proto::v1::{
-    AuthAllowed, AuthCredential, AuthMech, AuthRequest, AuthResponse, AuthState, AuthStep,
-};
-use kanidm_proto::webauthn::PublicKeyCredential;
+use crate::{models, utils};
 
 pub struct LoginApp {
     inputvalue: String,
diff --git a/kanidmd_web_ui/src/manager.rs b/kanidmd_web_ui/src/manager.rs
index de6b9e32e..2adeeb228 100644
--- a/kanidmd_web_ui/src/manager.rs
+++ b/kanidmd_web_ui/src/manager.rs
@@ -5,6 +5,7 @@
 //! will allow you to proceed with the oauth flow.
 
 use gloo::console;
+use serde::{Deserialize, Serialize};
 use wasm_bindgen::UnwrapThrowExt;
 use yew::functional::*;
 use yew::prelude::*;
@@ -14,7 +15,6 @@
 use crate::credential::reset::CredentialResetApp;
 use crate::login::LoginApp;
 use crate::oauth2::Oauth2App;
 use crate::views::{ViewRoute, ViewsApp};
-use serde::{Deserialize, Serialize};
 
 // router to decide on state.
 #[derive(Routable, PartialEq, Eq, Clone, Debug, Serialize, Deserialize)]
diff --git a/kanidmd_web_ui/src/models/mod.rs b/kanidmd_web_ui/src/models/mod.rs
index 066a8ff8e..c3d6e2e88 100644
--- a/kanidmd_web_ui/src/models/mod.rs
+++ b/kanidmd_web_ui/src/models/mod.rs
@@ -1,18 +1,16 @@
-use kanidm_proto::oauth2::AuthorisationRequest;
-
 #[cfg(debug)]
 use gloo::console;
-use gloo::storage::LocalStorage as PersistentStorage;
-use gloo::storage::SessionStorage as TemporaryStorage;
-use gloo::storage::Storage;
+use gloo::storage::{
+    LocalStorage as PersistentStorage, SessionStorage as TemporaryStorage, Storage,
+};
+use kanidm_proto::oauth2::AuthorisationRequest;
+use kanidm_proto::v1::{CUSessionToken, CUStatus};
+use serde::{Deserialize, Serialize};
 use wasm_bindgen::UnwrapThrowExt;
 use yew_router::prelude::{AnyHistory, History};
 
 use crate::manager::Route;
 use crate::views::ViewRoute;
-use serde::{Deserialize, Serialize};
-
-use kanidm_proto::v1::{CUSessionToken, CUStatus};
 
 pub fn get_bearer_token() -> Option<String> {
     let prev_session: Result<String, _> = PersistentStorage::get("kanidm_bearer_token");
diff --git a/kanidmd_web_ui/src/oauth2.rs b/kanidmd_web_ui/src/oauth2.rs
index 1f20407bd..4c5e45d84 100644
--- a/kanidmd_web_ui/src/oauth2.rs
+++ b/kanidmd_web_ui/src/oauth2.rs
@@ -1,8 +1,10 @@
 // use anyhow::Error;
 use gloo::console;
-use wasm_bindgen::JsCast;
-use wasm_bindgen::JsValue;
-use wasm_bindgen::UnwrapThrowExt;
+pub use kanidm_proto::oauth2::{
+    AccessTokenRequest, AccessTokenResponse, AuthorisationRequest, AuthorisationResponse,
+    CodeChallengeMethod, ErrorResponse,
+};
+use wasm_bindgen::{JsCast, JsValue, UnwrapThrowExt};
 use wasm_bindgen_futures::JsFuture;
 use web_sys::{Request, RequestInit, RequestMode, RequestRedirect, Response};
 use yew::prelude::*;
@@ -10,13 +12,7 @@ use yew_router::prelude::*;
 
 use crate::error::*;
 use crate::manager::Route;
-use crate::models;
-use crate::utils;
-
-pub use kanidm_proto::oauth2::{
-    AccessTokenRequest, AccessTokenResponse, AuthorisationRequest, AuthorisationResponse,
-    CodeChallengeMethod, ErrorResponse,
-};
+use crate::{models, utils};
 
 enum State {
     // We don't have a token, or something is invalid.
diff --git a/kanidmd_web_ui/src/views/apps.rs b/kanidmd_web_ui/src/views/apps.rs
index 878e9b5e9..2a9ae6f7b 100644
--- a/kanidmd_web_ui/src/views/apps.rs
+++ b/kanidmd_web_ui/src/views/apps.rs
@@ -1,9 +1,10 @@
-use crate::components::alpha_warning_banner;
-use crate::constants::{CSS_CELL, CSS_PAGE_HEADER, CSS_TABLE};
 #[cfg(debug)]
 use gloo::console;
 use yew::prelude::*;
 
+use crate::components::alpha_warning_banner;
+use crate::constants::{CSS_CELL, CSS_PAGE_HEADER, CSS_TABLE};
+
 pub enum Msg {
     // Nothing
 }
diff --git a/kanidmd_web_ui/src/views/mod.rs b/kanidmd_web_ui/src/views/mod.rs
index 3ad1aac11..315ca1afa 100644
--- a/kanidmd_web_ui/src/views/mod.rs
+++ b/kanidmd_web_ui/src/views/mod.rs
@@ -1,18 +1,19 @@
-use crate::components::{admin_accounts, admin_groups, admin_oauth2, adminmenu};
-use crate::error::*;
-use crate::manager::Route;
-use crate::models;
-use crate::utils;
+use std::str::FromStr;
+
 use compact_jwt::{Jws, JwsUnverified};
 use kanidm_proto::v1::UserAuthToken;
 use serde::{Deserialize, Serialize};
-use std::str::FromStr;
 use wasm_bindgen::{JsCast, UnwrapThrowExt};
 use wasm_bindgen_futures::JsFuture;
 use web_sys::{Request, RequestInit, RequestMode, Response};
 use yew::prelude::*;
 use yew_router::prelude::*;
 
+use crate::components::{admin_accounts, admin_groups, admin_oauth2, adminmenu};
+use crate::error::*;
+use crate::manager::Route;
+use crate::{models, utils};
+
 mod apps;
 mod components;
 mod profile;
@@ -339,6 +340,7 @@ impl ViewsApp {
         }
     }
 
+
 async fn check_token_valid(token: String) -> Result {
     let mut opts = RequestInit::new();
     opts.method("GET");
diff --git a/kanidmd_web_ui/src/views/profile.rs b/kanidmd_web_ui/src/views/profile.rs
index 215499ed1..d68801137 100644
--- a/kanidmd_web_ui/src/views/profile.rs
+++ b/kanidmd_web_ui/src/views/profile.rs
@@ -1,10 +1,10 @@
-use crate::constants::CSS_PAGE_HEADER;
-use crate::views::ViewProps;
-
 use gloo::console;
 use wasm_bindgen::UnwrapThrowExt;
 use yew::prelude::*;
 
+use crate::constants::CSS_PAGE_HEADER;
+use crate::views::ViewProps;
+
 // User Profile UI
 pub struct ProfileApp {}
diff --git a/kanidmd_web_ui/src/views/security.rs b/kanidmd_web_ui/src/views/security.rs
index f19b539c9..33d3083f3 100644
--- a/kanidmd_web_ui/src/views/security.rs
+++ b/kanidmd_web_ui/src/views/security.rs
@@ -1,24 +1,21 @@
-use crate::constants::CSS_PAGE_HEADER;
-use crate::error::*;
-use crate::models;
-use crate::utils;
-
-use crate::components::change_unix_password::ChangeUnixPassword;
-use crate::manager::Route;
-use crate::views::{ViewProps, ViewRoute};
+use std::str::FromStr;
 
 use compact_jwt::{Jws, JwsUnverified};
 #[cfg(debug)]
 use gloo::console;
-use std::str::FromStr;
-use yew::prelude::*;
-use yew_router::prelude::*;
-
 use kanidm_proto::v1::{CUSessionToken, CUStatus, UiHint, UserAuthToken};
-
 use wasm_bindgen::{JsCast, UnwrapThrowExt};
 use wasm_bindgen_futures::JsFuture;
 use web_sys::{Request, RequestInit, RequestMode, Response};
+use yew::prelude::*;
+use yew_router::prelude::*;
+
+use crate::components::change_unix_password::ChangeUnixPassword;
+use crate::constants::CSS_PAGE_HEADER;
+use crate::error::*;
+use crate::manager::Route;
+use crate::views::{ViewProps, ViewRoute};
+use crate::{models, utils};
 
 #[allow(clippy::large_enum_variant)]
 // Page state
diff --git a/orca/Cargo.toml b/orca/Cargo.toml
index dd5864809..37698aede 100644
--- a/orca/Cargo.toml
+++ b/orca/Cargo.toml
@@ -1,44 +1,44 @@
 [package]
 name = "orca"
-version = "1.1.0-alpha.9"
-authors = ["William Brown <william@blackhats.net.au>"]
-rust-version = "1.59"
-edition = "2021"
-license = "MPL-2.0"
description = "Orca - load testing for LDAP and Kanidm" documentation = "https://docs.rs/kanidm/latest/kanidm/" -homepage = "https://github.com/kanidm/kanidm/" -repository = "https://github.com/kanidm/kanidm/" + +version.workspace = true +authors.workspace = true +rust-version.workspace = true +edition.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true [[bin]] name = "orca" path = "src/main.rs" [dependencies] -clap = { version = "^3.2", features = ["derive"] } -crossbeam = "0.8.1" -csv = "1.1.6" -dialoguer = "0.10.1" -futures-util = { version = "^0.3.21", features = ["sink"] } -kanidm_client = { path = "../kanidm_client" } -kanidm_proto = { path = "../kanidm_proto" } -ldap3_proto = "^0.2.3" -mathru = "^0.13.0" -openssl = "^0.10.41" -rand = "^0.8.5" -serde = { version = "^1.0.142", features = ["derive"] } -serde_json = "^1.0.83" -tokio = { version = "^1.21.1", features = ["rt-multi-thread"] } -tokio-openssl = "^0.6.3" -tokio-util = { version = "^0.7.4", features = ["codec"] } -toml = "^0.5.9" -tracing = "^0.1.35" -tracing-subscriber = "^0.3.14" -uuid = { version = "^1.1.2", features = ["serde", "v4" ] } +clap.workspace = true +crossbeam.workspace = true +csv.workspace = true +dialoguer.workspace = true +futures-util = { workspace = true, features = ["sink"] } +kanidm_client.workspace = true +kanidm_proto.workspace = true +ldap3_proto.workspace = true +mathru.workspace = true +openssl.workspace = true +rand.workspace = true +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +tokio = { workspace = true, features = ["rt-multi-thread"] } +tokio-openssl.workspace = true +tokio-util = { workspace = true, features = ["codec"] } +toml.workspace = true +tracing.workspace = true +tracing-subscriber.workspace = true +uuid = { workspace = true, features = ["serde", "v4" ] } [target.'cfg(not(target_family = "windows"))'.dependencies] -tikv-jemallocator = "0.5" - +tikv-jemallocator.workspace = true [build-dependencies] -profiles = { path = "../profiles" } +profiles.workspace = true diff --git a/orca/src/data.rs b/orca/src/data.rs index f0ad49458..665b1994d 100644 --- a/orca/src/data.rs +++ b/orca/src/data.rs @@ -1,10 +1,10 @@ use std::collections::{HashMap, HashSet}; use std::time::Duration; -use uuid::Uuid; use rand::distributions::Alphanumeric; use rand::{thread_rng, Rng}; use serde::{Deserialize, Serialize}; +use uuid::Uuid; pub fn readable_password_from_random() -> String { let mut trng = thread_rng(); diff --git a/orca/src/ds.rs b/orca/src/ds.rs index fa8db1979..5239ef54b 100644 --- a/orca/src/ds.rs +++ b/orca/src/ds.rs @@ -1,11 +1,13 @@ +use std::collections::{HashMap, HashSet}; +use std::time::{Duration, Instant}; + +use ldap3_proto::proto::*; +use uuid::Uuid; + use crate::data::*; use crate::ldap::{LdapClient, LdapSchema}; use crate::profile::DsConfig; use crate::{TargetServer, TargetServerBuilder}; -use ldap3_proto::proto::*; -use std::collections::{HashMap, HashSet}; -use std::time::{Duration, Instant}; -use uuid::Uuid; #[derive(Debug)] pub struct DirectoryServer { diff --git a/orca/src/kani.rs b/orca/src/kani.rs index 75e57c319..a35a698ae 100644 --- a/orca/src/kani.rs +++ b/orca/src/kani.rs @@ -1,12 +1,14 @@ +use std::collections::{HashMap, HashSet}; +use std::time::{Duration, Instant}; + +use kanidm_client::{ClientError, KanidmClient, KanidmClientBuilder, StatusCode}; +use kanidm_proto::v1::*; +use uuid::Uuid; + use crate::data::*; use crate::ldap::{LdapClient, LdapSchema}; use crate::profile::{KaniHttpConfig, 
 use crate::profile::{KaniHttpConfig, KaniLdapConfig};
 use crate::{TargetServer, TargetServerBuilder};
-use kanidm_client::{ClientError, KanidmClient, KanidmClientBuilder, StatusCode};
-use kanidm_proto::v1::*;
-use std::collections::{HashMap, HashSet};
-use std::time::{Duration, Instant};
-use uuid::Uuid;
 
 #[derive(Debug)]
 pub struct KaniHttpServer {
diff --git a/orca/src/ldap.rs b/orca/src/ldap.rs
index d4ba09870..1079ac54b 100644
--- a/orca/src/ldap.rs
+++ b/orca/src/ldap.rs
@@ -1,9 +1,11 @@
+use core::pin::Pin;
 use std::net::{SocketAddr, ToSocketAddrs};
 use std::time::{Duration, Instant};
-use core::pin::Pin;
 
 use futures_util::sink::SinkExt;
 use futures_util::stream::StreamExt;
+use ldap3_proto::proto::*;
+use ldap3_proto::LdapCodec;
 use openssl::ssl::{Ssl, SslConnector, SslMethod, SslVerifyMode};
 // use std::sync::atomic::{AtomicUsize, Ordering};
 use tokio::net::TcpStream;
@@ -11,9 +13,6 @@
 use tokio::sync::Mutex;
 use tokio_openssl::SslStream;
 use tokio_util::codec::Framed;
-use ldap3_proto::proto::*;
-use ldap3_proto::LdapCodec;
-
 struct LdapInner {
     pub framed: Framed<SslStream<TcpStream>, LdapCodec>,
     pub msgid: i32,
diff --git a/orca/src/main.rs b/orca/src/main.rs
index 218b8d63d..890411d97 100644
--- a/orca/src/main.rs
+++ b/orca/src/main.rs
@@ -15,14 +15,16 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
 #[macro_use]
 extern crate tracing;
 
-use crate::ds::DirectoryServer;
-use crate::kani::{KaniHttpServer, KaniLdapServer};
-use clap::{Parser, Subcommand};
 use std::collections::{HashMap, HashSet};
 use std::path::PathBuf;
 use std::time::{Duration, Instant};
+
+use clap::{Parser, Subcommand};
 use uuid::Uuid;
 
+use crate::ds::DirectoryServer;
+use crate::kani::{KaniHttpServer, KaniLdapServer};
+
 mod data;
 mod ds;
 mod kani;
diff --git a/orca/src/preprocess.rs b/orca/src/preprocess.rs
index 1e6cc196b..24ca190e3 100644
--- a/orca/src/preprocess.rs
+++ b/orca/src/preprocess.rs
@@ -1,7 +1,3 @@
-use crate::data::*;
-use rand::seq::SliceRandom;
-use rand::Rng;
-use serde::Deserialize;
 use std::cmp::Ordering;
 use std::collections::{BTreeMap, HashMap, HashSet};
 use std::convert::TryFrom;
@@ -10,8 +6,14 @@
 use std::io::BufReader;
 use std::path::Path;
 use std::str::FromStr;
 use std::time::Duration;
+
+use rand::seq::SliceRandom;
+use rand::Rng;
+use serde::Deserialize;
 use uuid::Uuid;
 
+use crate::data::*;
+
 #[derive(Debug, Deserialize)]
 struct RawRecord {
     conn: String,
diff --git a/orca/src/runner/mod.rs b/orca/src/runner/mod.rs
index d35efec3d..76d592291 100644
--- a/orca/src/runner/mod.rs
+++ b/orca/src/runner/mod.rs
@@ -1,8 +1,10 @@
-use crate::setup::config;
-use crate::{TargetOpt, TestTypeOpt};
-use dialoguer::Confirm;
 use std::fs::create_dir_all;
 use std::path::{Path, PathBuf};
+
+use dialoguer::Confirm;
+
+use crate::setup::config;
+use crate::{TargetOpt, TestTypeOpt};
 
 mod search;
 
 pub(crate) async fn doit(
diff --git a/orca/src/runner/search.rs b/orca/src/runner/search.rs
index b00730ba1..a31f63c68 100644
--- a/orca/src/runner/search.rs
+++ b/orca/src/runner/search.rs
@@ -1,20 +1,20 @@
-use crate::data::{Entity, OpType, TestData};
-use crate::profile::Profile;
-use crate::{TargetServer, TargetServerBuilder};
-use crossbeam::channel::{unbounded, RecvTimeoutError};
-use mathru::statistics::distrib::Continuous;
-use mathru::statistics::distrib::Normal;
-use rand::seq::IteratorRandom;
-use rand::seq::SliceRandom;
-use serde::{Deserialize, Serialize};
 use std::fs::File;
 use std::io::BufWriter;
 use std::path::PathBuf;
 use std::sync::Arc;
 use std::time::{Duration, Instant};
+
+use crossbeam::channel::{unbounded, RecvTimeoutError};
+use mathru::statistics::distrib::{Continuous, Normal};
+use rand::seq::{IteratorRandom, SliceRandom};
+use serde::{Deserialize, Serialize};
 use tokio::sync::broadcast;
 use tokio::task;
 
+use crate::data::{Entity, OpType, TestData};
+use crate::profile::Profile;
+use crate::{TargetServer, TargetServerBuilder};
+
 #[derive(Debug, Clone)]
 enum TestPhase {
     WarmUp,
diff --git a/orca/src/setup.rs b/orca/src/setup.rs
index b8a5c8210..2bb23bcd6 100644
--- a/orca/src/setup.rs
+++ b/orca/src/setup.rs
@@ -1,14 +1,14 @@
+use std::fs::File;
+use std::io::{BufReader, Read};
+use std::path::{Path, PathBuf};
+
+use uuid::Uuid;
+
 use crate::data::TestData;
 use crate::ds::DirectoryServer;
 use crate::kani::{KaniHttpServer, KaniLdapServer};
 use crate::profile::Profile;
-use crate::TargetOpt;
-use crate::TargetServer;
-use std::fs::File;
-use std::io::BufReader;
-use std::io::Read;
-use std::path::{Path, PathBuf};
-use uuid::Uuid;
+use crate::{TargetOpt, TargetServer};
 
 pub(crate) fn config(
     target: &TargetOpt,
diff --git a/profiles/Cargo.toml b/profiles/Cargo.toml
index 5ea1b10f6..bde4c7c98 100644
--- a/profiles/Cargo.toml
+++ b/profiles/Cargo.toml
@@ -1,23 +1,24 @@
 [package]
 name = "profiles"
-version = "1.1.0-alpha.9"
-authors = ["William Brown <william@blackhats.net.au>"]
-rust-version = "1.64"
-edition = "2021"
-license = "MPL-2.0"
 description = "Kanidm Build System Profiles"
 documentation = "https://docs.rs/kanidm/latest/kanidm/"
-homepage = "https://github.com/kanidm/kanidm/"
-repository = "https://github.com/kanidm/kanidm/"
+
+version.workspace = true
+authors.workspace = true
+rust-version.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
 
 [lib]
 name = "profiles"
 path = "src/lib.rs"
 
 [dependencies]
-serde = { version = "^1.0.142", features = ["derive"] }
-toml = "^0.5.9"
-base64 = "^0.13.0"
+serde = { workspace = true, features = ["derive"] }
+toml.workspace = true
+base64.workspace = true
 
 [build-dependencies]
-base64 = "^0.13.0"
+base64.workspace = true
diff --git a/profiles/build.rs b/profiles/build.rs
index 6fc63d576..115b2c1e0 100644
--- a/profiles/build.rs
+++ b/profiles/build.rs
@@ -1,6 +1,5 @@
-use std::env;
-use std::fs;
 use std::path::PathBuf;
+use std::{env, fs};
 
 fn main() {
     println!("cargo:rerun-if-env-changed=KANIDM_BUILD_PROFILE");
diff --git a/profiles/src/lib.rs b/profiles/src/lib.rs
index 6b692d060..27858e728 100644
--- a/profiles/src/lib.rs
+++ b/profiles/src/lib.rs
@@ -1,6 +1,7 @@
-use serde::Deserialize;
 use std::env;
 
+use serde::Deserialize;
+
 #[derive(Debug, Deserialize)]
 #[allow(non_camel_case_types)]
 enum CpuOptLevel {
diff --git a/sketching/Cargo.toml b/sketching/Cargo.toml
index 4801c4982..a1e9d627b 100644
--- a/sketching/Cargo.toml
+++ b/sketching/Cargo.toml
@@ -1,18 +1,19 @@
 [package]
 name = "sketching"
-version = "0.1.0"
-edition = "2021"
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+version.workspace = true
+authors.workspace = true
+rust-version.workspace = true
+edition.workspace = true
+license.workspace = true
+homepage.workspace = true
+repository.workspace = true
 
 [dependencies]
-async-trait = "^0.1.57"
-tide = "^0.16.0"
-num_enum = "^0.5.7"
-
-tracing = { version = "^0.1.35", features = ["attributes", "max_level_trace", "release_max_level_debug"] }
-tracing-subscriber = { version = "^0.3.14", features = ["env-filter"] }
-
-# tracing-forest = { version = "0.1.4", features = ["uuid", "smallvec", "tokio", "env-filter"] }
"https://github.com/QnnOkabayashi/tracing-forest.git", rev = "48d78f7294ceee47a22eee5c80964143c4fb3fe1", features = ["uuid", "smallvec", "tokio", "env-filter"] } +async-trait.workspace = true +num_enum.workspace = true +tide.workspace = true +tracing = { workspace = true, features = ["attributes"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } +tracing-forest = { workspace = true, features = ["uuid", "smallvec", "tokio", "env-filter"] } diff --git a/sketching/src/lib.rs b/sketching/src/lib.rs index f84d28db5..778e5e21f 100644 --- a/sketching/src/lib.rs +++ b/sketching/src/lib.rs @@ -2,14 +2,13 @@ #![warn(unused_extern_crates)] use num_enum::{IntoPrimitive, TryFromPrimitive}; -use tracing_forest::{util::*, Tag}; +use tracing_forest::util::*; +use tracing_forest::Tag; pub mod macros; pub mod middleware; -pub use tracing; -pub use tracing_forest; -pub use tracing_subscriber; +pub use {tracing, tracing_forest, tracing_subscriber}; pub fn test_init() { // tracing_subscriber::fmt::try_init() diff --git a/sketching/src/middleware.rs b/sketching/src/middleware.rs index b6a4b09d3..5dd8c779c 100644 --- a/sketching/src/middleware.rs +++ b/sketching/src/middleware.rs @@ -1,8 +1,7 @@ -use crate::{request_error, request_info, request_warn, security_info}; use tide::{self, Middleware, Next, Request}; use tracing::{self, instrument}; -use crate::*; +use crate::{request_error, request_info, request_warn, security_info, *}; #[derive(Default)] pub struct TreeMiddleware {}