68 replication basics (#908)

Firstyear 2022-07-07 13:28:36 +10:00 committed by GitHub
parent d2ea936b16
commit 8b84999640
30 changed files with 1962 additions and 458 deletions

Cargo.lock (generated)

@@ -68,7 +68,7 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"
dependencies = [
- "getrandom 0.2.6",
+ "getrandom 0.2.7",
 "once_cell",
 "version_check",
]

@@ -93,9 +93,9 @@ dependencies = [
[[package]]
name = "anyhow"
-version = "1.0.57"
+version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "08f9b8508dccb7687a1d6c4ce66b2b0ecef467c94667de27d8d7fe1f8d2a9cdc"
+checksum = "bb07d2053ccdbe10e2af2995a2f116c1330396493dc1269f6a91d0ae82e19704"

[[package]]
name = "anymap2"

@@ -115,12 +115,6 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"

-[[package]]
-name = "arrayvec"
-version = "0.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6"
-
[[package]]
name = "async-channel"
version = "1.6.1"

@@ -171,9 +165,9 @@ dependencies = [
[[package]]
name = "async-global-executor"
-version = "2.1.0"
+version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd8b508d585e01084059b60f06ade4cb7415cd2e4084b71dd1cb44e7d3fb9880"
+checksum = "5262ed948da60dd8956c6c5aca4d4163593dddb7b32d73267c93dab7b2e98940"
dependencies = [
 "async-channel",
 "async-executor",

@@ -181,6 +175,7 @@ dependencies = [
 "async-lock",
 "blocking",
 "futures-lite",
+ "num_cpus",
 "once_cell",
 "tokio",
]

@@ -380,9 +375,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "base-x"
-version = "0.2.10"
+version = "0.2.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc19a4937b4fbd3fe3379793130e42060d10627a360f2127802b10b87e7baf74"
+checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270"

[[package]]
name = "base32"

@@ -449,7 +444,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b64485778c4f16a6a5a9d335e80d449ac6c70cdd6a06d2af18a6f6f775a125b3"
dependencies = [
 "arrayref",
- "arrayvec 0.5.2",
+ "arrayvec",
 "cc",
 "cfg-if 0.1.10",
 "constant_time_eq",

@@ -516,15 +511,6 @@ version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfa8873f51c92e232f9bac4065cddef41b714152812bfc5f7672ba16d6ef8cd9"

-[[package]]
-name = "brownstone"
-version = "1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "030ea61398f34f1395ccbeb046fb68c87b631d1f34567fed0f0f11fa35d18d8d"
-dependencies = [
- "arrayvec 0.7.2",
-]
-
[[package]]
name = "bstr"
version = "0.2.17"

@@ -675,9 +661,9 @@ dependencies = [
[[package]]
name = "clap_lex"
-version = "0.2.2"
+version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5538cd660450ebeb4234cfecf8f2284b844ffc4c50531e66d584ad5b91293613"
+checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5"
dependencies = [
 "os_str_bytes",
]
@@ -795,7 +781,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94d4706de1b0fa5b132270cddffa8585166037822e260a944fe161acd137ca05"
dependencies = [
 "percent-encoding",
- "time 0.3.9",
+ "time 0.3.11",
 "version_check",
]

@@ -811,7 +797,7 @@ dependencies = [
 "publicsuffix",
 "serde",
 "serde_json",
- "time 0.3.9",
+ "time 0.3.11",
 "url",
]

@@ -907,9 +893,9 @@ dependencies = [
[[package]]
name = "crossbeam-channel"
-version = "0.5.4"
+version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53"
+checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c"
dependencies = [
 "cfg-if 1.0.0",
 "crossbeam-utils",

@@ -928,15 +914,15 @@ dependencies = [
[[package]]
name = "crossbeam-epoch"
-version = "0.9.8"
+version = "0.9.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c"
+checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d"
dependencies = [
 "autocfg",
 "cfg-if 1.0.0",
 "crossbeam-utils",
- "lazy_static",
 "memoffset",
+ "once_cell",
 "scopeguard",
]

@@ -952,19 +938,19 @@ dependencies = [
[[package]]
name = "crossbeam-utils"
-version = "0.8.8"
+version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38"
+checksum = "7d82ee10ce34d7bc12c2122495e7593a9c41347ecdd64185af4ecf72cb1a7f83"
dependencies = [
 "cfg-if 1.0.0",
- "lazy_static",
+ "once_cell",
]

[[package]]
name = "crypto-common"
-version = "0.1.3"
+version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8"
+checksum = "5999502d32b9c48d492abe66392408144895020ec4709e549e840799f3bb74c0"
dependencies = [
 "generic-array 0.14.5",
 "typenum",

@@ -1117,13 +1103,12 @@ dependencies = [
[[package]]
name = "devd-rs"
-version = "0.3.3"
+version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4de55a99f4f60e49faf5c7471a5ecbe4f2f16fdf398ba53a4a70829114f305e7"
+checksum = "9861a4115f5148d32f7bbbadcdc562d5754ed75c8d27316a587440dd2ac4df84"
dependencies = [
 "libc",
 "nom 7.1.1",
- "nom-supreme",
]

[[package]]

@@ -1200,9 +1185,9 @@ checksum = "140206b78fb2bc3edbcfc9b5ccbd0b30699cfe8d348b8b31b330e47df5291a5a"
[[package]]
name = "either"
-version = "1.6.1"
+version = "1.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457"
+checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be"

[[package]]
name = "encode_unicode"

@@ -1221,9 +1206,9 @@ dependencies = [
[[package]]
name = "erased-serde"
-version = "0.3.20"
+version = "0.3.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ad132dd8d0d0b546348d7d86cb3191aad14b34e5f979781fc005c80d4ac67ffd"
+checksum = "81d013529d5574a60caeda29e179e695125448e5de52e3874f7b4c1d7360e18e"
dependencies = [
 "serde",
]

@@ -1295,7 +1280,7 @@ checksum = "93804560e638370a8be6d59ce71ed803e55e230abdbf42598e666b41adda9b1f"
dependencies = [
 "base64 0.13.0",
 "byteorder",
- "getrandom 0.2.6",
+ "getrandom 0.2.7",
 "openssl",
 "zeroize",
]
@@ -1495,14 +1480,14 @@ dependencies = [
[[package]]
name = "getrandom"
-version = "0.2.6"
+version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad"
+checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6"
dependencies = [
 "cfg-if 1.0.0",
 "js-sys",
 "libc",
- "wasi 0.10.0+wasi-snapshot-preview1",
+ "wasi 0.11.0+wasi-snapshot-preview1",
 "wasm-bindgen",
]

@@ -1585,9 +1570,9 @@ dependencies = [
[[package]]
name = "gloo-file"
-version = "0.2.1"
+version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aa5d6084efa4a2b182ef3a8649cb6506cb4843f22cf907c6e0a799944248ae90"
+checksum = "a8d5564e570a38b43d78bdc063374a0c3098c4f0d64005b12f9bbe87e869b6d7"
dependencies = [
 "futures-channel",
 "gloo-events",

@@ -1614,9 +1599,9 @@ dependencies = [
[[package]]
name = "gloo-net"
-version = "0.2.0"
+version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6d37f728c2b2b8c568bd2efb34ce9087e347c182db68f101a969b4fe23054d5"
+checksum = "351e6f94c76579cc9f9323a15f209086fc7bd428bff4288723d3a417851757b2"
dependencies = [
 "futures-channel",
 "futures-core",

@@ -1671,9 +1656,9 @@ dependencies = [
[[package]]
name = "gloo-utils"
-version = "0.1.3"
+version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c0bbef55e98d946adbd89f3c65a497cf9adb995a73b99573f30180e8813ab21"
+checksum = "929c53c913bb7a88d75d9dc3e9705f963d8c2b9001510b25ddaf671b9fb7049d"
dependencies = [
 "js-sys",
 "wasm-bindgen",

@@ -1797,9 +1782,9 @@ dependencies = [
[[package]]
name = "http"
-version = "0.2.7"
+version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb"
+checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399"
dependencies = [
 "bytes",
 "fnv",

@@ -1819,9 +1804,9 @@ dependencies = [
[[package]]
name = "http-client"
-version = "6.5.2"
+version = "6.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e023af341b797ce2c039f7c6e1d347b68d0f7fd0bc7ac234fe69cfadcca1f89a"
+checksum = "1947510dc91e2bf586ea5ffb412caad7673264e14bb39fb9078da114a94ce1a5"
dependencies = [
 "async-trait",
 "cfg-if 1.0.0",

@@ -1908,9 +1893,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "idlset"
-version = "0.2.3"
+version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d1612d42a5e18df31a30916d2005ff41981c1d606b9180ef9bd1dbd919fa4eac"
+checksum = "340756d15be4b22d5e501bad90a9f68fcdc6b9b7d2f6d6afe350645e9839dac6"
dependencies = [
 "serde",
 "serde_derive",

@@ -1934,20 +1919,14 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb56e1aa765b4b4f3aadfab769793b7087bb03a4ea4920644a6d238e2df5b9ed"

-[[package]]
-name = "indent_write"
-version = "2.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0cfe9645a18782869361d9c8732246be7b410ad4e919d3609ebabdac00ba12c3"
-
[[package]]
name = "indexmap"
-version = "1.8.2"
+version = "1.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6012d540c5baa3589337a98ce73408de9b5a25ec9fc2c6fd6be8f0d39e0ca5a"
+checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e"
dependencies = [
 "autocfg",
- "hashbrown 0.11.2",
+ "hashbrown 0.12.1",
]

[[package]]

@@ -2001,12 +1980,6 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d"

-[[package]]
-name = "joinery"
-version = "2.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72167d68f5fce3b8655487b8038691a3c9984ee769590f93f2a631f4ad64e4f5"
-
[[package]]
name = "js-sys"
version = "0.3.58"
@@ -2224,7 +2197,7 @@ version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe"
dependencies = [
- "arrayvec 0.5.2",
+ "arrayvec",
 "bitflags",
 "cfg-if 1.0.0",
 "ryu",

@@ -2281,9 +2254,9 @@ dependencies = [
[[package]]
name = "linked-hash-map"
-version = "0.5.4"
+version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3"
+checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"

[[package]]
name = "lock_api"

@@ -2386,9 +2359,9 @@ dependencies = [
[[package]]
name = "mio"
-version = "0.8.3"
+version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799"
+checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf"
dependencies = [
 "libc",
 "log",

@@ -2441,19 +2414,6 @@ dependencies = [
 "minimal-lexical",
]

-[[package]]
-name = "nom-supreme"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aadc66631948f6b65da03be4c4cd8bd104d481697ecbb9bbd65719b1ec60bc9f"
-dependencies = [
- "brownstone",
- "indent_write",
- "joinery",
- "memchr",
- "nom 7.1.1",
-]
-
[[package]]
name = "nss_kanidm"
version = "1.1.0-alpha.8"

@@ -2526,13 +2486,13 @@ dependencies = [
[[package]]
name = "oauth2"
-version = "4.2.0"
+version = "4.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c3bd7d544f02ae0fa9e06137962703d043870d7ad6e6d44786d6a5f20679b2c9"
+checksum = "09edac2677609789a6eb6c95badde366c5162adae0b740a2af0d355604ce7125"
dependencies = [
 "base64 0.13.0",
 "chrono",
- "getrandom 0.2.6",
+ "getrandom 0.2.7",
 "http",
 "rand 0.8.5",
 "serde",

@@ -2545,9 +2505,9 @@ dependencies = [
[[package]]
name = "once_cell"
-version = "1.12.0"
+version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225"
+checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1"

[[package]]
name = "oncemutex"

@@ -2743,18 +2703,18 @@ dependencies = [
[[package]]
name = "pin-project"
-version = "1.0.10"
+version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e"
+checksum = "78203e83c48cffbe01e4a2d35d566ca4de445d79a85372fc64e378bfc812a260"
dependencies = [
 "pin-project-internal",
]

[[package]]
name = "pin-project-internal"
-version = "1.0.10"
+version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb"
+checksum = "710faf75e1b33345361201d36d04e98ac1ed8909151a017ed384700836104c74"
dependencies = [
 "proc-macro2",
 "quote",

@@ -2787,9 +2747,9 @@ checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae"
[[package]]
name = "plotters"
-version = "0.3.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32a3fd9ec30b9749ce28cd91f255d569591cdf937fe280c312143e3c4bad6f2a"
+checksum = "9428003b84df1496fb9d6eeee9c5f8145cb41ca375eb0dad204328888832811f"
dependencies = [
 "num-traits",
 "plotters-backend",
@@ -2800,15 +2760,15 @@ dependencies = [
[[package]]
name = "plotters-backend"
-version = "0.3.2"
+version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d88417318da0eaf0fdcdb51a0ee6c3bed624333bff8f946733049380be67ac1c"
+checksum = "1c89e57ae773e34419b0f62d68c1934a97ac0637f36741dfde4efb88aaf711a0"

[[package]]
name = "plotters-svg"
-version = "0.3.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "521fa9638fa597e1dc53e9412a4f9cefb01187ee1f7413076f9e6749e2885ba9"
+checksum = "e0918736323d1baff32ee0eade54984f6f201ad7e97d5cfb5d6ab4a358529615"
dependencies = [
 "plotters-backend",
]

@@ -2885,9 +2845,9 @@ checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
[[package]]
name = "proc-macro2"
-version = "1.0.39"
+version = "1.0.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f"
+checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7"
dependencies = [
 "unicode-ident",
]

@@ -2945,9 +2905,9 @@ dependencies = [
[[package]]
name = "quote"
-version = "1.0.18"
+version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a1feb54ed693b93a84e14094943b84b7c4eae204c512b7ccb95ab0c66d278ad1"
+checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804"
dependencies = [
 "proc-macro2",
]

@@ -3032,7 +2992,7 @@ version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
dependencies = [
- "getrandom 0.2.6",
+ "getrandom 0.2.7",
]

[[package]]

@@ -3083,16 +3043,16 @@ version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b"
dependencies = [
- "getrandom 0.2.6",
+ "getrandom 0.2.7",
 "redox_syscall",
 "thiserror",
]

[[package]]
name = "regex"
-version = "1.5.6"
+version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1"
+checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b"
dependencies = [
 "aho-corasick",
 "memchr",

@@ -3122,9 +3082,9 @@ dependencies = [
[[package]]
name = "regex-syntax"
-version = "0.6.26"
+version = "0.6.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64"
+checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244"

[[package]]
name = "remove_dir_all"

@@ -3235,7 +3195,7 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
- "semver 1.0.9",
+ "semver 1.0.12",
]

[[package]]
@@ -3363,9 +3323,9 @@ dependencies = [
[[package]]
name = "semver"
-version = "1.0.9"
+version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8cb243bdfdb5936c8dc3c45762a19d12ab4550cdc753bc247637d4ec35a040fd"
+checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1"

[[package]]
name = "semver-parser"

@@ -3572,9 +3532,9 @@ checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32"
[[package]]
name = "smallvec"
-version = "1.8.0"
+version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83"
+checksum = "2fd0db749597d91ff862fd1d55ea87f7855a744a8425a64695b6fca237d1dad1"
dependencies = [
 "serde",
]

@@ -3708,9 +3668,9 @@ dependencies = [
[[package]]
name = "syn"
-version = "1.0.96"
+version = "1.0.98"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0748dd251e24453cb8717f0354206b91557e4ec8703673a4b30208f2abaf1ebf"
+checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd"
dependencies = [
 "proc-macro2",
 "quote",

@@ -3861,9 +3821,9 @@ dependencies = [
[[package]]
name = "tikv-jemalloc-sys"
-version = "0.5.0+5.3.0"
+version = "0.5.1+5.3.0-patched"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aeab4310214fe0226df8bfeb893a291a58b19682e8a07e1e1d4483ad4200d315"
+checksum = "931e876f91fed0827f863a2d153897790da0b24d882c721a79cb3beb0b903261"
dependencies = [
 "cc",
 "fs_extra",

@@ -3909,9 +3869,9 @@ dependencies = [
[[package]]
name = "time"
-version = "0.3.9"
+version = "0.3.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd"
+checksum = "72c91f41dcb2f096c05f0873d667dceec1087ce5bcf984ec8ffb19acddbb3217"
dependencies = [
 "itoa 1.0.2",
 "libc",

@@ -4059,9 +4019,9 @@ dependencies = [
[[package]]
name = "tower-service"
-version = "0.3.1"
+version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6"
+checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52"

[[package]]
name = "tracing"

@@ -4077,9 +4037,9 @@ dependencies = [
[[package]]
name = "tracing-attributes"
-version = "0.1.21"
+version = "0.1.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c"
+checksum = "11c75893af559bc8e10716548bdef5cb2b983f8e637db9d0e15126b61b484ee2"
dependencies = [
 "proc-macro2",
 "quote",

@@ -4155,15 +4115,15 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992"
[[package]]
name = "unicode-ident"
-version = "1.0.0"
+version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee"
+checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c"

[[package]]
name = "unicode-normalization"
-version = "0.1.19"
+version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9"
+checksum = "854cbdc4f7bc6ae19c820d44abdc3277ac3e1b2b93db20a636825d9322fb60e6"
dependencies = [
 "tinyvec",
]

@@ -4225,7 +4185,7 @@ version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd6469f4314d5f1ffec476e05f17cc9a78bc7a27a6a857842170bdf8d6f98d2f"
dependencies = [
- "getrandom 0.2.6",
+ "getrandom 0.2.7",
 "serde",
]

@@ -4652,9 +4612,9 @@ dependencies = [
[[package]]
name = "zeroize"
-version = "1.5.5"
+version = "1.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "94693807d016b2f2d2e14420eb3bfcca689311ff775dcf113d74ea624b7cdf07"
+checksum = "20b578acffd8516a6c3f2a1bdefc1ec37e547bb4e0fb8b6b01a4cafc886b4442"
dependencies = [
 "zeroize_derive",
]

@@ -4684,5 +4644,5 @@ dependencies = [
 "lazy_static",
 "quick-error",
 "regex",
- "time 0.3.9",
+ "time 0.3.11",
]


@@ -57,6 +57,8 @@ pub enum ConsistencyError {
    SqliteIntegrityFailure,
    BackendAllIdsSync,
    BackendIndexSync,
+    ChangelogDesynchronised(u64),
+    RuvInconsistent(String),
}

#[derive(Serialize, Deserialize, Debug)]

@@ -143,6 +145,9 @@ pub enum OperationError {
    QueueDisconnected,
    Webauthn,
    Wait(time::OffsetDateTime),
+    ReplReplayFailure,
+    ReplEntryNotChanged,
+    ReplInvalidRUVState,
}

impl PartialEq for OperationError {
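The three new OperationError variants are returned from the backend write paths later in this diff. For orientation only, a hypothetical helper (not part of the commit) that coarsely classifies the replication-specific failures could look like:

    // Illustrative, not in this commit: classify the new replication errors.
    fn is_repl_error(e: &OperationError) -> bool {
        matches!(
            e,
            OperationError::ReplReplayFailure
                | OperationError::ReplEntryNotChanged
                | OperationError::ReplInvalidRUVState
        )
    }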


@@ -28,7 +28,7 @@ filetime = "^0.2.17"
futures = "^0.3.21"
futures-util = "^0.3.21"
hashbrown = { version = "0.12.0", features = ["serde", "inline-more", "ahash"] }
-idlset = { version = "^0.2.3" }
+idlset = { version = "^0.2.4" }
kanidm_proto = { path = "../../kanidm_proto" }
lazy_static = "^1.4.0"
ldap3_proto = "^0.2.3"


@@ -28,6 +28,12 @@ use std::ops::DerefMut;
use std::time::Duration;
use uuid::Uuid;

+use crate::repl::cid::Cid;
+use crate::repl::ruv::{
+    ReplicationUpdateVector, ReplicationUpdateVectorReadTransaction,
+    ReplicationUpdateVectorTransaction, ReplicationUpdateVectorWriteTransaction,
+};
+
pub mod dbentry;
pub mod dbvalue;
mod idl_arc_sqlite;

@@ -104,17 +110,24 @@ impl BackendConfig {
#[derive(Clone)]
pub struct Backend {
+    /// This is the actual datastorage layer.
    idlayer: Arc<IdlArcSqlite>,
    /// This is a copy-on-write cache of the index metadata that has been
    /// extracted from attributes set, in the correct format for the backend
-    /// to consume.
+    /// to consume. We use it to extract indexes from entries during write paths
+    /// and to allow the front end to know what indexes exist during a read.
    idxmeta: Arc<CowCell<IdxMeta>>,
+    /// The current state of the replication update vector. This is effectively a
+    /// time series index of the full list of all changelog entries and what entries
+    /// are part of each change.
+    ruv: Arc<ReplicationUpdateVector>,
    cfg: BackendConfig,
}

pub struct BackendReadTransaction<'a> {
    idlayer: UnsafeCell<IdlArcSqliteReadTransaction<'a>>,
    idxmeta: CowCellReadTxn<IdxMeta>,
+    ruv: UnsafeCell<ReplicationUpdateVectorReadTransaction<'a>>,
}

unsafe impl<'a> Sync for BackendReadTransaction<'a> {}

@@ -124,6 +137,7 @@ unsafe impl<'a> Send for BackendReadTransaction<'a> {}
pub struct BackendWriteTransaction<'a> {
    idlayer: UnsafeCell<IdlArcSqliteWriteTransaction<'a>>,
    idxmeta: CowCellReadTxn<IdxMeta>,
+    ruv: UnsafeCell<ReplicationUpdateVectorWriteTransaction<'a>>,
    idxmeta_wr: CowCellWriteTxn<'a, IdxMeta>,
}
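The ReplicationUpdateVector type itself lives in crate::repl::ruv and is not part of this diff. A minimal sketch of the shape the doc comment above describes, using stand-in types (the real implementation keys on repl::cid::Cid and stores idlset::IDLBitRange values; everything below is an assumption for illustration):

    use std::collections::{BTreeMap, BTreeSet};

    // Stand-in for repl::cid::Cid: an ordered (timestamp, server) pair, so a
    // BTreeMap keyed on it behaves as a time series of changes.
    type SketchCid = (u64, u64);

    struct RuvSketch {
        // change id -> ids of the entries written by that change
        data: BTreeMap<SketchCid, BTreeSet<u64>>,
    }

    impl RuvSketch {
        // Record that change `cid` wrote the entries in `idl`.
        fn insert_change(&mut self, cid: SketchCid, idl: BTreeSet<u64>) {
            self.data.insert(cid, idl);
        }

        // Drop all changes older than `cid`, returning the union of entry ids
        // the trimmed changes covered - the candidates for tombstone cleanup.
        fn trim_up_to(&mut self, cid: &SketchCid) -> BTreeSet<u64> {
            let kept = self.data.split_off(cid);
            let trimmed = std::mem::replace(&mut self.data, kept);
            trimmed.into_values().flatten().collect()
        }
    }

insert_change and trim_up_to mirror the calls made on the real RUV in the create, modify and reap_tombstones paths below.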
@@ -149,10 +163,13 @@ impl IdRawEntry {
pub trait BackendTransaction {
    type IdlLayerType: IdlArcSqliteTransaction;
    #[allow(clippy::mut_from_ref)]
    fn get_idlayer(&self) -> &mut Self::IdlLayerType;

+    type RuvType: ReplicationUpdateVectorTransaction;
+    #[allow(clippy::mut_from_ref)]
+    fn get_ruv(&self) -> &mut Self::RuvType;
+
    fn get_idxmeta_ref(&self) -> &IdxMeta;

    /// Recursively apply a filter, transforming into IdList's on the way. This builds a query

@@ -788,6 +805,21 @@ pub trait BackendTransaction {
        }
    }

+    fn verify_ruv(&self, results: &mut Vec<Result<(), ConsistencyError>>) {
+        // The way we verify this is building a whole second RUV and then comparing it.
+        let idl = IdList::AllIds;
+        let entries = match self.get_idlayer().get_identry(&idl) {
+            Ok(ent) => ent,
+            Err(e) => {
+                results.push(Err(ConsistencyError::Unknown));
+                admin_error!(?e, "get_identry failed");
+                return;
+            }
+        };
+
+        self.get_ruv().verify(&entries, results);
+    }
+
    fn backup(&self, dst_path: &str) -> Result<(), OperationError> {
        // load all entries into RAM, may need to change this later
        // if the size of the database compared to RAM is an issue
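The RUV's verify and rebuild calls share the same core idea: replay every entry's changelog into a fresh structure. Over the stand-in types from the sketch above (the real verify signature in crate::repl::ruv is not shown in this commit, so treat this as an assumption-level illustration):

    // Replay entry changelogs into a fresh RUV. The same loop serves both the
    // startup rebuild and, as above, a consistency cross-check of the live RUV.
    fn rebuild_sketch(entries: &[(u64, Vec<SketchCid>)]) -> RuvSketch {
        let mut ruv = RuvSketch { data: BTreeMap::new() };
        for (id, changelog_cids) in entries {
            for cid in changelog_cids {
                ruv.data.entry(*cid).or_default().insert(*id);
            }
        }
        ruv
    }

    fn verify_sketch(live: &RuvSketch, entries: &[(u64, Vec<SketchCid>)]) -> bool {
        rebuild_sketch(entries).data == live.data
    }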
@@ -867,6 +899,13 @@ impl<'a> BackendTransaction for BackendReadTransaction<'a> {
        unsafe { &mut (*self.idlayer.get()) }
    }

+    type RuvType = ReplicationUpdateVectorReadTransaction<'a>;
+
+    #[allow(clippy::mut_from_ref)]
+    fn get_ruv(&self) -> &mut ReplicationUpdateVectorReadTransaction<'a> {
+        unsafe { &mut (*self.ruv.get()) }
+    }
+
    fn get_idxmeta_ref(&self) -> &IdxMeta {
        &self.idxmeta
    }

@@ -901,6 +940,13 @@ impl<'a> BackendTransaction for BackendWriteTransaction<'a> {
        unsafe { &mut (*self.idlayer.get()) }
    }

+    type RuvType = ReplicationUpdateVectorWriteTransaction<'a>;
+
+    #[allow(clippy::mut_from_ref)]
+    fn get_ruv(&self) -> &mut ReplicationUpdateVectorWriteTransaction<'a> {
+        unsafe { &mut (*self.ruv.get()) }
+    }
+
    fn get_idxmeta_ref(&self) -> &IdxMeta {
        &self.idxmeta
    }

@@ -909,6 +955,7 @@ impl<'a> BackendTransaction for BackendWriteTransaction<'a> {
impl<'a> BackendWriteTransaction<'a> {
    pub fn create(
        &self,
+        cid: &Cid,
        entries: Vec<Entry<EntrySealed, EntryNew>>,
    ) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
        spanned!("be::create", {

@@ -917,6 +964,19 @@ impl<'a> BackendWriteTransaction<'a> {
                return Err(OperationError::EmptyRequest);
            }

+            // Check that every entry has a change associated
+            // that matches the cid?
+            entries.iter().try_for_each(|e| {
+                if e.get_changelog().contains_tail_cid(cid) {
+                    Ok(())
+                } else {
+                    admin_error!(
+                        "Entry changelog does not contain a change related to this transaction"
+                    );
+                    Err(OperationError::ReplEntryNotChanged)
+                }
+            })?;
+
            let idlayer = self.get_idlayer();
            // Now, assign id's to all the new entries.

@@ -929,6 +989,12 @@ impl<'a> BackendWriteTransaction<'a> {
                })
                .collect();

+            // All good, let's update the RUV.
+            // This auto compresses.
+            let ruv_idl = IDLBitRange::from_iter(c_entries.iter().map(|e| e.get_id()));
+            self.get_ruv().insert_change(cid.clone(), ruv_idl)?;
+
            idlayer.write_identries(c_entries.iter())?;

            idlayer.set_id2entry_max_id(id_max);
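The contains_tail_cid guard implies that the calling layer stamps each entry's changelog with the transaction's cid before handing it to the backend; an entry whose most recent change is not this transaction signals a bookkeeping bug. In miniature (EntryChangelog's API is not in this diff, so the "tail means newest" semantics here are an assumption):

    struct ChangelogSketch {
        cids: Vec<SketchCid>, // ordered oldest -> newest
    }

    impl ChangelogSketch {
        // True when the newest recorded change is the given cid.
        fn contains_tail_cid(&self, cid: &SketchCid) -> bool {
            self.cids.last() == Some(cid)
        }
    }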
@@ -944,8 +1010,9 @@ impl<'a> BackendWriteTransaction<'a> {
    pub fn modify(
        &self,
+        cid: &Cid,
        pre_entries: &[Arc<EntrySealedCommitted>],
-        post_entries: &[Entry<EntrySealed, EntryCommitted>],
+        post_entries: &[EntrySealedCommitted],
    ) -> Result<(), OperationError> {
        spanned!("be::modify", {
            if post_entries.is_empty() || pre_entries.is_empty() {

@@ -955,36 +1022,21 @@ impl<'a> BackendWriteTransaction<'a> {
            assert!(post_entries.len() == pre_entries.len());

-            /*
-            // Assert the Id's exist on the entry, and serialise them.
-            // Now, that means the ID must be > 0!!!
-            let ser_entries: Result<Vec<IdEntry>, _> = post_entries
-                .iter()
-                .map(|e| {
-                    let id = i64::try_from(e.get_id())
-                        .map_err(|_| OperationError::InvalidEntryId)
-                        .and_then(|id| {
-                            if id == 0 {
-                                Err(OperationError::InvalidEntryId)
-                            } else {
-                                Ok(id)
-                            }
-                        })?;
-
-                    Ok(IdEntry { id, data: e.clone() })
-                })
-                .collect();
-
-            let ser_entries = try_audit!(ser_entries);
-
-            // Simple: If the list of id's is not the same as the input list, we are missing id's
-            //
-            // The entry state checks prevent this from really ever being triggered, but we
-            // still prefer paranoia :)
-            if post_entries.len() != ser_entries.len() {
-                return Err(OperationError::InvalidEntryState);
-            }
-            */
+            post_entries.iter().try_for_each(|e| {
+                if e.get_changelog().contains_tail_cid(cid) {
+                    Ok(())
+                } else {
+                    admin_error!(
+                        "Entry changelog does not contain a change related to this transaction"
+                    );
+                    Err(OperationError::ReplEntryNotChanged)
+                }
+            })?;
+
+            // All good, let's update the RUV.
+            // This auto compresses.
+            let ruv_idl = IDLBitRange::from_iter(post_entries.iter().map(|e| e.get_id()));
+            self.get_ruv().insert_change(cid.clone(), ruv_idl)?;

            // Now, given the list of id's, update them
            self.get_idlayer().write_identries(post_entries.iter())?;

@@ -998,23 +1050,81 @@ impl<'a> BackendWriteTransaction<'a> {
        })
    }

-    pub fn delete(&self, entries: &[Arc<EntrySealedCommitted>]) -> Result<(), OperationError> {
-        spanned!("be::delete", {
+    pub fn reap_tombstones(&self, cid: &Cid) -> Result<usize, OperationError> {
+        spanned!("be::reap_tombstones", {
+            // We plan to clear the RUV up to this cid. So we need to build an IDL
+            // of all the entries we need to examine.
+            let idl = self.get_ruv().trim_up_to(cid).map_err(|e| {
+                admin_error!(?e, "failed to trim RUV to {:?}", cid);
+                e
+            })?;
+
+            let entries = self
+                .get_idlayer()
+                .get_identry(&IdList::Indexed(idl))
+                .map_err(|e| {
+                    admin_error!(?e, "get_identry failed");
+                    e
+                })?;
+
            if entries.is_empty() {
-                admin_error!("No entries provided to BE to delete, invalid server call!");
-                return Err(OperationError::EmptyRequest);
+                admin_info!("No entries affected - reap_tombstones operation success");
+                return Ok(0);
            }

-            // Assert the id's exist on the entry.
-            let id_list = entries.iter().map(|e| e.get_id());
-
-            // Now, given the list of id's, delete them.
-            self.get_idlayer().delete_identry(id_list)?;
+            // Now that we have a list of entries we need to partition them into
+            // two sets. The entries that are tombstoned and ready to reap, and
+            // the entries that need to have their change logs trimmed.
+
+            // First we trim changelogs. Go through each entry, and trim the CL, and write it back.
+            let mut entries: Vec<_> = entries.iter().map(|er| er.as_ref().clone()).collect();
+            entries
+                .iter_mut()
+                .try_for_each(|e| e.get_changelog_mut().trim_up_to(cid))?;
+
+            // Write down the cl trims
+            self.get_idlayer().write_identries(entries.iter())?;
+
+            let (tombstones, leftover): (Vec<_>, Vec<_>) = entries
+                .into_iter()
+                .partition(|e| e.get_changelog().can_delete());
+
+            // Assert that anything leftover still either is *alive* OR is a tombstone
+            // and has entries in the RUV!
+            let ruv_idls = self.get_ruv().ruv_idls();
+
+            if !leftover
+                .iter()
+                .all(|e| e.get_changelog().is_live() || ruv_idls.contains(e.get_id()))
+            {
+                admin_error!("Left over entries may be orphaned due to missing RUV entries");
+                return Err(OperationError::ReplInvalidRUVState);
+            }
+
+            // Now set up to reap the tombstones. Remember, in the post cleanup, they could
+            // now have been trimmed to a point we can purge them!
+
+            // Assert the id's exist on the entry.
+            let id_list: IDLBitRange = tombstones.iter().map(|e| e.get_id()).collect();
+
+            // Ensure nothing here exists in the RUV index, else it means
+            // we didn't trim properly, or some other state violation has occurred.
+            if !((&ruv_idls & &id_list).is_empty()) {
+                admin_error!("RUV still contains entries that are going to be removed.");
+                return Err(OperationError::ReplInvalidRUVState);
+            }
+
+            // Now, given the list of id's, reap them.
+            let sz = id_list.len();
+            self.get_idlayer().delete_identry(id_list.into_iter())?;

            // Finally, purge the indexes from the entries we removed.
-            entries
+            tombstones
                .iter()
-                .try_for_each(|e| self.entry_index(Some(e), None))
+                .try_for_each(|e| self.entry_index(Some(e), None))?;
+
+            Ok(sz)
        })
    }
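The two ReplInvalidRUVState bail-outs above enforce a single invariant: an entry id may only be physically deleted once no change remaining in the RUV still references it. Over the stand-in RuvSketch from earlier (again an illustration, not the commit's API):

    // True when `id` is safe to reap: nothing left in the RUV refers to it.
    fn may_reap(ruv: &RuvSketch, id: u64) -> bool {
        !ruv.data.values().any(|idl| idl.contains(&id))
    }

This is why reap_tombstones trims the RUV and each changelog first, and only then partitions out tombstones whose changelogs report can_delete().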
@@ -1416,17 +1526,41 @@ impl<'a> BackendWriteTransaction<'a> {
        }
    }

+    pub fn ruv_rebuild(&mut self) -> Result<(), OperationError> {
+        // Rebuild the ruv!
+        spanned!("server::ruv_rebuild", {
+            // For now this has to read from all the entries in the DB, but in the future
+            // we'll actually store this properly (?). If it turns out this is really fast
+            // we may just rebuild this always on startup.
+
+            // NOTE: An important detail is that we don't rely on indexes here!
+            let idl = IdList::AllIds;
+            let entries = self.get_idlayer().get_identry(&idl).map_err(|e| {
+                admin_error!(?e, "get_identry failed");
+                e
+            })?;
+
+            self.get_ruv().rebuild(&entries)?;
+
+            Ok(())
+        })
+    }
+
    pub fn commit(self) -> Result<(), OperationError> {
        let BackendWriteTransaction {
            idlayer,
            idxmeta: _,
+            ruv,
            idxmeta_wr,
        } = self;

        // Unwrap the Cell we have finished with it.
        let idlayer = idlayer.into_inner();
+        let ruv = ruv.into_inner();

        idlayer.commit().map(|()| {
+            ruv.commit();
            idxmeta_wr.commit();
        })
    }
@@ -1539,12 +1673,18 @@ impl Backend {
            })
            .collect();

+        // RUV-TODO
+        // Load the replication update vector here. For now we rebuild every startup
+        // from the database.
+        let ruv = Arc::new(ReplicationUpdateVector::default());
+
        // this has a ::memory() type, but will path == "" work?
        spanned!("be::new", {
            let idlayer = Arc::new(IdlArcSqlite::new(&cfg, vacuum)?);
            let be = Backend {
                cfg,
                idlayer,
+                ruv,
                idxmeta: Arc::new(CowCell::new(IdxMeta::new(idxkeys))),
            };

@@ -1552,17 +1692,26 @@ impl Backend {
            // In this case we can use an empty idx meta because we don't
            // access any parts of
            // the indexing subsystem here.
-            let r = {
-                let mut idl_write = be.idlayer.write();
-                idl_write.setup().and_then(|_| idl_write.commit())
-            };
+            let mut idl_write = be.idlayer.write();
+            idl_write
+                .setup()
+                .and_then(|_| idl_write.commit())
+                .map_err(|e| {
+                    admin_error!(?e, "Failed to setup idlayer");
+                    e
+                })?;

-            trace!("be new setup: {:?}", r);
+            // Now rebuild the ruv.
+            let mut be_write = be.write();
+            be_write
+                .ruv_rebuild()
+                .and_then(|_| be_write.commit())
+                .map_err(|e| {
+                    admin_error!(?e, "Failed to reload ruv");
+                    e
+                })?;

-            match r {
-                Ok(_) => Ok(be),
-                Err(e) => Err(e),
-            }
+            Ok(be)
        })
    }

@@ -1579,6 +1728,7 @@ impl Backend {
        BackendReadTransaction {
            idlayer: UnsafeCell::new(self.idlayer.read()),
            idxmeta: self.idxmeta.read(),
+            ruv: UnsafeCell::new(self.ruv.read()),
        }
    }

@@ -1586,6 +1736,7 @@ impl Backend {
        BackendWriteTransaction {
            idlayer: UnsafeCell::new(self.idlayer.write()),
            idxmeta: self.idxmeta.read(),
+            ruv: UnsafeCell::new(self.ruv.write()),
            idxmeta_wr: self.idxmeta.write(),
        }
    }

@@ -1628,9 +1779,17 @@ mod tests {
    use super::{DbBackup, IdxKey};
    use crate::identity::Limits;
    use crate::prelude::*;
+    use crate::repl::cid::Cid;
    use crate::value::{IndexType, PartialValue, Value};
    use std::time::Duration;

+    lazy_static! {
+        static ref CID_ZERO: Cid = unsafe { Cid::new_zero() };
+        static ref CID_ONE: Cid = unsafe { Cid::new_count(1) };
+        static ref CID_TWO: Cid = unsafe { Cid::new_count(2) };
+        static ref CID_THREE: Cid = unsafe { Cid::new_count(3) };
+    }
+
    macro_rules! run_test {
        ($test_fn:expr) => {{
            let _ = crate::tracing_tree::test_init();
@@ -1683,7 +1842,7 @@ mod tests {
        ($be:expr, $ent:expr) => {{
            let ei = unsafe { $ent.clone().into_sealed_committed() };
            let filt = unsafe {
-                ei.filter_from_attrs(&vec![AttrString::from("userid")])
+                ei.filter_from_attrs(&vec![AttrString::from("uuid")])
                    .expect("failed to generate filter")
                    .into_valid_resolved()
            };

@@ -1725,7 +1884,7 @@ mod tests {
        run_test!(|be: &mut BackendWriteTransaction| {
            trace!("Simple Create");

-            let empty_result = be.create(Vec::new());
+            let empty_result = be.create(&CID_ZERO, Vec::new());
            trace!("{:?}", empty_result);
            assert_eq!(empty_result, Err(OperationError::EmptyRequest));

@@ -1734,7 +1893,7 @@ mod tests {
            e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1"));
            let e = unsafe { e.into_sealed_new() };

-            let single_result = be.create(vec![e.clone()]);
+            let single_result = be.create(&CID_ZERO, vec![e.clone()]);

            assert!(single_result.is_ok());

@@ -1753,7 +1912,7 @@ mod tests {
            e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1"));
            let e = unsafe { e.into_sealed_new() };

-            let single_result = be.create(vec![e.clone()]);
+            let single_result = be.create(&CID_ZERO, vec![e.clone()]);
            assert!(single_result.is_ok());
            // Test a simple EQ search

@@ -1790,7 +1949,7 @@ mod tests {
            let ve1 = unsafe { e1.clone().into_sealed_new() };
            let ve2 = unsafe { e2.clone().into_sealed_new() };

-            assert!(be.create(vec![ve1, ve2]).is_ok());
+            assert!(be.create(&CID_ZERO, vec![ve1, ve2]).is_ok());
            assert!(entry_exists!(be, e1));
            assert!(entry_exists!(be, e2));

@@ -1810,9 +1969,11 @@ mod tests {
            // This is now impossible due to the state machine design.
            // However, with some unsafe ....
            let ue1 = unsafe { e1.clone().into_sealed_committed() };
-            assert!(be.modify(&vec![Arc::new(ue1.clone())], &vec![ue1]).is_err());
+            assert!(be
+                .modify(&CID_ZERO, &vec![Arc::new(ue1.clone())], &vec![ue1])
+                .is_err());
            // Modify none
-            assert!(be.modify(&vec![], &vec![]).is_err());
+            assert!(be.modify(&CID_ZERO, &vec![], &vec![]).is_err());

            // Make some changes to r1, r2.
            let pre1 = unsafe { Arc::new(r1.clone().into_sealed_committed()) };

@@ -1826,7 +1987,9 @@ mod tests {
            let vr2 = unsafe { r2.into_sealed_committed() };

            // Modify single
-            assert!(be.modify(&vec![pre1.clone()], &vec![vr1.clone()]).is_ok());
+            assert!(be
+                .modify(&CID_ZERO, &vec![pre1.clone()], &vec![vr1.clone()])
+                .is_ok());
            // Assert no other changes
            assert!(entry_attr_pres!(be, vr1, "desc"));
            assert!(!entry_attr_pres!(be, vr2, "desc"));

@@ -1834,6 +1997,7 @@ mod tests {
            // Modify both
            assert!(be
                .modify(
+                    &CID_ZERO,
                    &vec![Arc::new(vr1.clone()), pre2.clone()],
                    &vec![vr1.clone(), vr2.clone()]
                )

@@ -1867,7 +2031,7 @@ mod tests {
            let ve2 = unsafe { e2.clone().into_sealed_new() };
            let ve3 = unsafe { e3.clone().into_sealed_new() };

-            assert!(be.create(vec![ve1, ve2, ve3]).is_ok());
+            assert!(be.create(&CID_ZERO, vec![ve1, ve2, ve3]).is_ok());
            assert!(entry_exists!(be, e1));
            assert!(entry_exists!(be, e2));
            assert!(entry_exists!(be, e3));

@@ -1882,36 +2046,60 @@ mod tests {
            let r2 = results.remove(0);
            let r3 = results.remove(0);

-            // Delete one
-            assert!(be.delete(&vec![r1.clone()]).is_ok());
-            assert!(!entry_exists!(be, r1.as_ref()));
+            // Deletes nothing, all entries are live.
+            assert!(matches!(be.reap_tombstones(&CID_ZERO), Ok(0)));

-            // delete none (no match filter)
-            assert!(be.delete(&vec![]).is_err());
+            // Put them into the tombstone state, and write that down.
+            // This sets up the RUV with the changes.
+            let r1_ts = unsafe { r1.to_tombstone(CID_ONE.clone()).into_sealed_committed() };

-            // Delete with no id
-            // WARNING: Normally, this isn't possible, but we are pursposefully breaking
-            // the state machine rules here!!!!
-            let mut e4: Entry<EntryInit, EntryNew> = Entry::new();
-            e4.add_ava("userid", Value::from("amy"));
-            e4.add_ava("uuid", Value::from("21d816b5-1f6a-4696-b7c1-6ed06d22ed81"));
-            let ve4 = unsafe { Arc::new(e4.clone().into_sealed_committed()) };
+            assert!(be
+                .modify(&CID_ONE, &vec![r1.clone(),], &vec![r1_ts.clone()])
+                .is_ok());

-            assert!(be.delete(&vec![ve4]).is_err());
+            let r2_ts = unsafe { r2.to_tombstone(CID_TWO.clone()).into_sealed_committed() };
+            let r3_ts = unsafe { r3.to_tombstone(CID_TWO.clone()).into_sealed_committed() };

-            assert!(entry_exists!(be, r2.as_ref()));
-            assert!(entry_exists!(be, r3.as_ref()));
+            assert!(be
+                .modify(
+                    &CID_TWO,
+                    &vec![r2.clone(), r3.clone()],
+                    &vec![r2_ts.clone(), r3_ts.clone()]
+                )
+                .is_ok());

-            // delete batch
-            assert!(be.delete(&vec![r2.clone(), r3.clone()]).is_ok());
+            // The entries are now tombstones, but are still in the ruv. This is because we
+            // targeted CID_ZERO, not ONE.
+            assert!(matches!(be.reap_tombstones(&CID_ZERO), Ok(0)));
+
+            assert!(entry_exists!(be, r1_ts));
+            assert!(entry_exists!(be, r2_ts));
+            assert!(entry_exists!(be, r3_ts));

-            assert!(!entry_exists!(be, r2.as_ref()));
-            assert!(!entry_exists!(be, r3.as_ref()));
+            assert!(matches!(be.reap_tombstones(&CID_ONE), Ok(0)));

-            // delete none (no entries left)
-            // see fn delete for why this is ok, not err
-            assert!(be.delete(&vec![r2.clone(), r3.clone()]).is_ok());
+            assert!(entry_exists!(be, r1_ts));
+            assert!(entry_exists!(be, r2_ts));
+            assert!(entry_exists!(be, r3_ts));
+
+            assert!(matches!(be.reap_tombstones(&CID_TWO), Ok(1)));
+
+            assert!(!entry_exists!(be, r1_ts));
+            assert!(entry_exists!(be, r2_ts));
+            assert!(entry_exists!(be, r3_ts));
+
+            assert!(matches!(be.reap_tombstones(&CID_THREE), Ok(2)));
+
+            assert!(!entry_exists!(be, r1_ts));
+            assert!(!entry_exists!(be, r2_ts));
+            assert!(!entry_exists!(be, r3_ts));
+
+            // Nothing left
+            assert!(matches!(be.reap_tombstones(&CID_THREE), Ok(0)));
+
+            assert!(!entry_exists!(be, r1_ts));
+            assert!(!entry_exists!(be, r2_ts));
+            assert!(!entry_exists!(be, r3_ts));
        });
    }
@ -1945,7 +2133,7 @@ mod tests {
let ve2 = unsafe { e2.clone().into_sealed_new() }; let ve2 = unsafe { e2.clone().into_sealed_new() };
let ve3 = unsafe { e3.clone().into_sealed_new() }; let ve3 = unsafe { e3.clone().into_sealed_new() };
assert!(be.create(vec![ve1, ve2, ve3]).is_ok()); assert!(be.create(&CID_ZERO, vec![ve1, ve2, ve3]).is_ok());
assert!(entry_exists!(be, e1)); assert!(entry_exists!(be, e1));
assert!(entry_exists!(be, e2)); assert!(entry_exists!(be, e2));
assert!(entry_exists!(be, e3)); assert!(entry_exists!(be, e3));
@ -2000,7 +2188,7 @@ mod tests {
let ve2 = unsafe { e2.clone().into_sealed_new() }; let ve2 = unsafe { e2.clone().into_sealed_new() };
let ve3 = unsafe { e3.clone().into_sealed_new() }; let ve3 = unsafe { e3.clone().into_sealed_new() };
assert!(be.create(vec![ve1, ve2, ve3]).is_ok()); assert!(be.create(&CID_ZERO, vec![ve1, ve2, ve3]).is_ok());
assert!(entry_exists!(be, e1)); assert!(entry_exists!(be, e1));
assert!(entry_exists!(be, e2)); assert!(entry_exists!(be, e2));
assert!(entry_exists!(be, e3)); assert!(entry_exists!(be, e3));
@ -2089,7 +2277,7 @@ mod tests {
e2.add_ava("uuid", Value::from("bd651620-00dd-426b-aaa0-4494f7b7906f")); e2.add_ava("uuid", Value::from("bd651620-00dd-426b-aaa0-4494f7b7906f"));
let e2 = unsafe { e2.into_sealed_new() }; let e2 = unsafe { e2.into_sealed_new() };
be.create(vec![e1.clone(), e2.clone()]).unwrap(); be.create(&CID_ZERO, vec![e1.clone(), e2.clone()]).unwrap();
// purge indexes // purge indexes
be.purge_idxs().unwrap(); be.purge_idxs().unwrap();
@ -2181,8 +2369,9 @@ mod tests {
e1.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1")); e1.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1"));
let e1 = unsafe { e1.into_sealed_new() }; let e1 = unsafe { e1.into_sealed_new() };
let rset = be.create(vec![e1.clone()]).unwrap(); let rset = be.create(&CID_ZERO, vec![e1.clone()]).unwrap();
let rset: Vec<_> = rset.into_iter().map(Arc::new).collect(); let mut rset: Vec<_> = rset.into_iter().map(Arc::new).collect();
let e1 = rset.pop().unwrap();
idl_state!(be, "name", IndexType::Equality, "william", Some(vec![1])); idl_state!(be, "name", IndexType::Equality, "william", Some(vec![1]));
@ -2203,8 +2392,12 @@ mod tests {
assert!(be.uuid2spn(william_uuid) == Ok(Some(Value::from("william")))); assert!(be.uuid2spn(william_uuid) == Ok(Some(Value::from("william"))));
assert!(be.uuid2rdn(william_uuid) == Ok(Some("name=william".to_string()))); assert!(be.uuid2rdn(william_uuid) == Ok(Some("name=william".to_string())));
// == Now we delete, and assert we removed the items. // == Now we reap_tombstones, and assert we removed the items.
be.delete(&rset).unwrap(); let e1_ts = unsafe { e1.to_tombstone(CID_ONE.clone()).into_sealed_committed() };
assert!(be
.modify(&CID_ONE, &vec![e1.clone()], &vec![e1_ts.clone()])
.is_ok());
be.reap_tombstones(&CID_TWO).unwrap();
idl_state!(be, "name", IndexType::Equality, "william", Some(Vec::new())); idl_state!(be, "name", IndexType::Equality, "william", Some(Vec::new()));
@ -2249,12 +2442,25 @@ mod tests {
e3.add_ava("uuid", Value::from("7b23c99d-c06b-4a9a-a958-3afa56383e1d")); e3.add_ava("uuid", Value::from("7b23c99d-c06b-4a9a-a958-3afa56383e1d"));
let e3 = unsafe { e3.into_sealed_new() }; let e3 = unsafe { e3.into_sealed_new() };
let mut rset = be.create(vec![e1.clone(), e2.clone(), e3.clone()]).unwrap(); let mut rset = be
.create(&CID_ZERO, vec![e1.clone(), e2.clone(), e3.clone()])
.unwrap();
rset.remove(1); rset.remove(1);
let rset: Vec<_> = rset.into_iter().map(Arc::new).collect(); let mut rset: Vec<_> = rset.into_iter().map(Arc::new).collect();
let e1 = rset.pop().unwrap();
let e3 = rset.pop().unwrap();
// Now remove e1, e3. // Now remove e1, e3.
be.delete(&rset).unwrap(); let e1_ts = unsafe { e1.to_tombstone(CID_ONE.clone()).into_sealed_committed() };
let e3_ts = unsafe { e3.to_tombstone(CID_ONE.clone()).into_sealed_committed() };
assert!(be
.modify(
&CID_ONE,
&vec![e1.clone(), e3.clone()],
&vec![e1_ts.clone(), e3_ts.clone()]
)
.is_ok());
be.reap_tombstones(&CID_TWO).unwrap();
idl_state!(be, "name", IndexType::Equality, "claire", Some(vec![2])); idl_state!(be, "name", IndexType::Equality, "claire", Some(vec![2]));
@@ -2303,7 +2509,7 @@ mod tests {
e1.add_ava("ta", Value::from("test")); e1.add_ava("ta", Value::from("test"));
let e1 = unsafe { e1.into_sealed_new() }; let e1 = unsafe { e1.into_sealed_new() };
let rset = be.create(vec![e1.clone()]).unwrap(); let rset = be.create(&CID_ZERO, vec![e1.clone()]).unwrap();
let rset: Vec<_> = rset.into_iter().map(Arc::new).collect(); let rset: Vec<_> = rset.into_iter().map(Arc::new).collect();
// Now, alter the new entry. // Now, alter the new entry.
let mut ce1 = unsafe { rset[0].as_ref().clone().into_invalid() }; let mut ce1 = unsafe { rset[0].as_ref().clone().into_invalid() };
@@ -2317,7 +2523,7 @@ mod tests {
let ce1 = unsafe { ce1.into_sealed_committed() }; let ce1 = unsafe { ce1.into_sealed_committed() };
be.modify(&rset, &vec![ce1]).unwrap(); be.modify(&CID_ZERO, &rset, &vec![ce1]).unwrap();
// Now check the idls // Now check the idls
idl_state!(be, "name", IndexType::Equality, "claire", Some(vec![1])); idl_state!(be, "name", IndexType::Equality, "claire", Some(vec![1]));
@@ -2349,7 +2555,7 @@ mod tests {
e1.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1")); e1.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1"));
let e1 = unsafe { e1.into_sealed_new() }; let e1 = unsafe { e1.into_sealed_new() };
let rset = be.create(vec![e1.clone()]).unwrap(); let rset = be.create(&CID_ZERO, vec![e1.clone()]).unwrap();
let rset: Vec<_> = rset.into_iter().map(Arc::new).collect(); let rset: Vec<_> = rset.into_iter().map(Arc::new).collect();
// Now, alter the new entry. // Now, alter the new entry.
let mut ce1 = unsafe { rset[0].as_ref().clone().into_invalid() }; let mut ce1 = unsafe { rset[0].as_ref().clone().into_invalid() };
@@ -2359,7 +2565,7 @@ mod tests {
ce1.add_ava("uuid", Value::from("04091a7a-6ce4-42d2-abf5-c2ce244ac9e8")); ce1.add_ava("uuid", Value::from("04091a7a-6ce4-42d2-abf5-c2ce244ac9e8"));
let ce1 = unsafe { ce1.into_sealed_committed() }; let ce1 = unsafe { ce1.into_sealed_committed() };
be.modify(&rset, &vec![ce1]).unwrap(); be.modify(&CID_ZERO, &rset, &vec![ce1]).unwrap();
idl_state!(be, "name", IndexType::Equality, "claire", Some(vec![1])); idl_state!(be, "name", IndexType::Equality, "claire", Some(vec![1]));
@@ -2412,7 +2618,7 @@ mod tests {
e2.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d2")); e2.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d2"));
let e2 = unsafe { e2.into_sealed_new() }; let e2 = unsafe { e2.into_sealed_new() };
let _rset = be.create(vec![e1.clone(), e2.clone()]).unwrap(); let _rset = be.create(&CID_ZERO, vec![e1.clone(), e2.clone()]).unwrap();
// Test fully unindexed // Test fully unindexed
let f_un = let f_un =
unsafe { filter_resolved!(f_eq("no-index", PartialValue::new_utf8s("william"))) }; unsafe { filter_resolved!(f_eq("no-index", PartialValue::new_utf8s("william"))) };
@@ -2729,7 +2935,9 @@ mod tests {
e3.add_ava("tb", Value::from("2")); e3.add_ava("tb", Value::from("2"));
let e3 = unsafe { e3.into_sealed_new() }; let e3 = unsafe { e3.into_sealed_new() };
let _rset = be.create(vec![e1.clone(), e2.clone(), e3.clone()]).unwrap(); let _rset = be
.create(&CID_ZERO, vec![e1.clone(), e2.clone(), e3.clone()])
.unwrap();
// If the slopes haven't been generated yet, there are some hardcoded values // If the slopes haven't been generated yet, there are some hardcoded values
// that we can use instead. They aren't generated until a first re-index. // that we can use instead. They aren't generated until a first re-index.
@@ -2813,7 +3021,7 @@ mod tests {
e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1")); e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1"));
e.add_ava("nonexist", Value::from("x")); e.add_ava("nonexist", Value::from("x"));
let e = unsafe { e.into_sealed_new() }; let e = unsafe { e.into_sealed_new() };
let single_result = be.create(vec![e.clone()]); let single_result = be.create(&CID_ZERO, vec![e.clone()]);
assert!(single_result.is_ok()); assert!(single_result.is_ok());
let filt = unsafe { let filt = unsafe {
@@ -2849,7 +3057,7 @@ mod tests {
e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1")); e.add_ava("uuid", Value::from("db237e8a-0079-4b8c-8a56-593b22aa44d1"));
e.add_ava("nonexist", Value::from("x")); e.add_ava("nonexist", Value::from("x"));
let e = unsafe { e.into_sealed_new() }; let e = unsafe { e.into_sealed_new() };
let single_result = be.create(vec![e.clone()]); let single_result = be.create(&CID_ZERO, vec![e.clone()]);
assert!(single_result.is_ok()); assert!(single_result.is_ok());
let filt = unsafe { let filt = unsafe {
@@ -2907,7 +3115,7 @@ mod tests {
e.add_ava("nonexist", Value::from("x")); e.add_ava("nonexist", Value::from("x"));
e.add_ava("nonexist", Value::from("y")); e.add_ava("nonexist", Value::from("y"));
let e = unsafe { e.into_sealed_new() }; let e = unsafe { e.into_sealed_new() };
let single_result = be.create(vec![e.clone()]); let single_result = be.create(&CID_ZERO, vec![e.clone()]);
assert!(single_result.is_ok()); assert!(single_result.is_ok());
// Reindex so we have things in place for our query // Reindex so we have things in place for our query

View file

@@ -46,7 +46,7 @@ pub enum Policy {
// Why PBKDF2? Rust's bcrypt has a number of hardcodings like max pw len of 72 // Why PBKDF2? Rust's bcrypt has a number of hardcodings like max pw len of 72
// I don't really feel like adding in so many restrictions, so I'll use // I don't really feel like adding in so many restrictions, so I'll use
// pbkdf2 in openssl because it doesn't have the same limits. // pbkdf2 in openssl because it doesn't have the same limits.
#[derive(Clone, Debug)] #[derive(Clone, Debug, PartialEq)]
enum Kdf { enum Kdf {
// cost, salt, hash // cost, salt, hash
PBKDF2(usize, Vec<u8>, Vec<u8>), PBKDF2(usize, Vec<u8>, Vec<u8>),
@@ -54,7 +54,7 @@ enum Kdf {
SSHA512(Vec<u8>, Vec<u8>), SSHA512(Vec<u8>, Vec<u8>),
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug, PartialEq)]
pub struct Password { pub struct Password {
material: Kdf, material: Kdf,
} }
@@ -213,7 +213,7 @@ impl Password {
} }
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug, PartialEq)]
pub struct BackupCodes { pub struct BackupCodes {
code_set: HashSet<String>, code_set: HashSet<String>,
} }
@@ -245,7 +245,7 @@ impl BackupCodes {
} }
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug, PartialEq)]
/// This is how we store credentials in the server. An account can have many credentials, and /// This is how we store credentials in the server. An account can have many credentials, and
/// a credential can have many factors. Only successful auth to a credential as a whole unit /// a credential can have many factors. Only successful auth to a credential as a whole unit
/// will succeed. For example: /// will succeed. For example:
@@ -268,7 +268,7 @@ pub struct Credential {
// locked: bool // locked: bool
} }
#[derive(Clone, Debug)] #[derive(Clone, Debug, PartialEq)]
/// The type of credential that is stored. Each of these represents a full set of 'what is required' /// The type of credential that is stored. Each of these represents a full set of 'what is required'
/// to complete an authentication session. The reason to have these typed like this is so we can /// to complete an authentication session. The reason to have these typed like this is so we can
/// apply policy later to what classes or levels of credentials can be used. We use these types /// apply policy later to what classes or levels of credentials can be used. We use these types

View file

@@ -21,7 +21,7 @@ pub enum TotpError {
TimeError, TimeError,
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone, PartialEq)]
pub enum TotpAlgo { pub enum TotpAlgo {
Sha1, Sha1,
Sha256, Sha256,
@@ -58,7 +58,7 @@ impl TotpAlgo {
} }
/// <https://tools.ietf.org/html/rfc6238> which relies on <https://tools.ietf.org/html/rfc4226> /// <https://tools.ietf.org/html/rfc6238> which relies on <https://tools.ietf.org/html/rfc4226>
#[derive(Debug, Clone)] #[derive(Debug, Clone, PartialEq)]
pub struct Totp { pub struct Totp {
secret: Vec<u8>, secret: Vec<u8>,
pub(crate) step: u64, pub(crate) step: u64,

View file

@@ -30,13 +30,14 @@ use crate::ldap::ldap_vattr_map;
use crate::modify::{Modify, ModifyInvalid, ModifyList, ModifyValid}; use crate::modify::{Modify, ModifyInvalid, ModifyList, ModifyValid};
use crate::prelude::*; use crate::prelude::*;
use crate::repl::cid::Cid; use crate::repl::cid::Cid;
use crate::repl::entry::EntryChangelog;
use crate::schema::{SchemaAttribute, SchemaClass, SchemaTransaction}; use crate::schema::{SchemaAttribute, SchemaClass, SchemaTransaction};
use crate::value::{IndexType, SyntaxType}; use crate::value::{IndexType, SyntaxType};
use crate::value::{IntentTokenState, PartialValue, Value}; use crate::value::{IntentTokenState, PartialValue, Value};
use crate::valueset::{self, ValueSet}; use crate::valueset::{self, ValueSet};
use kanidm_proto::v1::Entry as ProtoEntry; use kanidm_proto::v1::Entry as ProtoEntry;
use kanidm_proto::v1::Filter as ProtoFilter; use kanidm_proto::v1::Filter as ProtoFilter;
use kanidm_proto::v1::{OperationError, SchemaError}; use kanidm_proto::v1::{ConsistencyError, OperationError, SchemaError};
use tracing::trace; use tracing::trace;
use crate::be::dbentry::{DbEntry, DbEntryV2, DbEntryVers}; use crate::be::dbentry::{DbEntry, DbEntryV2, DbEntryVers};
@@ -110,8 +111,10 @@ pub struct EntryNew; // new
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct EntryCommitted { pub struct EntryCommitted {
id: u64, id: u64,
} // It's been in the DB, so it has an id }
// pub struct EntryPurged;
// It's been in the DB, so it has an id
// pub struct EntryPurged;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct EntryInit; pub struct EntryInit;
@@ -125,6 +128,7 @@ pub struct EntryInit;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct EntryInvalid { pub struct EntryInvalid {
cid: Cid, cid: Cid,
eclog: EntryChangelog,
} }
/* | /* |
@@ -137,6 +141,7 @@ pub struct EntryValid {
// Asserted with schema, so we know it has a UUID now ... // Asserted with schema, so we know it has a UUID now ...
uuid: Uuid, uuid: Uuid,
cid: Cid, cid: Cid,
eclog: EntryChangelog,
} }
/* | /* |
@@ -148,14 +153,22 @@ pub struct EntryValid {
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct EntrySealed { pub struct EntrySealed {
uuid: Uuid, uuid: Uuid,
eclog: EntryChangelog,
} }
/* |
* | The entry has access controls applied to reduce what is yielded to a client
* V
*/
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct EntryReduced { pub struct EntryReduced {
uuid: Uuid, uuid: Uuid,
} }
fn compare_attrs(left: &Map<AttrString, ValueSet>, right: &Map<AttrString, ValueSet>) -> bool { pub type Eattrs = Map<AttrString, ValueSet>;
pub(crate) fn compare_attrs(left: &Eattrs, right: &Eattrs) -> bool {
// We can't shortcut based on len because cid mod may not be present. // We can't shortcut based on len because cid mod may not be present.
// Build the set of all keys between both. // Build the set of all keys between both.
let allkeys: Set<&str> = left let allkeys: Set<&str> = left
@@ -167,10 +180,14 @@ fn compare_attrs(left: &Map<AttrString, ValueSet>, right: &Map<AttrString, Value
allkeys.into_iter().all(|k| { allkeys.into_iter().all(|k| {
// Both must be Some, and both must have the same interiors. // Both must be Some, and both must have the same interiors.
match (left.get(k), right.get(k)) { let r = match (left.get(k), right.get(k)) {
(Some(l), Some(r)) => l.eq(r), (Some(l), Some(r)) => l.eq(r),
_ => false, _ => false,
};
if !r {
trace!(?k, "compare_attrs_allkeys");
} }
r
}) })
} }
@@ -205,7 +222,7 @@ pub struct Entry<VALID, STATE> {
valid: VALID, valid: VALID,
state: STATE, state: STATE,
// We may need to change this to Set to allow borrow of Value -> PartialValue for lookups. // We may need to change this to Set to allow borrow of Value -> PartialValue for lookups.
attrs: Map<AttrString, ValueSet>, attrs: Eattrs,
} }
impl<VALID, STATE> std::fmt::Debug for Entry<VALID, STATE> impl<VALID, STATE> std::fmt::Debug for Entry<VALID, STATE>
@@ -271,7 +288,7 @@ impl Entry<EntryInit, EntryNew> {
// Somehow we need to take the tree of e attrs, and convert // Somehow we need to take the tree of e attrs, and convert
// all ref types to our types ... // all ref types to our types ...
let map2: Result<Map<AttrString, ValueSet>, OperationError> = e let map2: Result<Eattrs, OperationError> = e
.attrs .attrs
.iter() .iter()
.filter(|(_, v)| !v.is_empty()) .filter(|(_, v)| !v.is_empty())
@@ -332,7 +349,7 @@ impl Entry<EntryInit, EntryNew> {
// str -> proto entry // str -> proto entry
let pe: ProtoEntry = serde_json::from_str(es).expect("Invalid Proto Entry"); let pe: ProtoEntry = serde_json::from_str(es).expect("Invalid Proto Entry");
// use a const map to convert str -> ava // use a const map to convert str -> ava
let x: Map<AttrString, ValueSet> = pe.attrs.into_iter() let x: Eattrs = pe.attrs.into_iter()
.filter_map(|(k, vs)| { .filter_map(|(k, vs)| {
if vs.is_empty() { if vs.is_empty() {
None None
@@ -482,12 +499,23 @@ impl Entry<EntryInit, EntryNew> {
/// Assign the Change Identifier to this Entry, allowing it to be modified and then /// Assign the Change Identifier to this Entry, allowing it to be modified and then
/// written to the `Backend` /// written to the `Backend`
pub fn assign_cid(mut self, cid: Cid) -> Entry<EntryInvalid, EntryNew> { pub fn assign_cid(
mut self,
cid: Cid,
schema: &dyn SchemaTransaction,
) -> Entry<EntryInvalid, EntryNew> {
/* setup our last changed time */ /* setup our last changed time */
self.set_last_changed(cid.clone()); self.set_last_changed(cid.clone());
/*
* Create the change log. This must be the last thing BEFORE we return!
* This is because we need to capture the set_last_changed attribute in
* the create transition.
*/
let eclog = EntryChangelog::new(cid.clone(), self.attrs.clone(), schema);
Entry { Entry {
valid: EntryInvalid { cid }, valid: EntryInvalid { cid, eclog },
state: EntryNew, state: EntryNew,
attrs: self.attrs, attrs: self.attrs,
} }
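// A hedged toy sketch (not code from this patch) of the ordering rule in the
// comment above: set_last_changed must run before the changelog snapshots the
// attrs, or the Create transition would replay without last_modified_cid.
// ToyEntry and assign_cid below are hypothetical stand-ins, not the kanidm API.
use std::collections::BTreeMap;

#[derive(Clone, Debug)]
struct ToyEntry {
    attrs: BTreeMap<String, String>,
}

fn assign_cid(mut e: ToyEntry, cid: &str) -> (ToyEntry, BTreeMap<String, String>) {
    // 1. Record the last-changed marker first ...
    e.attrs.insert("last_modified_cid".to_string(), cid.to_string());
    // 2. ... then snapshot for the changelog's Create transition, so the
    //    snapshot already contains the cid attribute.
    let create_snapshot = e.attrs.clone();
    (e, create_snapshot)
}

fn main() {
    let e = ToyEntry { attrs: BTreeMap::new() };
    let (e, snapshot) = assign_cid(e, "cid-0");
    // Replaying the Create transition reproduces the live entry exactly.
    assert_eq!(e.attrs, snapshot);
}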
@@ -500,21 +528,28 @@ impl Entry<EntryInit, EntryNew> {
#[cfg(test)] #[cfg(test)]
pub unsafe fn into_invalid_new(mut self) -> Entry<EntryInvalid, EntryNew> { pub unsafe fn into_invalid_new(mut self) -> Entry<EntryInvalid, EntryNew> {
self.set_last_changed(Cid::new_zero()); let cid = Cid::new_zero();
self.set_last_changed(cid.clone());
let eclog = EntryChangelog::new_without_schema(cid.clone(), self.attrs.clone());
Entry { Entry {
valid: EntryInvalid { valid: EntryInvalid { cid, eclog },
cid: Cid::new_zero(),
},
state: EntryNew, state: EntryNew,
attrs: self.attrs, attrs: self.attrs,
} }
} }
#[cfg(test)] #[cfg(test)]
pub unsafe fn into_valid_new(self) -> Entry<EntryValid, EntryNew> { pub unsafe fn into_valid_new(mut self) -> Entry<EntryValid, EntryNew> {
let cid = Cid::new_zero();
self.set_last_changed(cid.clone());
let eclog = EntryChangelog::new_without_schema(cid.clone(), self.attrs.clone());
Entry { Entry {
valid: EntryValid { valid: EntryValid {
cid: Cid::new_zero(), cid,
eclog,
uuid: self.get_uuid().expect("Invalid uuid").clone(), uuid: self.get_uuid().expect("Invalid uuid").clone(),
}, },
state: EntryNew, state: EntryNew,
@@ -523,36 +558,47 @@ impl Entry<EntryInit, EntryNew> {
} }
#[cfg(test)] #[cfg(test)]
pub unsafe fn into_sealed_committed(self) -> Entry<EntrySealed, EntryCommitted> { pub unsafe fn into_sealed_committed(mut self) -> Entry<EntrySealed, EntryCommitted> {
let cid = Cid::new_zero();
self.set_last_changed(cid.clone());
let eclog = EntryChangelog::new_without_schema(cid, self.attrs.clone());
let uuid = self let uuid = self
.get_uuid() .get_uuid()
.and_then(|u| Some(u.clone())) .and_then(|u| Some(u.clone()))
.unwrap_or_else(|| Uuid::new_v4()); .unwrap_or_else(|| Uuid::new_v4());
Entry { Entry {
valid: EntrySealed { uuid }, valid: EntrySealed { uuid, eclog },
state: EntryCommitted { id: 0 }, state: EntryCommitted { id: 0 },
attrs: self.attrs, attrs: self.attrs,
} }
} }
#[cfg(test)] #[cfg(test)]
pub unsafe fn into_sealed_new(self) -> Entry<EntrySealed, EntryNew> { pub unsafe fn into_sealed_new(mut self) -> Entry<EntrySealed, EntryNew> {
let cid = Cid::new_zero();
self.set_last_changed(cid.clone());
let eclog = EntryChangelog::new_without_schema(cid, self.attrs.clone());
Entry { Entry {
valid: EntrySealed { valid: EntrySealed {
uuid: self.get_uuid().expect("Invalid uuid").clone(), uuid: self.get_uuid().expect("Invalid uuid").clone(),
eclog,
}, },
state: EntryNew, state: EntryNew,
attrs: self.attrs, attrs: self.attrs,
} }
} }
// ⚠️ replication safety ⚠️
// These functions are SAFE because they occur in the EntryInit
// state, which precedes the generation of the initial Create
// event for the attribute.
/// Add an attribute-value-assertion to this Entry. /// Add an attribute-value-assertion to this Entry.
pub fn add_ava(&mut self, attr: &str, value: Value) { pub fn add_ava(&mut self, attr: &str, value: Value) {
self.add_ava_int(attr, value) self.add_ava_int(attr, value)
} }
/// Replace the existing content of an attribute set of this Entry, with a new set of Values. /// Replace the existing content of an attribute set of this Entry, with a new set of Values.
// pub fn set_ava(&mut self, attr: &str, values: Set<Value>) {
pub fn set_ava<T>(&mut self, attr: &str, iter: T) pub fn set_ava<T>(&mut self, attr: &str, iter: T)
where where
T: IntoIterator<Item = Value>, T: IntoIterator<Item = Value>,
@@ -594,6 +640,7 @@ impl<STATE> Entry<EntryInvalid, STATE> {
valid: EntryValid { valid: EntryValid {
uuid, uuid,
cid: self.valid.cid, cid: self.valid.cid,
eclog: self.valid.eclog,
}, },
state: self.state, state: self.state,
attrs: self.attrs, attrs: self.attrs,
@@ -794,6 +841,7 @@ impl Entry<EntryInvalid, EntryCommitted> {
valid: EntryValid { valid: EntryValid {
cid: self.valid.cid, cid: self.valid.cid,
uuid, uuid,
eclog: self.valid.eclog,
}, },
state: EntryNew, state: EntryNew,
attrs: self.attrs, attrs: self.attrs,
@@ -801,11 +849,30 @@ impl Entry<EntryInvalid, EntryCommitted> {
} }
/// Convert this entry into a recycled entry, that is "in the recycle bin". /// Convert this entry into a recycled entry, that is "in the recycle bin".
pub fn into_recycled(mut self) -> Self { pub fn to_recycled(mut self) -> Self {
// This will put the modify ahead of the recycle transition.
self.add_ava("class", Value::new_class("recycled")); self.add_ava("class", Value::new_class("recycled"));
// Last step before we proceed.
self.valid.eclog.recycled(&self.valid.cid);
Entry { Entry {
valid: self.valid.clone(), valid: self.valid,
state: self.state,
attrs: self.attrs,
}
}
/// Revive this recycled entry, removing it from "the recycle bin".
pub fn to_revived(mut self) -> Self {
// This will put the modify ahead of the revive transition.
self.remove_ava("class", &PVCLASS_RECYCLED);
// Last step before we proceed.
self.valid.eclog.revive(&self.valid.cid);
Entry {
valid: self.valid,
state: self.state, state: self.state,
attrs: self.attrs, attrs: self.attrs,
} }
@@ -821,6 +888,7 @@ impl Entry<EntryInvalid, EntryNew> {
valid: EntryValid { valid: EntryValid {
cid: self.valid.cid, cid: self.valid.cid,
uuid, uuid,
eclog: self.valid.eclog,
}, },
state: EntryNew, state: EntryNew,
attrs: self.attrs, attrs: self.attrs,
@@ -834,7 +902,10 @@ impl Entry<EntryInvalid, EntryNew> {
.and_then(|u| Some(u.clone())) .and_then(|u| Some(u.clone()))
.unwrap_or_else(|| Uuid::new_v4()); .unwrap_or_else(|| Uuid::new_v4());
Entry { Entry {
valid: EntrySealed { uuid }, valid: EntrySealed {
uuid,
eclog: self.valid.eclog,
},
state: EntryCommitted { id: 0 }, state: EntryCommitted { id: 0 },
attrs: self.attrs, attrs: self.attrs,
} }
@@ -868,6 +939,7 @@ impl Entry<EntryInvalid, EntryNew> {
valid: EntryValid { valid: EntryValid {
cid: self.valid.cid, cid: self.valid.cid,
uuid, uuid,
eclog: self.valid.eclog,
}, },
state: EntryCommitted { id: 0 }, state: EntryCommitted { id: 0 },
attrs: self.attrs, attrs: self.attrs,
@@ -883,7 +955,10 @@ impl Entry<EntryInvalid, EntryCommitted> {
.and_then(|u| Some(u.clone())) .and_then(|u| Some(u.clone()))
.unwrap_or_else(|| Uuid::new_v4()); .unwrap_or_else(|| Uuid::new_v4());
Entry { Entry {
valid: EntrySealed { uuid }, valid: EntrySealed {
uuid,
eclog: self.valid.eclog,
},
state: self.state, state: self.state,
attrs: self.attrs, attrs: self.attrs,
} }
@@ -942,6 +1017,10 @@ impl Entry<EntrySealed, EntryCommitted> {
self self
} }
pub fn get_changelog_mut(&mut self) -> &mut EntryChangelog {
&mut self.valid.eclog
}
/// Insert a claim to this entry. This claim can NOT be persisted to disk, this is only /// Insert a claim to this entry. This claim can NOT be persisted to disk, this is only
/// used during a single Event session. /// used during a single Event session.
pub fn insert_claim(&mut self, value: &str) { pub fn insert_claim(&mut self, value: &str) {
@@ -1018,25 +1097,6 @@ impl Entry<EntrySealed, EntryCommitted> {
.unwrap_or_else(|| format!("uuid={}", self.get_uuid().as_hyphenated())) .unwrap_or_else(|| format!("uuid={}", self.get_uuid().as_hyphenated()))
} }
#[inline]
/// Determine if this entry is recycled or a tombstone, and map that to "None". This allows
/// filter_map to effectively remove entries that should not be considered as "alive".
pub(crate) fn mask_recycled_ts(&self) -> Option<&Self> {
// Only when cls has ts/rc then None, else always Some(self).
match self.attrs.get("class") {
Some(cls) => {
if cls.contains(&PVCLASS_TOMBSTONE as &PartialValue)
|| cls.contains(&PVCLASS_RECYCLED as &PartialValue)
{
None
} else {
Some(self)
}
}
None => Some(self),
}
}
/// Generate the required values for a name2uuid index. IE this is /// Generate the required values for a name2uuid index. IE this is
/// ALL possible names this entry COULD be known uniquely by! /// ALL possible names this entry COULD be known uniquely by!
pub(crate) fn idx_name2uuid_diff( pub(crate) fn idx_name2uuid_diff(
@@ -1352,7 +1412,7 @@ impl Entry<EntrySealed, EntryCommitted> {
pub fn from_dbentry(db_e: DbEntry, id: u64) -> Option<Self> { pub fn from_dbentry(db_e: DbEntry, id: u64) -> Option<Self> {
// Convert attrs from db format to value // Convert attrs from db format to value
let r_attrs: Result<Map<AttrString, ValueSet>, ()> = match db_e.ent { let r_attrs: Result<Eattrs, ()> = match db_e.ent {
DbEntryVers::V1(_) => { DbEntryVers::V1(_) => {
admin_error!("Db V1 entry should have been migrated!"); admin_error!("Db V1 entry should have been migrated!");
Err(()) Err(())
@@ -1376,8 +1436,25 @@ impl Entry<EntrySealed, EntryCommitted> {
let uuid = attrs.get("uuid").and_then(|vs| vs.to_uuid_single())?; let uuid = attrs.get("uuid").and_then(|vs| vs.to_uuid_single())?;
/*
* ==== The Hack Zone ====
*
* For now, to make replication work, we are synthesising an in-memory change
* log, pinned to "the last time the entry was modified" as its "create time".
*
* This means that a simple restart of the server flushes and resets the changelog
* content. In the future though, this will actually be "part of" the entry
* and loaded from disk proper.
*/
let cid = attrs
.get("last_modified_cid")
.and_then(|vs| vs.as_cid_set())
.and_then(|set| set.iter().cloned().next())?;
let eclog = EntryChangelog::new_without_schema(cid, attrs.clone());
Some(Entry { Some(Entry {
valid: EntrySealed { uuid }, valid: EntrySealed { uuid, eclog },
state: EntryCommitted { id }, state: EntryCommitted { id },
attrs, attrs,
}) })
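// A hedged miniature (not from this patch) of the "hack zone" above: take one
// cid from the last_modified_cid value set and anchor a fresh in-memory
// changelog at it with the full current attribute state. Toy types here; the
// real code uses ValueSet, Cid and EntryChangelog::new_without_schema.
use std::collections::BTreeMap;

type ToyAttrs = BTreeMap<String, Vec<String>>;

fn synthesise_changelog(attrs: &ToyAttrs) -> Option<(String, ToyAttrs)> {
    // On a fresh load there is exactly one last_modified_cid to take as the
    // synthetic "create time".
    let cid = attrs.get("last_modified_cid")?.first()?.clone();
    // A restart therefore flushes history, but keeps a consistent anchor.
    Some((cid, attrs.clone()))
}

fn main() {
    let mut attrs = ToyAttrs::new();
    attrs.insert("last_modified_cid".to_string(), vec!["cid-7".to_string()]);
    let (cid, anchor) = synthesise_changelog(&attrs).expect("cid present");
    assert_eq!(cid, "cid-7");
    assert_eq!(anchor, attrs);
}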
@@ -1432,53 +1509,72 @@ impl Entry<EntrySealed, EntryCommitted> {
/// Convert this recycled entry, into a tombstone ready for reaping. /// Convert this recycled entry, into a tombstone ready for reaping.
pub fn to_tombstone(&self, cid: Cid) -> Entry<EntryInvalid, EntryCommitted> { pub fn to_tombstone(&self, cid: Cid) -> Entry<EntryInvalid, EntryCommitted> {
let mut eclog = self.valid.eclog.clone();
// Duplicate this to a tombstone entry // Duplicate this to a tombstone entry
let class_ava = vs_iutf8!["object", "tombstone"]; let class_ava = vs_iutf8!["object", "tombstone"];
let last_mod_ava = vs_cid![cid.clone()]; let last_mod_ava = vs_cid![cid.clone()];
let mut attrs_new: Map<AttrString, ValueSet> = Map::new(); let mut attrs_new: Eattrs = Map::new();
attrs_new.insert(AttrString::from("uuid"), vs_uuid![self.get_uuid()]); attrs_new.insert(AttrString::from("uuid"), vs_uuid![self.get_uuid()]);
attrs_new.insert(AttrString::from("class"), class_ava); attrs_new.insert(AttrString::from("class"), class_ava);
attrs_new.insert(AttrString::from("last_modified_cid"), last_mod_ava); attrs_new.insert(AttrString::from("last_modified_cid"), last_mod_ava);
// ⚠️ No return from this point!
eclog.tombstone(&cid, attrs_new.clone());
Entry { Entry {
valid: EntryInvalid { cid }, valid: EntryInvalid { cid, eclog },
state: self.state.clone(), state: self.state.clone(),
attrs: attrs_new, attrs: attrs_new,
} }
} }
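// A hedged sketch (not from this patch) of the tombstone reduction above: a
// tombstone keeps only uuid, class and last_modified_cid, and the changelog
// records the tombstone with exactly the attrs the entry now holds. Plain
// string maps stand in for Eattrs/ValueSet here.
use std::collections::BTreeMap;

fn tombstone_attrs(uuid: &str, cid: &str) -> BTreeMap<String, Vec<String>> {
    let mut attrs = BTreeMap::new();
    attrs.insert("uuid".to_string(), vec![uuid.to_string()]);
    attrs.insert(
        "class".to_string(),
        vec!["object".to_string(), "tombstone".to_string()],
    );
    attrs.insert("last_modified_cid".to_string(), vec![cid.to_string()]);
    attrs
}

fn main() {
    let attrs = tombstone_attrs("db237e8a-0079-4b8c-8a56-593b22aa44d1", "cid-1");
    // Everything else (name, spn, memberof, ...) has been stripped.
    assert_eq!(attrs.len(), 3);
}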
/// Given a current transaction change identifier, mark this entry as valid and committed. /// Given a current transaction change identifier, mark this entry as valid and committed.
pub fn into_valid(self, cid: Cid) -> Entry<EntryValid, EntryCommitted> { pub fn into_valid(self, cid: Cid, eclog: EntryChangelog) -> Entry<EntryValid, EntryCommitted> {
Entry { Entry {
valid: EntryValid { valid: EntryValid {
uuid: self.valid.uuid, uuid: self.valid.uuid,
cid, cid,
eclog,
}, },
state: self.state, state: self.state,
attrs: self.attrs, attrs: self.attrs,
} }
} }
pub fn verify(
&self,
schema: &dyn SchemaTransaction,
results: &mut Vec<Result<(), ConsistencyError>>,
) {
self.valid
.eclog
.verify(schema, &self.attrs, self.state.id, results);
}
} }
impl<STATE> Entry<EntryValid, STATE> { impl<STATE> Entry<EntryValid, STATE> {
// Returns the entry in the latest DbEntry format we are aware of. pub fn invalidate(self, eclog: EntryChangelog) -> Entry<EntryInvalid, STATE> {
pub fn invalidate(self) -> Entry<EntryInvalid, STATE> {
Entry { Entry {
valid: EntryInvalid { valid: EntryInvalid {
cid: self.valid.cid, cid: self.valid.cid,
eclog,
}, },
state: self.state, state: self.state,
attrs: self.attrs, attrs: self.attrs,
} }
} }
pub fn seal(self) -> Entry<EntrySealed, STATE> { pub fn seal(self, _schema: &dyn SchemaTransaction) -> Entry<EntrySealed, STATE> {
let EntryValid {
cid: _,
uuid,
eclog,
} = self.valid;
Entry { Entry {
valid: EntrySealed { valid: EntrySealed { uuid, eclog },
uuid: self.valid.uuid,
},
state: self.state, state: self.state,
attrs: self.attrs, attrs: self.attrs,
} }
@@ -1490,13 +1586,15 @@ impl<STATE> Entry<EntryValid, STATE> {
} }
impl<STATE> Entry<EntrySealed, STATE> { impl<STATE> Entry<EntrySealed, STATE> {
// Returns the entry in the latest DbEntry format we are aware of.
pub fn invalidate(mut self, cid: Cid) -> Entry<EntryInvalid, STATE> { pub fn invalidate(mut self, cid: Cid) -> Entry<EntryInvalid, STATE> {
/* Setup our last changed time. */ /* Setup our last changed time. */
self.set_last_changed(cid.clone()); self.set_last_changed(cid.clone());
Entry { Entry {
valid: EntryInvalid { cid }, valid: EntryInvalid {
cid,
eclog: self.valid.eclog,
},
state: self.state, state: self.state,
attrs: self.attrs, attrs: self.attrs,
} }
@@ -1506,13 +1604,19 @@ impl<STATE> Entry<EntrySealed, STATE> {
self.valid.uuid self.valid.uuid
} }
pub fn get_changelog(&self) -> &EntryChangelog {
&self.valid.eclog
}
#[cfg(test)] #[cfg(test)]
pub unsafe fn into_invalid(mut self) -> Entry<EntryInvalid, STATE> { pub unsafe fn into_invalid(mut self) -> Entry<EntryInvalid, STATE> {
self.set_last_changed(Cid::new_zero()); let cid = Cid::new_zero();
self.set_last_changed(cid.clone());
let eclog = EntryChangelog::new_without_schema(cid.clone(), self.attrs.clone());
Entry { Entry {
valid: EntryInvalid { valid: EntryInvalid { cid, eclog },
cid: Cid::new_zero(),
},
state: self.state, state: self.state,
attrs: self.attrs, attrs: self.attrs,
} }
@@ -1625,7 +1729,7 @@ impl<VALID, STATE> Entry<VALID, STATE> {
} }
/// Overwrite the current set of values for an attribute, with this new set. /// Overwrite the current set of values for an attribute, with this new set.
pub fn set_ava_int<T>(&mut self, attr: &str, iter: T) fn set_ava_int<T>(&mut self, attr: &str, iter: T)
where where
T: IntoIterator<Item = Value>, T: IntoIterator<Item = Value>,
{ {
@@ -2025,6 +2129,56 @@ impl<VALID, STATE> Entry<VALID, STATE> {
Ok(mods) Ok(mods)
} }
/// Determine if this entry is recycled or a tombstone, and map that to "None". This allows
/// filter_map to effectively remove entries that should not be considered as "alive".
pub(crate) fn mask_recycled_ts(&self) -> Option<&Self> {
// Only when cls has ts/rc then None, else always Some(self).
match self.attrs.get("class") {
Some(cls) => {
if cls.contains(&PVCLASS_TOMBSTONE as &PartialValue)
|| cls.contains(&PVCLASS_RECYCLED as &PartialValue)
{
None
} else {
Some(self)
}
}
None => Some(self),
}
}
/// Determine if this entry is recycled, and map that to "None". This allows
/// filter_map to effectively remove entries that are recycled in some cases.
pub(crate) fn mask_recycled(&self) -> Option<&Self> {
// Only when cls has rc then None, else always Some(self).
match self.attrs.get("class") {
Some(cls) => {
if cls.contains(&PVCLASS_RECYCLED as &PartialValue) {
None
} else {
Some(self)
}
}
None => Some(self),
}
}
/// Determine if this entry is a tombstone, and map that to "None". This allows
/// filter_map to effectively remove entries that are tombstones in some cases.
pub(crate) fn mask_tombstone(&self) -> Option<&Self> {
// Only when cls has ts then None, else always Some(self).
match self.attrs.get("class") {
Some(cls) => {
if cls.contains(&PVCLASS_TOMBSTONE as &PartialValue) {
None
} else {
Some(self)
}
}
None => Some(self),
}
}
} }
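// A hedged usage sketch (not from this patch) of the mask_* pattern above:
// returning Option<&Self> lets callers drop dead entries through filter_map
// with no branching at the call site. ToyEntry is a hypothetical stand-in.
struct ToyEntry {
    classes: Vec<&'static str>,
}

impl ToyEntry {
    fn mask_recycled_ts(&self) -> Option<&Self> {
        if self.classes.iter().any(|c| *c == "tombstone" || *c == "recycled") {
            None
        } else {
            Some(self)
        }
    }
}

fn main() {
    let entries = vec![
        ToyEntry { classes: vec!["person"] },
        ToyEntry { classes: vec!["person", "recycled"] },
        ToyEntry { classes: vec!["tombstone"] },
    ];
    // Recycled and tombstoned entries simply vanish from the iterator.
    let live: Vec<_> = entries.iter().filter_map(|e| e.mask_recycled_ts()).collect();
    assert_eq!(live.len(), 1);
}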
impl<STATE> Entry<EntryInvalid, STATE> impl<STATE> Entry<EntryInvalid, STATE>
@@ -2033,42 +2187,38 @@ where
{ {
// This should always work? It's only on validate that we'll build // This should always work? It's only on validate that we'll build
// a list of syntax violations ... // a list of syntax violations ...
// If this already exists, we silently drop the event? Is that an // If this already exists, we silently drop the event. This is because
// acceptable interface? // we need this to be *state* based where we assert presence.
//
// TODO: This should take Value not &Value, would save a lot of clones
// around the codebase.
pub fn add_ava(&mut self, attr: &str, value: Value) { pub fn add_ava(&mut self, attr: &str, value: Value) {
self.valid
.eclog
.add_ava_iter(&self.valid.cid, attr, std::iter::once(value.clone()));
self.add_ava_int(attr, value) self.add_ava_int(attr, value)
} }
/// Merge an existing value set into this attribute's value set. If they are not /// Remove an attribute-value pair from this entry. If the ava doesn't exist, we
/// the same type, an error is returned. If no attribute exists, then this valueset is /// don't do anything, since we are asserting the absence of a value.
/// cloned "as is".
pub fn merge_ava(&mut self, attr: &str, valueset: &ValueSet) -> Result<(), OperationError> {
if let Some(vs) = self.attrs.get_mut(attr) {
vs.merge(valueset)
} else {
self.attrs.insert(AttrString::from(attr), valueset.clone());
Ok(())
}
}
/// Remove an attribute-value pair from this entry.
fn remove_ava(&mut self, attr: &str, value: &PartialValue) { fn remove_ava(&mut self, attr: &str, value: &PartialValue) {
self.valid
.eclog
.remove_ava_iter(&self.valid.cid, attr, std::iter::once(value.clone()));
let rm = if let Some(vs) = self.attrs.get_mut(attr) { let rm = if let Some(vs) = self.attrs.get_mut(attr) {
vs.remove(value); vs.remove(value);
vs.is_empty() vs.is_empty()
} else { } else {
false false
}; };
//
if rm { if rm {
self.attrs.remove(attr); self.attrs.remove(attr);
}; };
} }
pub(crate) fn remove_avas(&mut self, attr: &str, values: &BTreeSet<PartialValue>) { pub(crate) fn remove_avas(&mut self, attr: &str, values: &BTreeSet<PartialValue>) {
self.valid
.eclog
.remove_ava_iter(&self.valid.cid, attr, values.iter().cloned());
let rm = if let Some(vs) = self.attrs.get_mut(attr) { let rm = if let Some(vs) = self.attrs.get_mut(attr) {
values.iter().for_each(|k| { values.iter().for_each(|k| {
vs.remove(k); vs.remove(k);
@@ -2082,37 +2232,40 @@ where
}; };
} }
/// Remove all values of this attribute from the entry. /// Remove all values of this attribute from the entry. If it doesn't exist, this
pub fn purge_ava(&mut self, attr: &str) { /// asserts that no content of that attribute exists.
pub(crate) fn purge_ava(&mut self, attr: &str) {
self.valid.eclog.purge_ava(&self.valid.cid, attr);
self.attrs.remove(attr); self.attrs.remove(attr);
} }
/// Remove all values of this attribute from the entry, and return their content. /// Remove all values of this attribute from the entry, and return their content.
pub fn pop_ava(&mut self, attr: &str) -> Option<ValueSet> { pub fn pop_ava(&mut self, attr: &str) -> Option<ValueSet> {
self.valid.eclog.purge_ava(&self.valid.cid, attr);
self.attrs.remove(attr) self.attrs.remove(attr)
} }
/// Replace the content of this attribute with a new value set. /// Replace the content of this attribute with a new value set. Effectively this is
// pub fn set_ava(&mut self, attr: &str, values: Set<Value>) { /// a "purge and set".
pub fn set_ava<T>(&mut self, attr: &str, iter: T) pub fn set_ava<T>(&mut self, attr: &str, iter: T)
where where
T: IntoIterator<Item = Value>, T: Clone + IntoIterator<Item = Value>,
{ {
self.valid.eclog.purge_ava(&self.valid.cid, attr);
self.valid
.eclog
.add_ava_iter(&self.valid.cid, attr, iter.clone());
self.set_ava_int(attr, iter) self.set_ava_int(attr, iter)
} }
pub fn get_ava_mut(&mut self, attr: &str) -> Option<&mut ValueSet> { pub fn set_ava_set(&mut self, attr: &str, vs: ValueSet) {
self.attrs.get_mut(attr) self.valid.eclog.purge_ava(&self.valid.cid, attr);
self.valid
.eclog
.add_ava_iter(&self.valid.cid, attr, vs.to_value_iter());
self.attrs.insert(AttrString::from(attr), vs);
} }
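// A hedged model (not from this patch) of the invariant shared by the
// mutators above: every attribute mutation is mirrored into the changelog
// *before* the in-memory map changes, and set is exactly purge-then-add.
// ToyInvalid is a hypothetical stand-in for Entry<EntryInvalid, _>.
use std::collections::BTreeMap;

struct ToyInvalid {
    log: Vec<String>,
    attrs: BTreeMap<String, Vec<String>>,
}

impl ToyInvalid {
    fn purge_ava(&mut self, attr: &str) {
        self.log.push(format!("purge {}", attr)); // record first ...
        self.attrs.remove(attr); // ... then mutate
    }

    fn set_ava(&mut self, attr: &str, vals: Vec<String>) {
        // set == purge + add, exactly as the doc comment above describes.
        self.log.push(format!("purge {}", attr));
        self.log.push(format!("add {} {:?}", attr, vals));
        self.attrs.insert(attr.to_string(), vals);
    }
}

fn main() {
    let mut e = ToyInvalid { log: Vec::new(), attrs: BTreeMap::new() };
    e.set_ava("name", vec!["claire".to_string()]);
    e.purge_ava("name");
    // Three transitions recorded: purge, add, purge - enough to replay.
    assert_eq!(e.log.len(), 3);
    assert!(e.attrs.is_empty());
}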
/*
pub fn avas_mut(&mut self) -> EntryAvasMut {
EntryAvasMut {
inner: self.attrs.iter_mut(),
}
}
*/
/// Apply the content of this modlist to this entry, enforcing the expressed state. /// Apply the content of this modlist to this entry, enforcing the expressed state.
pub fn apply_modlist(&mut self, modlist: &ModifyList<ModifyValid>) { pub fn apply_modlist(&mut self, modlist: &ModifyList<ModifyValid>) {
// -> Result<Entry<EntryInvalid, STATE>, OperationError> { // -> Result<Entry<EntryInvalid, STATE>, OperationError> {

View file

@@ -1326,10 +1326,12 @@ impl FilterResolved {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::event::CreateEvent; use crate::event::CreateEvent;
use crate::event::DeleteEvent;
use crate::filter::{Filter, FilterInvalid, FILTER_DEPTH_MAX}; use crate::filter::{Filter, FilterInvalid, FILTER_DEPTH_MAX};
use crate::prelude::*; use crate::prelude::*;
use std::cmp::{Ordering, PartialOrd}; use std::cmp::{Ordering, PartialOrd};
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::time::Duration;
use kanidm_proto::v1::Filter as ProtoFilter; use kanidm_proto::v1::Filter as ProtoFilter;
use ldap3_proto::simple::LdapFilter; use ldap3_proto::simple::LdapFilter;
@@ -1786,7 +1788,11 @@ mod tests {
#[test] #[test]
fn test_filter_resolve_value() { fn test_filter_resolve_value() {
run_test!(|server: &QueryServer| { run_test!(|server: &QueryServer| {
let server_txn = server.write(duration_from_epoch_now()); let time_p1 = duration_from_epoch_now();
let time_p2 = time_p1 + Duration::from_secs(CHANGELOG_MAX_AGE * 2);
let time_p3 = time_p2 + Duration::from_secs(CHANGELOG_MAX_AGE * 2);
let server_txn = server.write(time_p1);
let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str( let e1: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{ r#"{
"attrs": { "attrs": {
@@ -1809,18 +1815,45 @@ mod tests {
} }
}"#, }"#,
); );
let e_ts: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{ // We need to add these and then push through the state machine.
"attrs": { let e_ts = entry_init!(
"class": ["tombstone", "object"], ("class", Value::new_class("object")),
"uuid": ["9557f49c-97a5-4277-a9a5-097d17eb8317"] ("class", Value::new_class("person")),
} ("name", Value::new_iname("testperson3")),
}"#, (
"uuid",
Value::new_uuids("9557f49c-97a5-4277-a9a5-097d17eb8317").expect("uuid")
),
("description", Value::new_utf8s("testperson3")),
("displayname", Value::new_utf8s("testperson3"))
); );
let ce = CreateEvent::new_internal(vec![e1, e2, e_ts]); let ce = CreateEvent::new_internal(vec![e1, e2, e_ts]);
let cr = server_txn.create(&ce); let cr = server_txn.create(&ce);
assert!(cr.is_ok()); assert!(cr.is_ok());
let de_sin = unsafe {
DeleteEvent::new_internal_invalid(filter!(f_or!([f_eq(
"name",
PartialValue::new_iname("testperson3")
)])))
};
assert!(server_txn.delete(&de_sin).is_ok());
// Commit
assert!(server_txn.commit().is_ok());
// Now, establish enough time for the recycled items to be purged.
let server_txn = server.write(time_p2);
assert!(server_txn.purge_recycled().is_ok());
assert!(server_txn.commit().is_ok());
let server_txn = server.write(time_p3);
assert!(server_txn.purge_tombstones().is_ok());
// ===== ✅ now ready to test!
// Resolving most times should yield expected results // Resolving most times should yield expected results
let t1 = vs_utf8!["teststring".to_string()] as _; let t1 = vs_utf8!["teststring".to_string()] as _;
let r1 = server_txn.resolve_valueset(&t1); let r1 = server_txn.resolve_valueset(&t1);

View file

@@ -17,10 +17,22 @@ pub(crate) mod unix;
use kanidm_proto::v1::{AuthAllowed, AuthMech}; use kanidm_proto::v1::{AuthAllowed, AuthMech};
#[derive(Debug)] use std::fmt;
pub enum AuthState { pub enum AuthState {
Choose(Vec<AuthMech>), Choose(Vec<AuthMech>),
Continue(Vec<AuthAllowed>), Continue(Vec<AuthAllowed>),
Denied(String), Denied(String),
Success(String), Success(String),
} }
impl fmt::Debug for AuthState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
AuthState::Choose(mechs) => write!(f, "AuthState::Choose({:?})", mechs),
AuthState::Continue(allow) => write!(f, "AuthState::Continue({:?})", allow),
AuthState::Denied(reason) => write!(f, "AuthState::Denied({:?})", reason),
AuthState::Success(_token) => write!(f, "AuthState::Success"),
}
}
}
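// A hedged note on the manual Debug impl above: Success holds the session
// token, and deriving Debug would print it into logs. A toy equivalent of the
// redaction (hypothetical type, not the server's):
use std::fmt;

enum ToyAuthState {
    Success(String),
}

impl fmt::Debug for ToyAuthState {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Deliberately discard the token so debug logging can't leak it.
            ToyAuthState::Success(_token) => write!(f, "AuthState::Success"),
        }
    }
}

fn main() {
    let s = ToyAuthState::Success("bearer-secret".to_string());
    assert_eq!(format!("{:?}", s), "AuthState::Success");
}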

View file

@@ -598,7 +598,7 @@ impl<'a> IdmServerAuthTransaction<'a> {
let entry = self.qs_read.internal_search_uuid(&euuid)?; let entry = self.qs_read.internal_search_uuid(&euuid)?;
security_info!( security_info!(
?entry, name = %init.name,
uuid = %euuid, uuid = %euuid,
"Initiating Authentication Session", "Initiating Authentication Session",
); );

View file

@@ -1,8 +1,8 @@
//! The Kanidmd server library. This implements all of the internal components of the server //! The Kanidmd server library. This implements all of the internal components of the server
//! which is used to process authentication, store identities and enforce access controls. //! which is used to process authentication, store identities and enforce access controls.
#![recursion_limit = "512"]
#![deny(warnings)] #![deny(warnings)]
#![recursion_limit = "512"]
#![warn(unused_extern_crates)] #![warn(unused_extern_crates)]
#![deny(clippy::todo)] #![deny(clippy::todo)]
#![deny(clippy::unimplemented)] #![deny(clippy::unimplemented)]

View file

@@ -275,6 +275,7 @@ macro_rules! run_modify_test {
$modify_filter:expr, $modify_filter:expr,
$modify_list:expr, $modify_list:expr,
$internal:expr, $internal:expr,
$pre_hook:expr,
$check:expr $check:expr
) => {{ ) => {{
use crate::be::{Backend, BackendConfig}; use crate::be::{Backend, BackendConfig};
@@ -285,6 +286,14 @@ macro_rules! run_modify_test {
spanned!("plugins::macros::run_modify_test", { spanned!("plugins::macros::run_modify_test", {
let qs = setup_test!($preload_entries); let qs = setup_test!($preload_entries);
{
let qs_write = qs.write(duration_from_epoch_now());
spanned!("plugins::macros::run_modify_test -> pre_test hook", {
$pre_hook(&qs_write)
});
qs_write.commit().expect("commit failure!");
}
let me = match $internal { let me = match $internal {
None => unsafe { ModifyEvent::new_internal_invalid($modify_filter, $modify_list) }, None => unsafe { ModifyEvent::new_internal_invalid($modify_filter, $modify_list) },
Some(e_str) => unsafe { Some(e_str) => unsafe {
@@ -370,6 +379,18 @@ macro_rules! run_delete_test {
}}; }};
} }
#[cfg(test)]
macro_rules! run_entrychangelog_test {
($test_fn:expr) => {{
let _ = crate::tracing_tree::test_init();
let schema_outer = Schema::new().expect("Failed to init schema");
let schema_txn = schema_outer.read();
$test_fn(&schema_txn)
}};
}
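// A hedged sketch (assumption, not from this patch) of the fixture-then-
// closure shape that run_entrychangelog_test! gives its callers; the toy
// macro below models only that shape, with a Vec standing in for the schema.
macro_rules! run_toy_test {
    ($test_fn:expr) => {{
        let fixture = vec![1, 2, 3]; // stands in for Schema::new() + read()
        $test_fn(&fixture)
    }};
}

fn main() {
    run_toy_test!(|fixture: &Vec<i32>| {
        assert_eq!(fixture.len(), 3);
    });
}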
#[allow(unused_macros)] #[allow(unused_macros)]
#[macro_export] #[macro_export]
macro_rules! modlist { macro_rules! modlist {

View file

@@ -16,12 +16,12 @@ use serde::{Deserialize, Serialize};
use smartstring::alias::String as AttrString; use smartstring::alias::String as AttrString;
use std::slice; use std::slice;
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ModifyValid; pub struct ModifyValid;
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct ModifyInvalid; pub struct ModifyInvalid;
#[derive(Debug)] #[derive(Debug, Clone)]
#[allow(clippy::large_enum_variant)] #[allow(clippy::large_enum_variant)]
pub enum Modify { pub enum Modify {
// This value *should* exist. // This value *should* exist.
@@ -59,7 +59,7 @@ impl Modify {
} }
} }
#[derive(Debug, Default)] #[derive(Clone, Debug, Default)]
pub struct ModifyList<VALID> { pub struct ModifyList<VALID> {
// This is never read, it's just used for state machine enforcement. // This is never read, it's just used for state machine enforcement.
#[allow(dead_code)] #[allow(dead_code)]
@@ -207,8 +207,7 @@ impl ModifyList<ModifyInvalid> {
}) })
} }
#[cfg(test)] pub(crate) unsafe fn into_valid(self) -> ModifyList<ModifyValid> {
pub unsafe fn into_valid(self) -> ModifyList<ModifyValid> {
ModifyList { ModifyList {
valid: ModifyValid, valid: ModifyValid,
mods: self.mods, mods: self.mods,

View file

@@ -286,6 +286,7 @@ mod tests {
Modify::Present(AttrString::from("name"), Value::new_iname("testgroup_a")) Modify::Present(AttrString::from("name"), Value::new_iname("testgroup_a"))
]), ]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -329,6 +330,7 @@ mod tests {
Modify::Present(AttrString::from("name"), Value::new_iname("testgroup")) Modify::Present(AttrString::from("name"), Value::new_iname("testgroup"))
]), ]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }

View file

@@ -520,6 +520,7 @@ mod tests {
Value::from("f15a7219-1d15-44e3-a7b4-bec899c07788") Value::from("f15a7219-1d15-44e3-a7b4-bec899c07788")
)]), )]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -549,6 +550,7 @@ mod tests {
PartialValue::new_uuids("f15a7219-1d15-44e3-a7b4-bec899c07788").unwrap() PartialValue::new_uuids("f15a7219-1d15-44e3-a7b4-bec899c07788").unwrap()
)]), )]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -575,6 +577,7 @@ mod tests {
filter!(f_eq("name", PartialValue::new_iname("testgroup_a"))), filter!(f_eq("name", PartialValue::new_iname("testgroup_a"))),
ModifyList::new_list(vec![Modify::Purged(AttrString::from("uuid"))]), ModifyList::new_list(vec![Modify::Purged(AttrString::from("uuid"))]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }

View file

@@ -190,6 +190,7 @@ mod tests {
filter!(f_eq("name", PartialValue::new_iname("testperson"))), filter!(f_eq("name", PartialValue::new_iname("testperson"))),
modlist!([m_pres("class", &Value::new_class("posixgroup"))]), modlist!([m_pres("class", &Value::new_class("posixgroup"))]),
None, None,
|_| {},
|qs_write: &QueryServerWriteTransaction| check_gid( |qs_write: &QueryServerWriteTransaction| check_gid(
qs_write, qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244", "83a0927f-3de1-45ec-bea0-2f7b997ef244",
@@ -222,6 +223,7 @@ mod tests {
filter!(f_eq("name", PartialValue::new_iname("testperson"))), filter!(f_eq("name", PartialValue::new_iname("testperson"))),
modlist!([m_purge("gidnumber")]), modlist!([m_purge("gidnumber")]),
None, None,
|_| {},
|qs_write: &QueryServerWriteTransaction| check_gid( |qs_write: &QueryServerWriteTransaction| check_gid(
qs_write, qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244", "83a0927f-3de1-45ec-bea0-2f7b997ef244",
@@ -257,6 +259,7 @@ mod tests {
m_pres("gidnumber", &Value::new_uint32(2000)) m_pres("gidnumber", &Value::new_uint32(2000))
]), ]),
None, None,
|_| {},
|qs_write: &QueryServerWriteTransaction| check_gid( |qs_write: &QueryServerWriteTransaction| check_gid(
qs_write, qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244", "83a0927f-3de1-45ec-bea0-2f7b997ef244",

View file

@@ -49,37 +49,54 @@ fn do_memberof(
tgte.add_ava("class", CLASS_MEMBEROF.clone()); tgte.add_ava("class", CLASS_MEMBEROF.clone());
// Clear the dmo + mos, we will recreate them now. // Clear the dmo + mos, we will recreate them now.
// This is how we handle deletes/etc. // This is how we handle deletes/etc.
tgte.pop_ava("memberof"); tgte.purge_ava("memberof");
tgte.pop_ava("directmemberof"); tgte.purge_ava("directmemberof");
// What are our direct and indirect mos?
let dmo = ValueSetRefer::from_iter(groups.iter().map(|g| g.get_uuid()));
let mut mo = ValueSetRefer::from_iter(
groups
.iter()
.filter_map(|g| {
g.get_ava_set("memberof")
.and_then(|s| s.as_refer_set())
.map(|s| s.iter())
})
.flatten()
.copied(),
);
// Add all the direct mo's and mos. // Add all the direct mo's and mos.
let r: Result<(), _> = groups.iter().try_for_each(|g| { if let Some(dmo) = dmo {
// TODO: Change add_ava to remove this alloc/clone. // We need to clone this else type checker gets real sad.
let dmo = Value::new_refer(g.get_uuid()); tgte.set_ava_set("directmemberof", dmo.clone());
tgte.add_ava("directmemberof", dmo.clone());
tgte.add_ava("memberof", dmo);
if let Some(vs) = g.get_ava_set("memberof") { if let Some(mo) = &mut mo {
tgte.merge_ava("memberof", vs) let dmo = dmo as ValueSet;
mo.merge(&dmo)?;
} else { } else {
Ok(()) // Means MO is empty, so we need to duplicate dmo to allow things to
} // proceed.
}); mo = Some(dmo.clone());
};
};
if r.is_err() { if let Some(mo) = mo {
admin_error!("Invalid valueset type -> {:?}", r); tgte.set_ava_set("memberof", mo);
} else {
trace!(
"Updating {:?} to be dir mo {:?}",
uuid,
tgte.get_ava_set("directmemberof")
);
trace!(
"Updating {:?} to be mo {:?}",
uuid,
tgte.get_ava_set("memberof")
);
} }
r
trace!(
"Updating {:?} to be dir mo {:?}",
uuid,
tgte.get_ava_set("directmemberof")
);
trace!(
"Updating {:?} to be mo {:?}",
uuid,
tgte.get_ava_set("memberof")
);
Ok(())
} }
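// A hedged set-level model (not from this patch) of the construction above:
// directmemberof is the set of groups that directly contain the target, and
// memberof is the union of every group's own memberof with directmemberof.
// Uuids are modeled as &str here.
use std::collections::BTreeSet;

struct ToyGroup {
    uuid: &'static str,
    memberof: BTreeSet<&'static str>,
}

fn main() {
    let groups = vec![
        ToyGroup { uuid: "g1", memberof: BTreeSet::from(["g0"]) },
        ToyGroup { uuid: "g2", memberof: BTreeSet::new() },
    ];
    let dmo: BTreeSet<_> = groups.iter().map(|g| g.uuid).collect();
    let mut mo: BTreeSet<_> = groups
        .iter()
        .flat_map(|g| g.memberof.iter().copied())
        .collect();
    // As in the plugin: even when mo starts empty, dmo still applies to it.
    mo.extend(dmo.iter().copied());
    assert_eq!(dmo, BTreeSet::from(["g1", "g2"]));
    assert_eq!(mo, BTreeSet::from(["g0", "g1", "g2"]));
}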
#[allow(clippy::cognitive_complexity)] #[allow(clippy::cognitive_complexity)]
@@ -276,7 +293,9 @@ impl Plugin for MemberOf {
// //
// NOTE: DO NOT purge directmemberof - we use that to restore memberships // NOTE: DO NOT purge directmemberof - we use that to restore memberships
// in recycle revive! // in recycle revive!
cand.iter_mut().for_each(|e| e.purge_ava("memberof")); let mo_purge = unsafe { ModifyList::new_purge("memberof").into_valid() };
cand.iter_mut().for_each(|e| e.apply_modlist(&mo_purge));
Ok(()) Ok(())
} }
@@ -715,6 +734,7 @@ mod tests {
Value::new_refer_s(&UUID_B).unwrap() Value::new_refer_s(&UUID_B).unwrap()
)]), )]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
// V-- this uuid is // V-- this uuid is
// V-- memberof this UUID // V-- memberof this UUID
@@ -750,6 +770,7 @@ mod tests {
Value::new_refer_s(&UUID_B).unwrap() Value::new_refer_s(&UUID_B).unwrap()
)]), )]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
// V-- this uuid is // V-- this uuid is
// V-- memberof this UUID // V-- memberof this UUID
@@ -803,6 +824,7 @@ mod tests {
Value::new_refer_s(&UUID_C).unwrap() Value::new_refer_s(&UUID_C).unwrap()
)]), )]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
// V-- this uuid is // V-- this uuid is
// V-- memberof this UUID // V-- memberof this UUID
@@ -859,6 +881,7 @@ mod tests {
Value::new_refer_s(&UUID_A).unwrap() Value::new_refer_s(&UUID_A).unwrap()
)]), )]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
// V-- this uuid is // V-- this uuid is
// V-- memberof this UUID // V-- memberof this UUID
@@ -925,6 +948,7 @@ mod tests {
Value::new_refer_s(&UUID_A).unwrap() Value::new_refer_s(&UUID_A).unwrap()
)]), )]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
// V-- this uuid is // V-- this uuid is
// V-- memberof this UUID // V-- memberof this UUID
@@ -993,6 +1017,7 @@ mod tests {
PartialValue::new_refer_s(&UUID_B).unwrap() PartialValue::new_refer_s(&UUID_B).unwrap()
)]), )]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
// V-- this uuid is // V-- this uuid is
// V-- memberof this UUID // V-- memberof this UUID
@@ -1031,6 +1056,7 @@ mod tests {
PartialValue::new_refer_s(&UUID_B).unwrap() PartialValue::new_refer_s(&UUID_B).unwrap()
)]), )]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
// V-- this uuid is // V-- this uuid is
// V-- memberof this UUID // V-- memberof this UUID
@@ -1088,6 +1114,7 @@ mod tests {
PartialValue::new_refer_s(&UUID_C).unwrap() PartialValue::new_refer_s(&UUID_C).unwrap()
)]), )]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
// V-- this uuid is // V-- this uuid is
// V-- memberof this UUID // V-- memberof this UUID
@@ -1155,6 +1182,7 @@ mod tests {
PartialValue::new_refer_s(&UUID_A).unwrap() PartialValue::new_refer_s(&UUID_A).unwrap()
)]), )]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
// V-- this uuid is // V-- this uuid is
// V-- memberof this UUID // V-- memberof this UUID
@@ -1246,6 +1274,7 @@ mod tests {
), ),
]), ]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
// V-- this uuid is // V-- this uuid is
// V-- memberof this UUID // V-- memberof this UUID

View file

@@ -207,16 +207,17 @@ impl Plugins {
}) })
} }
pub fn run_verify(qs: &QueryServerReadTransaction) -> Vec<Result<(), ConsistencyError>> { pub fn run_verify(
qs: &QueryServerReadTransaction,
results: &mut Vec<Result<(), ConsistencyError>>,
) {
let _entered = trace_span!("plugins::run_verify").entered(); let _entered = trace_span!("plugins::run_verify").entered();
spanned!("plugins::run_verify", { spanned!("plugins::run_verify", {
let mut results = Vec::new(); run_verify_plugin!(qs, results, base::Base);
run_verify_plugin!(qs, &mut results, base::Base); run_verify_plugin!(qs, results, attrunique::AttrUnique);
run_verify_plugin!(qs, &mut results, attrunique::AttrUnique); run_verify_plugin!(qs, results, refint::ReferentialIntegrity);
run_verify_plugin!(qs, &mut results, refint::ReferentialIntegrity); run_verify_plugin!(qs, results, memberof::MemberOf);
run_verify_plugin!(qs, &mut results, memberof::MemberOf); run_verify_plugin!(qs, results, spn::Spn);
run_verify_plugin!(qs, &mut results, spn::Spn);
results
}) })
} }
} }

View file

@@ -155,6 +155,7 @@ mod tests {
Modify::Purged(AttrString::from("oauth2_rs_token_key"),) Modify::Purged(AttrString::from("oauth2_rs_token_key"),)
]), ]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
let e = qs let e = qs
.internal_search_uuid(&uuid) .internal_search_uuid(&uuid)

View file

@@ -172,6 +172,7 @@ mod tests {
Value::from(IMPORT_HASH) Value::from(IMPORT_HASH)
)]), )]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -206,6 +207,7 @@ mod tests {
Value::from(IMPORT_HASH) Value::from(IMPORT_HASH)
)]), )]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -243,6 +245,7 @@ mod tests {
Value::from(IMPORT_HASH) Value::from(IMPORT_HASH)
)]), )]),
None, None,
|_| {},
|qs: &QueryServerWriteTransaction| { |qs: &QueryServerWriteTransaction| {
let e = qs let e = qs
.internal_search_uuid( .internal_search_uuid(

View file

@@ -264,6 +264,7 @@ mod tests {
m_pres("displayname", &Value::new_utf8s("system test")), m_pres("displayname", &Value::new_utf8s("system test")),
]), ]),
Some(JSON_ADMIN_V1), Some(JSON_ADMIN_V1),
|_| {},
|_| {} |_| {}
); );
} }
@@ -294,6 +295,7 @@ mod tests {
m_pres("must", &Value::new_iutf8("name")), m_pres("must", &Value::new_iutf8("name")),
]), ]),
Some(JSON_ADMIN_V1), Some(JSON_ADMIN_V1),
|_| {},
|_| {} |_| {}
); );
} }
@@ -360,6 +362,7 @@ mod tests {
m_pres("domain_ssid", &Value::new_utf8s("NewExampleWifi")), m_pres("domain_ssid", &Value::new_utf8s("NewExampleWifi")),
]), ]),
Some(JSON_ADMIN_V1), Some(JSON_ADMIN_V1),
|_| {},
|_| {} |_| {}
); );
} }

View file

@@ -404,6 +404,7 @@ mod tests {
Value::new_refer_s("d2b496bd-8493-47b7-8142-f568b5cf47ee").unwrap() Value::new_refer_s("d2b496bd-8493-47b7-8142-f568b5cf47ee").unwrap()
)]), )]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -434,6 +435,7 @@ mod tests {
Value::new_refer_s("d2b496bd-8493-47b7-8142-f568b5cf47ee").unwrap() Value::new_refer_s("d2b496bd-8493-47b7-8142-f568b5cf47ee").unwrap()
)]), )]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -482,6 +484,7 @@ mod tests {
), ),
]), ]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -519,6 +522,7 @@ mod tests {
filter!(f_eq("name", PartialValue::new_iname("testgroup_b"))), filter!(f_eq("name", PartialValue::new_iname("testgroup_b"))),
ModifyList::new_list(vec![Modify::Purged(AttrString::from("member"))]), ModifyList::new_list(vec![Modify::Purged(AttrString::from("member"))]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -548,6 +552,7 @@ mod tests {
Value::new_refer_s("d2b496bd-8493-47b7-8142-f568b5cf47ee").unwrap() Value::new_refer_s("d2b496bd-8493-47b7-8142-f568b5cf47ee").unwrap()
)]), )]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -558,7 +563,7 @@ mod tests {
let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str( let ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(
r#"{ r#"{
"attrs": { "attrs": {
"class": ["group", "recycled"], "class": ["group"],
"name": ["testgroup_a"], "name": ["testgroup_a"],
"description": ["testgroup"], "description": ["testgroup"],
"uuid": ["d2b496bd-8493-47b7-8142-f568b5cf47ee"] "uuid": ["d2b496bd-8493-47b7-8142-f568b5cf47ee"]
@@ -589,6 +594,16 @@ mod tests {
Value::new_refer_s("d2b496bd-8493-47b7-8142-f568b5cf47ee").unwrap() Value::new_refer_s("d2b496bd-8493-47b7-8142-f568b5cf47ee").unwrap()
)]), )]),
None, None,
|qs: &QueryServerWriteTransaction| {
// Any pre_hooks we need. In this case, we need to trigger the delete of testgroup_a
let de_sin = unsafe {
crate::event::DeleteEvent::new_internal_invalid(filter!(f_or!([f_eq(
"name",
PartialValue::new_iname("testgroup_a")
)])))
};
assert!(qs.delete(&de_sin).is_ok());
},
|_| {} |_| {}
); );
} }

View file

@@ -246,6 +246,7 @@ mod tests {
filter!(f_eq("name", PartialValue::new_iname("testperson"))), filter!(f_eq("name", PartialValue::new_iname("testperson"))),
modlist!([m_purge("spn")]), modlist!([m_purge("spn")]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }
@@ -302,6 +303,7 @@ mod tests {
m_pres("spn", &Value::new_spn_str("invalid", "spn")) m_pres("spn", &Value::new_spn_str("invalid", "spn"))
]), ]),
None, None,
|_| {},
|_| {} |_| {}
); );
} }

View file

@@ -1,5 +1,6 @@
use kanidm_proto::v1::OperationError; use kanidm_proto::v1::OperationError;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::fmt;
use std::time::Duration; use std::time::Duration;
use uuid::Uuid; use uuid::Uuid;
@@ -11,6 +12,12 @@ pub struct Cid {
pub s_uuid: Uuid, pub s_uuid: Uuid,
} }
impl fmt::Display for Cid {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}-{}-{}", self.ts.as_nanos(), self.d_uuid, self.s_uuid)
}
}
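// A hedged example (not from this patch) of the Display format introduced
// above: nanoseconds, then d_uuid, then s_uuid, dash separated. Strings stand
// in for uuid::Uuid so the sketch is dependency-free.
use std::fmt;
use std::time::Duration;

struct ToyCid {
    ts: Duration,
    d_uuid: &'static str,
    s_uuid: &'static str,
}

impl fmt::Display for ToyCid {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}-{}-{}", self.ts.as_nanos(), self.d_uuid, self.s_uuid)
    }
}

fn main() {
    let cid = ToyCid {
        ts: Duration::new(3, 0),
        d_uuid: "00000000-0000-0000-0000-0000000000d1",
        s_uuid: "00000000-0000-0000-0000-000000000051",
    };
    // 3 seconds == 3_000_000_000 ns leads the rendered cid.
    assert!(cid.to_string().starts_with("3000000000-"));
}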
impl Cid { impl Cid {
#[cfg(test)] #[cfg(test)]
pub(crate) fn new(d_uuid: Uuid, s_uuid: Uuid, ts: Duration) -> Self { pub(crate) fn new(d_uuid: Uuid, s_uuid: Uuid, ts: Duration) -> Self {
@@ -28,10 +35,24 @@ impl Cid {
#[cfg(test)] #[cfg(test)]
pub unsafe fn new_zero() -> Self { pub unsafe fn new_zero() -> Self {
Self::new_count(0)
}
#[cfg(test)]
pub unsafe fn new_count(c: u64) -> Self {
Cid { Cid {
d_uuid: Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(), d_uuid: Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(),
s_uuid: Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(), s_uuid: Uuid::parse_str("00000000-0000-0000-0000-000000000000").unwrap(),
ts: Duration::new(0, 0), ts: Duration::new(c, 0),
}
}
#[cfg(test)]
pub fn new_random_s_d(ts: Duration) -> Self {
Cid {
d_uuid: Uuid::new_v4(),
s_uuid: Uuid::new_v4(),
ts,
} }
} }

View file

@@ -0,0 +1,559 @@
use super::cid::Cid;
use crate::prelude::*;
use crate::valueset;
use kanidm_proto::v1::ConsistencyError;
use crate::entry::{compare_attrs, Eattrs};
use crate::schema::SchemaTransaction;
use std::collections::btree_map::Keys;
use std::collections::BTreeMap;
use std::fmt;
use std::ops::Bound;
use std::ops::Bound::*;
lazy_static! {
static ref PVCLASS_TOMBSTONE: PartialValue = PartialValue::new_class("tombstone");
static ref PVCLASS_RECYCLED: PartialValue = PartialValue::new_class("recycled");
}
#[derive(Debug, Clone)]
pub struct EntryChangelog {
/// The set of "entries as they existed at a point in time". This allows us to rewind
/// to a point-in-time, and then to start to "replay" applying all changes again.
///
/// A subtle and important piece of information is that an anchor can be considered
/// as the "state as existing between two Cids". This means for Cid X, this state is
/// the "moment before X". This is important, as for a create we define the initial anchor
/// as "nothing". This means that for the anchor at time X, changes that occurred at time
/// X have NOT been replayed and applied!
anchors: BTreeMap<Cid, State>,
changes: BTreeMap<Cid, Change>,
}
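
To make the anchor convention above concrete: a minimal standalone sketch, with u64 standing in for Cid and a plain counter standing in for the entry state, showing that the anchor stored at cid X is the state *before* X, so a replay starting at X re-applies X's own change.

use std::collections::BTreeMap;

fn main() {
    // The anchor at cid 1 holds the state *before* cid 1 was applied: nothing (0).
    let anchors: BTreeMap<u64, u64> = BTreeMap::from([(1, 0)]);
    let changes: BTreeMap<u64, u64> = BTreeMap::from([(1, 10), (2, 5), (3, 1)]);

    // Replay: start from the anchor at-or-before cid 1, then apply
    // every change with cid >= 1, including cid 1 itself.
    let (anchor_cid, mut state) = anchors
        .range(..=1)
        .next_back()
        .map(|(k, v)| (*k, *v))
        .unwrap();
    for (_cid, delta) in changes.range(anchor_cid..) {
        state += delta;
    }
    assert_eq!(state, 16); // 0 + 10 + 5 + 1
}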
/*
impl fmt::Display for EntryChangelog {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f
}
}
*/
/// A change defines the transitions that occurred within this Cid (transaction). A change is applied
/// as a whole, or rejected during the replay process.
#[derive(Debug, Clone)]
pub struct Change {
s: Vec<Transition>,
}
#[derive(Debug, Clone)]
enum State {
NonExistent,
Live(Eattrs),
Recycled(Eattrs),
Tombstone(Eattrs),
}
impl fmt::Display for State {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self {
State::NonExistent => write!(f, "NonExistent"),
State::Live(_) => write!(f, "Live"),
State::Recycled(_) => write!(f, "Recycled"),
State::Tombstone(_) => write!(f, "Tombstone"),
}
}
}
#[derive(Debug, Clone)]
enum Transition {
Create(Eattrs),
ModifyPurge(AttrString),
ModifyPresent(AttrString, Value),
ModifyRemoved(AttrString, PartialValue),
Recycle,
Revive,
Tombstone(Eattrs),
}
impl fmt::Display for Transition {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match &self {
Transition::Create(_) => write!(f, "Create"),
Transition::ModifyPurge(a) => write!(f, "ModifyPurge({})", a),
Transition::ModifyPresent(a, _) => write!(f, "ModifyPresent({})", a),
Transition::ModifyRemoved(a, _) => write!(f, "ModifyRemoved({})", a),
Transition::Recycle => write!(f, "Recycle"),
Transition::Revive => write!(f, "Revive"),
Transition::Tombstone(_) => write!(f, "Tombstone"),
}
}
}
impl State {
fn apply_change(self, change: &Change) -> Result<Self, Self> {
let mut state = self;
for transition in change.s.iter() {
match (&mut state, transition) {
(State::NonExistent, Transition::Create(attrs)) => {
trace!("NonExistent + Create -> Live");
state = State::Live(attrs.clone());
}
(State::Live(ref mut attrs), Transition::ModifyPurge(attr)) => {
trace!("Live + ModifyPurge({}) -> Live", attr);
attrs.remove(attr);
}
(State::Live(ref mut attrs), Transition::ModifyPresent(attr, value)) => {
trace!("Live + ModifyPresent({}) -> Live", attr);
if let Some(vs) = attrs.get_mut(attr) {
let r = vs.insert_checked(value.clone());
assert!(r.is_ok());
// Reject if it fails?
} else {
let vs = valueset::from_value_iter(std::iter::once(value.clone()))
.expect("Unable to fail - not empty, and only one type!");
attrs.insert(attr.clone(), vs);
}
}
(State::Live(ref mut attrs), Transition::ModifyRemoved(attr, value)) => {
trace!("Live + ModifyRemoved({}) -> Live", attr);
let rm = if let Some(vs) = attrs.get_mut(attr) {
vs.remove(value);
vs.is_empty()
} else {
false
};
if rm {
attrs.remove(attr);
};
}
(State::Live(attrs), Transition::Recycle) => {
trace!("Live + Recycle -> Recycled");
state = State::Recycled(attrs.clone());
}
(State::Live(_), Transition::Tombstone(attrs)) => {
trace!("Live + Tombstone -> Tombstone");
state = State::Tombstone(attrs.clone());
}
(State::Recycled(attrs), Transition::Revive) => {
trace!("Recycled + Revive -> Live");
state = State::Live(attrs.clone());
}
(State::Recycled(ref mut attrs), Transition::ModifyPurge(attr)) => {
trace!("Recycled + ModifyPurge({}) -> Recycled", attr);
attrs.remove(attr);
}
(State::Recycled(attrs), Transition::ModifyRemoved(attr, value)) => {
trace!("Recycled + ModifyRemoved({}) -> Recycled", attr);
let rm = if let Some(vs) = attrs.get_mut(attr) {
vs.remove(value);
vs.is_empty()
} else {
false
};
if rm {
attrs.remove(attr);
};
}
(State::Recycled(_), Transition::Tombstone(attrs)) => {
trace!("Recycled + Tombstone -> Tombstone");
state = State::Tombstone(attrs.clone());
}
// ==============================
// Invalid States
/*
(State::NonExistent, Transition::ModifyPurge(_))
| (State::NonExistent, Transition::ModifyPresent(_, _))
| (State::NonExistent, Transition::ModifyRemoved(_, _))
| (State::NonExistent, Transition::Recycle)
| (State::NonExistent, Transition::Revive)
| (State::NonExistent, Transition::Tombstone(_))
| (State::Live(_), Transition::Create(_))
| (State::Live(_), Transition::Revive)
| (State::Recycled(_), Transition::Create(_))
| (State::Recycled(_), Transition::Recycle)
| (State::Recycled(_), Transition::ModifyPresent(_, _))
| (State::Tombstone(_), _)
*/
(s, t) => {
warn!("{} + {} -> REJECTING", s, t);
return Err(state);
}
};
}
// Everything must have applied, all good then.
trace!(?state, "applied changes");
Ok(state)
}
}
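
A compressed stand-in for the state machine above (attribute payloads and the modify transitions elided; these are not the real types), showing the intended lifecycle NonExistent -> Live -> Recycled -> Tombstone and the apply_change-style rejection of undefined pairs:

#[derive(Debug, Clone, Copy, PartialEq)]
enum S { NonExistent, Live, Recycled, Tombstone }

#[derive(Clone, Copy)]
enum T { Create, Recycle, Revive, Tombstone }

// Returns Err(unchanged state) for undefined transitions, like apply_change.
fn step(s: S, t: T) -> Result<S, S> {
    match (s, t) {
        (S::NonExistent, T::Create) => Ok(S::Live),
        (S::Live, T::Recycle) => Ok(S::Recycled),
        (S::Live, T::Tombstone) => Ok(S::Tombstone),
        (S::Recycled, T::Revive) => Ok(S::Live),
        (S::Recycled, T::Tombstone) => Ok(S::Tombstone),
        _ => Err(s),
    }
}

fn main() {
    let lifecycle = [T::Create, T::Recycle, T::Tombstone];
    let end = lifecycle.iter().try_fold(S::NonExistent, |s, t| step(s, *t));
    assert_eq!(end, Ok(S::Tombstone));
    // Tombstone is terminal: every further transition is rejected.
    assert_eq!(step(S::Tombstone, T::Revive), Err(S::Tombstone));
}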
impl EntryChangelog {
pub fn new(cid: Cid, attrs: Eattrs, _schema: &dyn SchemaTransaction) -> Self {
// I think we need to reduce the attrs based on what is / is not replicated?
let anchors = btreemap![(cid.clone(), State::NonExistent)];
let changes = btreemap![(
cid,
Change {
s: vec![Transition::Create(attrs)]
}
)];
EntryChangelog { anchors, changes }
}
// Uncomment this once we have real on-disk storage of the changelog.
// #[cfg(test)]
pub fn new_without_schema(cid: Cid, attrs: Eattrs) -> Self {
// I think we need to reduce the attrs based on what is / is not replicated?
// We need to pick a state that reflects the current state with respect
// to tombstone or recycled!
let class = attrs.get("class");
let (anchors, changes) = if class
.as_ref()
.map(|c| c.contains(&PVCLASS_TOMBSTONE as &PartialValue))
.unwrap_or(false)
{
(
btreemap![(cid.clone(), State::Tombstone(attrs))],
BTreeMap::new(),
)
} else if class
.as_ref()
.map(|c| c.contains(&PVCLASS_RECYCLED as &PartialValue))
.unwrap_or(false)
{
(
btreemap![(cid.clone(), State::Recycled(attrs))],
BTreeMap::new(),
)
} else {
(
btreemap![(cid.clone(), State::NonExistent)],
btreemap![(
cid,
Change {
s: vec![Transition::Create(attrs)]
}
)],
)
};
EntryChangelog { anchors, changes }
}
pub fn add_ava_iter<T>(&mut self, cid: &Cid, attr: &str, viter: T)
where
T: IntoIterator<Item = Value>,
{
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
viter
.into_iter()
.map(|v| Transition::ModifyPresent(AttrString::from(attr), v))
.for_each(|t| change.s.push(t));
}
pub fn remove_ava_iter<T>(&mut self, cid: &Cid, attr: &str, viter: T)
where
T: IntoIterator<Item = PartialValue>,
{
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
viter
.into_iter()
.map(|v| Transition::ModifyRemoved(AttrString::from(attr), v))
.for_each(|t| change.s.push(t));
}
pub fn purge_ava(&mut self, cid: &Cid, attr: &str) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change
.s
.push(Transition::ModifyPurge(AttrString::from(attr)));
}
pub fn recycled(&mut self, cid: &Cid) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::Recycle);
}
pub fn revive(&mut self, cid: &Cid) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::Revive);
}
pub fn tombstone(&mut self, cid: &Cid, attrs: Eattrs) {
if !self.changes.contains_key(cid) {
self.changes.insert(cid.clone(), Change { s: Vec::new() });
}
let change = self
.changes
.get_mut(cid)
.expect("Memory corruption, change must exist");
change.s.push(Transition::Tombstone(attrs));
}
/// Replay our changes from and including the replay Cid, up to the latest point
/// in time. We also return a vector of *rejected* Cids showing what is in the
/// changelog that is considered invalid.
fn replay(
&self,
from_cid: Bound<&Cid>,
to_cid: Bound<&Cid>,
) -> Result<(State, Vec<Cid>), OperationError> {
// Select the anchor_cid that is *earlier than* or *equal to* the replay_cid.
// If not found, we are *unable* to perform this replay, which indicates a problem!
let (anchor_cid, anchor) = if matches!(from_cid, Unbounded) {
// If from is unbounded, then regardless of whether to is bounded,
// we want the earliest anchor possible.
self.anchors.iter().next()
} else {
// If from has a bound, we want an anchor "earlier than" from, regardless
// of the to bound state.
self.anchors.range((Unbounded, from_cid)).next_back()
}
.ok_or_else(|| {
admin_error!(
?from_cid,
?to_cid,
"Failed to locate anchor in replay range"
);
OperationError::ReplReplayFailure
})?;
trace!(?anchor_cid, ?anchor);
// Load the entry attribute state at that time.
let mut replay_state = anchor.clone();
let mut rejected_cid = Vec::new();
// For each change
for (change_cid, change) in self.changes.range((Included(anchor_cid), to_cid)) {
// Apply the change.
trace!(?change_cid, ?change);
replay_state = match replay_state.apply_change(change) {
Ok(mut new_state) => {
// Indicate that this was the highest CID so far.
match &mut new_state {
State::NonExistent => {
trace!("pass");
}
State::Live(ref mut attrs)
| State::Recycled(ref mut attrs)
| State::Tombstone(ref mut attrs) => {
let cv = vs_cid![change_cid.clone()];
let _ = attrs.insert(AttrString::from("last_modified_cid"), cv);
}
};
new_state
}
Err(previous_state) => {
warn!("rejecting invalid change {:?}", change_cid);
rejected_cid.push(change_cid.clone());
previous_state
}
};
}
// Return the eattrs state.
Ok((replay_state, rejected_cid))
}
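
The bound handling above leans on std's BTreeMap::range accepting (Bound, Bound) tuples. A standalone illustration with u64 keys standing in for Cids: find the anchor strictly before from_cid, then walk the changes from that anchor (inclusive) onwards.

use std::collections::BTreeMap;
use std::ops::Bound::{Excluded, Included, Unbounded};

fn main() {
    let anchors = BTreeMap::from([(1u64, "a1"), (5, "a5"), (9, "a9")]);
    let changes = BTreeMap::from([(1u64, "c1"), (3, "c3"), (5, "c5"), (7, "c7")]);

    let from_cid = 6u64;
    // The anchor "earlier than" from: range (Unbounded, Excluded), take the last.
    let (anchor_cid, _) = anchors
        .range((Unbounded, Excluded(&from_cid)))
        .next_back()
        .expect("no anchor precedes the replay range");
    assert_eq!(*anchor_cid, 5);

    // The changes from the anchor (inclusive) up to an unbounded `to`.
    let replayed: Vec<_> = changes
        .range((Included(anchor_cid), Unbounded))
        .map(|(_, c)| *c)
        .collect();
    assert_eq!(replayed, ["c5", "c7"]);
}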
#[instrument(
level = "trace",
name = "verify",
skip(self, _schema, expected_attrs, results)
)]
pub fn verify(
&self,
_schema: &dyn SchemaTransaction,
expected_attrs: &Eattrs,
entry_id: u64,
results: &mut Vec<Result<(), ConsistencyError>>,
) {
// We need to be able to take any anchor entry and replay it, such that when all
// changes are applied we get the *same entry* as the current state.
debug_assert!(results.is_empty());
// For each anchor (we only need its change id).
for cid in self.anchors.keys() {
match self.replay(Included(cid), Unbounded) {
Ok((entry_state, rejected)) => {
trace!(?rejected);
match entry_state {
State::Live(attrs) | State::Recycled(attrs) | State::Tombstone(attrs) => {
if compare_attrs(&attrs, expected_attrs) {
// valid
trace!("changelog is synchronised");
} else {
// ruh-roh.
warn!("changelog has desynchronised!");
debug!(?attrs);
debug!(?expected_attrs);
debug_assert!(false);
results
.push(Err(ConsistencyError::ChangelogDesynchronised(entry_id)));
}
}
State::NonExistent => {
warn!("entry does not exist - changelog is corrupted?!");
results.push(Err(ConsistencyError::ChangelogDesynchronised(entry_id)))
}
}
}
Err(e) => {
error!(?e);
}
}
}
debug_assert!(results.is_empty());
}
pub fn contains_tail_cid(&self, cid: &Cid) -> bool {
if let Some(tail_cid) = self.changes.keys().next_back() {
if tail_cid == cid {
return true;
}
};
false
}
pub fn can_delete(&self) -> bool {
// The changelog should be empty, and the current
// anchor state should be a tombstone.
self.changes.is_empty()
&& matches!(self.anchors.values().next_back(), Some(State::Tombstone(_)))
}
pub fn is_live(&self) -> bool {
!matches!(self.anchors.values().next_back(), Some(State::Tombstone(_)))
}
pub fn cid_iter(&self) -> Keys<Cid, Change> {
self.changes.keys()
}
/*
fn insert_anchor(&mut self, cid: Cid, entry_state: State) {
// When we insert an anchor, we have to remove all subsequent anchors (but not
// the preceding ones.)
let _ = self.anchors.split_off(&cid);
self.anchors.insert(cid.clone(), entry_state);
}
*/
pub fn trim_up_to(&mut self, cid: &Cid) -> Result<(), OperationError> {
// Build a new anchor that is equal to or less than this cid.
// In other words, the cid we are trimming to should remain in the
// changelog, and we should have an anchor that precedes it.
let (entry_state, rejected) = self.replay(Unbounded, Excluded(cid)).map_err(|e| {
error!(?e);
e
})?;
trace!(?rejected);
// Add the entry_state as an anchor. Use the CID we just
// trimmed to.
// insert_anchor will remove anything to the right, we also need to
// remove everything to the left, so just clear.
let _ = self.anchors.clear();
self.anchors.insert(cid.clone(), entry_state);
// And now split the CL.
let mut right = self.changes.split_off(cid);
std::mem::swap(&mut right, &mut self.changes);
// We can trace what we drop later?
Ok(())
}
}
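
The split_off/swap pair in trim_up_to is the std idiom for keeping the upper half of a BTreeMap, since split_off hands back the keys at and after the split point. A standalone sketch with u64 keys:

use std::collections::BTreeMap;

fn main() {
    let mut changes = BTreeMap::from([(1u64, "c1"), (3, "c3"), (5, "c5"), (7, "c7")]);

    let trim_cid = 5u64;
    // split_off returns everything at-or-after the trim point ...
    let mut right = changes.split_off(&trim_cid);
    // ... so swapping keeps {5, 7} and drops the older {1, 3}.
    std::mem::swap(&mut right, &mut changes);

    assert_eq!(changes.keys().copied().collect::<Vec<_>>(), [5, 7]);
}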
#[cfg(test)]
mod tests {
use crate::entry::Eattrs;
// use crate::prelude::*;
use crate::repl::cid::Cid;
use crate::repl::entry::{Change, EntryChangelog, State, Transition};
use crate::schema::{Schema, SchemaTransaction};
use std::time::Duration;
#[test]
fn test_entrychangelog_basic() {
run_entrychangelog_test!(|schema: &dyn SchemaTransaction| {
let cid = Cid::new_random_s_d(Duration::from_secs(1));
let eattrs = Eattrs::new();
let eclog = EntryChangelog::new(cid, eattrs, schema);
trace!(?eclog);
})
}
#[test]
fn test_entrychangelog_state_transitions() {
// Test that all our transitions are defined and work as
// expected.
assert!(State::NonExistent
.apply_change(&Change { s: vec![] })
.is_ok());
assert!(State::NonExistent
.apply_change(&Change {
s: vec![Transition::Create(Eattrs::new())]
})
.is_ok());
assert!(State::Live(Eattrs::new())
.apply_change(&Change { s: vec![] })
.is_ok());
assert!(State::Live(Eattrs::new())
.apply_change(&Change {
s: vec![Transition::Create(Eattrs::new())]
})
.is_err());
}
}
View file
@ -1 +1,3 @@
pub mod cid; pub mod cid;
pub mod entry;
pub mod ruv;
238
kanidmd/idm/src/repl/ruv.rs Normal file
View file
@ -0,0 +1,238 @@
use crate::prelude::*;
use crate::repl::cid::Cid;
use concread::bptree::{BptreeMap, BptreeMapReadTxn, BptreeMapWriteTxn};
use idlset::v2::IDLBitRange;
use kanidm_proto::v1::ConsistencyError;
use std::collections::BTreeMap;
use std::ops::Bound::*;
use std::sync::Arc;
pub struct ReplicationUpdateVector {
// This sorts by time. Should we look up by IDL or by UUID?
// I think IDL, because when we need to actually do the lookups we'll need
// to send this list to the BE to get the affected entries.
data: BptreeMap<Cid, IDLBitRange>,
}
impl Default for ReplicationUpdateVector {
fn default() -> Self {
let data: BptreeMap<Cid, IDLBitRange> = BptreeMap::new();
ReplicationUpdateVector { data }
}
}
impl ReplicationUpdateVector {
pub fn write(&self) -> ReplicationUpdateVectorWriteTransaction<'_> {
ReplicationUpdateVectorWriteTransaction {
data: self.data.write(),
}
}
pub fn read(&self) -> ReplicationUpdateVectorReadTransaction<'_> {
ReplicationUpdateVectorReadTransaction {
data: self.data.read(),
}
}
}
pub struct ReplicationUpdateVectorWriteTransaction<'a> {
data: BptreeMapWriteTxn<'a, Cid, IDLBitRange>,
}
pub struct ReplicationUpdateVectorReadTransaction<'a> {
data: BptreeMapReadTxn<'a, Cid, IDLBitRange>,
}
pub trait ReplicationUpdateVectorTransaction {
fn ruv_snapshot(&self) -> BTreeMap<Cid, IDLBitRange>;
fn verify(
&self,
entries: &[Arc<EntrySealedCommitted>],
results: &mut Vec<Result<(), ConsistencyError>>,
) {
// Okay, rebuild a parallel copy of the RUV so we can compare it to the live one.
let mut check_ruv: BTreeMap<Cid, IDLBitRange> = BTreeMap::new();
for entry in entries {
// The DB id we need.
let eid = entry.get_id();
let eclog = entry.get_changelog();
// We don't need the details of the change - only the cid of the
// change that this entry was involved in.
for cid in eclog.cid_iter() {
if let Some(idl) = check_ruv.get_mut(&cid) {
// We can't guarantee id order, so we have to do this properly.
idl.insert_id(eid);
} else {
let mut idl = IDLBitRange::new();
idl.insert_id(eid);
check_ruv.insert(cid.clone(), idl);
}
}
}
trace!(?check_ruv);
// Get the current state
let snapshot_ruv = self.ruv_snapshot();
trace!(?snapshot_ruv);
// Now compare. We want to check each CID in both sets, and then assert
// the content is the same.
let mut check_iter = check_ruv.iter();
let mut snap_iter = snapshot_ruv.iter();
let mut check_next = check_iter.next();
let mut snap_next = snap_iter.next();
while let (Some((ck, cv)), Some((sk, sv))) = (&check_next, &snap_next) {
if ck == sk {
if cv == sv {
trace!("{:?} is consistent!", ck);
} else {
admin_warn!("{:?} is NOT consistent! IDLs differ", ck);
debug_assert!(false);
results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string())));
}
check_next = check_iter.next();
snap_next = snap_iter.next();
} else if ck < sk {
admin_warn!("{:?} is NOT consistent! CID missing from RUV", ck);
debug_assert!(false);
results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string())));
check_next = check_iter.next();
} else {
admin_warn!("{:?} is NOT consistent! CID should not exist in RUV", sk);
debug_assert!(false);
results.push(Err(ConsistencyError::RuvInconsistent(sk.to_string())));
snap_next = snap_iter.next();
}
}
while let Some((ck, _cv)) = &check_next {
admin_warn!("{:?} is NOT consistent! CID missing from RUV", ck);
debug_assert!(false);
results.push(Err(ConsistencyError::RuvInconsistent(ck.to_string())));
check_next = check_iter.next();
}
while let Some((sk, _sv)) = &snap_next {
admin_warn!("{:?} is NOT consistent! CID should not exist in RUV", sk);
debug_assert!(false);
results.push(Err(ConsistencyError::RuvInconsistent(sk.to_string())));
snap_next = snap_iter.next();
}
// Done!
}
}
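
The verify walk above is a sorted merge-join over two BTreeMaps: advance whichever iterator holds the smaller key, and flag keys present on only one side. The same pattern, self-contained with u64 keys:

use std::collections::BTreeMap;

fn main() {
    let check = BTreeMap::from([(1u64, "x"), (2, "y"), (4, "z")]);
    let snap = BTreeMap::from([(1u64, "x"), (3, "w"), (4, "z")]);

    let mut missing_from_snap = Vec::new();
    let mut extra_in_snap = Vec::new();

    let mut ci = check.iter();
    let mut si = snap.iter();
    let (mut c, mut s) = (ci.next(), si.next());
    while let (Some((ck, _)), Some((sk, _))) = (c, s) {
        if ck == sk {
            // Present in both: compare the values here, then advance both sides.
            c = ci.next();
            s = si.next();
        } else if ck < sk {
            missing_from_snap.push(*ck);
            c = ci.next();
        } else {
            extra_in_snap.push(*sk);
            s = si.next();
        }
    }
    // Drain whichever side still has keys remaining.
    missing_from_snap.extend(c.into_iter().chain(ci).map(|(k, _)| *k));
    extra_in_snap.extend(s.into_iter().chain(si).map(|(k, _)| *k));

    assert_eq!(missing_from_snap, [2]);
    assert_eq!(extra_in_snap, [3]);
}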
impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorWriteTransaction<'a> {
fn ruv_snapshot(&self) -> BTreeMap<Cid, IDLBitRange> {
self.data
.iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect()
}
}
impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorReadTransaction<'a> {
fn ruv_snapshot(&self) -> BTreeMap<Cid, IDLBitRange> {
self.data
.iter()
.map(|(k, v)| (k.clone(), v.clone()))
.collect()
}
}
impl<'a> ReplicationUpdateVectorWriteTransaction<'a> {
pub fn rebuild(&mut self, entries: &[Arc<EntrySealedCommitted>]) -> Result<(), OperationError> {
// Entries and their internal changelogs are the "source of truth" for all changes
// that have ever occurred and are stored on this server. So we use them to rebuild our RUV
// here!
let mut rebuild_ruv: BTreeMap<Cid, IDLBitRange> = BTreeMap::new();
for entry in entries {
// The DB id we need.
let eid = entry.get_id();
let eclog = entry.get_changelog();
// We don't need the details of the change - only the cid of the
// change that this entry was involved in.
for cid in eclog.cid_iter() {
if let Some(idl) = rebuild_ruv.get_mut(&cid) {
// We can't guarantee id order, so we have to do this properly.
idl.insert_id(eid);
} else {
let mut idl = IDLBitRange::new();
idl.insert_id(eid);
rebuild_ruv.insert(cid.clone(), idl);
}
}
}
// Finally, we need to do a cleanup/compact of the IDLs if possible.
rebuild_ruv.iter_mut().for_each(|(_k, idl)| {
idl.maybe_compress();
});
self.data.clear();
self.data.extend(rebuild_ruv.into_iter());
Ok(())
}
pub fn insert_change(&mut self, cid: Cid, idl: IDLBitRange) -> Result<(), OperationError> {
// Remember, in a transaction the changes can be updated multiple times.
if let Some(ex_idl) = self.data.get_mut(&cid) {
// This ensures both sets have all the available ids.
let idl = ex_idl as &_ | &idl;
*ex_idl = idl;
} else {
self.data.insert(cid.clone(), idl);
}
Ok(())
}
pub fn ruv_idls(&self) -> IDLBitRange {
let mut idl = IDLBitRange::new();
self.data.iter().for_each(|(_cid, ex_idl)| {
idl = ex_idl as &_ | &idl;
});
idl
}
/*
pub fn contains(&self, idl: &IDLBitRange) -> bool {
self.data.iter()
.any(|(cid, ex_idl)| {
let idl_result = idl & ex_idl;
if idl_result.is_empty() {
false
} else {
debug!(?cid, ?idl_result);
true
}
})
}
*/
pub fn trim_up_to(&mut self, cid: &Cid) -> Result<IDLBitRange, OperationError> {
let mut idl = IDLBitRange::new();
self.data
.range((Unbounded, Excluded(cid)))
.for_each(|(_, ex_idl)| {
idl = ex_idl as &_ | &idl;
});
// Trim all cids up to this value, and return the range of IDs
// that are affected.
self.data.split_off_lt(cid);
Ok(idl)
}
pub fn commit(self) {
self.data.commit();
}
}
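
Both insert_change and ruv_idls accumulate id sets with idlset's borrowed set-union operator. A small sketch using only the IDLBitRange calls that appear above, with two hypothetical per-Cid id lists:

use idlset::v2::IDLBitRange;

fn main() {
    let mut a = IDLBitRange::new();
    a.insert_id(1);
    a.insert_id(7);

    let mut b = IDLBitRange::new();
    b.insert_id(7);
    b.insert_id(9);

    // Union via the borrowed BitOr, exactly as insert_change and ruv_idls do.
    let mut all = &a | &b;
    // Compact the representation where beneficial, as rebuild does.
    all.maybe_compress();
    println!("{:?}", all); // contains ids 1, 7, 9
}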
View file
@ -220,7 +220,7 @@ pub trait QueryServerTransaction<'a> {
// NOTE: We currently can't build search plugins due to the inability to hand // NOTE: We currently can't build search plugins due to the inability to hand
// the QS wr/ro to the plugin trait. However, there shouldn't be a need for search // the QS wr/ro to the plugin trait. However, there shouldn't be a need for search
// plugis, because all data transforms should be in the write path. // plugins, because all data transforms should be in the write path.
let res = self.get_be_txn().search(lims, &vfr).map_err(|e| { let res = self.get_be_txn().search(lims, &vfr).map_err(|e| {
admin_error!(?e, "backend failure"); admin_error!(?e, "backend failure");
@ -860,13 +860,38 @@ impl<'a> QueryServerReadTransaction<'a> {
return idx_errs; return idx_errs;
} }
// Ok BE passed, lets move on to the content. // If anything errored to this point, we can't trust the verifications below. From
// here we can just amass results.
let mut results = Vec::new();
// Verify all our entries. Weird flex I know, but it's needed for verifying
// the entry changelogs are consistent with their entries.
let schema = self.get_schema();
spanned!("server::verify", {
let filt_all = filter!(f_pres("class"));
let all_entries = match self.internal_search(filt_all) {
Ok(a) => a,
Err(_e) => return vec![Err(ConsistencyError::QueryServerSearchFailure)],
};
for e in all_entries {
e.verify(schema, &mut results)
}
// Verify the RUV to the entry changelogs now.
self.get_be_txn().verify_ruv(&mut results);
});
// Ok entries passed, let's move on to the content.
// Most of our checks are in the plugins, so we let them // Most of our checks are in the plugins, so we let them
// do their job. // do their job.
// Now, call the plugins verification system. // Now, call the plugins verification system.
Plugins::run_verify(self) Plugins::run_verify(self, &mut results);
// Finished // Finished
results
} }
} }
@ -1048,7 +1073,7 @@ impl QueryServer {
} }
pub fn initialise_helper(&self, ts: Duration) -> Result<(), OperationError> { pub fn initialise_helper(&self, ts: Duration) -> Result<(), OperationError> {
// First, check our database version - attempt to do an initial indexing // Check our database version - attempt to do an initial indexing
// based on the in memory configuration // based on the in memory configuration
// //
// If we ever change the core in memory schema, or the schema that we ship // If we ever change the core in memory schema, or the schema that we ship
@ -1058,7 +1083,6 @@ impl QueryServer {
// A major reason here to split to multiple transactions is to allow schema // A major reason here to split to multiple transactions is to allow schema
// reloading to occur, which causes the idxmeta to update, and allows validation // reloading to occur, which causes the idxmeta to update, and allows validation
// of the schema in the subsequent steps as we proceed. // of the schema in the subsequent steps as we proceed.
let reindex_write_1 = task::block_on(self.write_async(ts)); let reindex_write_1 = task::block_on(self.write_async(ts));
reindex_write_1 reindex_write_1
.upgrade_reindex(SYSTEM_INDEX_VERSION) .upgrade_reindex(SYSTEM_INDEX_VERSION)
@ -1181,10 +1205,18 @@ impl<'a> QueryServerWriteTransaction<'a> {
return Err(OperationError::AccessDenied); return Err(OperationError::AccessDenied);
} }
// Before we assign replication metadata, we need to assert these entries
// are valid to create within the set of replication transitions. This
// means they *cannot* be recycled or tombstones!
if candidates.iter().any(|e| e.mask_recycled_ts().is_none()) {
admin_warn!("Refusing to create invalid entries that are attempting to bypass replication state machine.");
return Err(OperationError::AccessDenied);
}
// Assign our replication metadata now, since we can proceed with this operation. // Assign our replication metadata now, since we can proceed with this operation.
let mut candidates: Vec<Entry<EntryInvalid, EntryNew>> = candidates let mut candidates: Vec<Entry<EntryInvalid, EntryNew>> = candidates
.into_iter() .into_iter()
.map(|e| e.assign_cid(self.cid.clone())) .map(|e| e.assign_cid(self.cid.clone(), &self.schema))
.collect(); .collect();
// run any pre plugins, giving them the list of mutable candidates. // run any pre plugins, giving them the list of mutable candidates.
@ -1213,7 +1245,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
}) })
.map(|e| { .map(|e| {
// Then seal the changes? // Then seal the changes?
e.seal() e.seal(&self.schema)
}) })
}) })
.collect(); .collect();
@ -1229,10 +1261,11 @@ impl<'a> QueryServerWriteTransaction<'a> {
})?; })?;
// We may change from ce.entries later to something else? // We may change from ce.entries later to something else?
let commit_cand = self.be_txn.create(norm_cand).map_err(|e| { let commit_cand = self.be_txn.create(&self.cid, norm_cand).map_err(|e| {
admin_error!("betxn create failure {:?}", e); admin_error!("betxn create failure {:?}", e);
e e
})?; })?;
// Run any post plugins // Run any post plugins
Plugins::run_post_create(self, &commit_cand, ce).map_err(|e| { Plugins::run_post_create(self, &commit_cand, ce).map_err(|e| {
@ -1332,6 +1365,11 @@ impl<'a> QueryServerWriteTransaction<'a> {
return Err(OperationError::NoMatchingEntries); return Err(OperationError::NoMatchingEntries);
}; };
if pre_candidates.iter().any(|e| e.mask_tombstone().is_none()) {
admin_warn!("Refusing to delete entries which may be an attempt to bypass replication state machine.");
return Err(OperationError::AccessDenied);
}
let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
.iter() .iter()
// Invalidate and assign change id's // Invalidate and assign change id's
@ -1351,28 +1389,28 @@ impl<'a> QueryServerWriteTransaction<'a> {
let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> = candidates let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> = candidates
.into_iter() .into_iter()
.map(|e| { .map(|e| {
e.into_recycled() e.to_recycled()
.validate(&self.schema) .validate(&self.schema)
.map_err(|e| { .map_err(|e| {
admin_error!(err = ?e, "Schema Violation in delete validate"); admin_error!(err = ?e, "Schema Violation in delete validate");
OperationError::SchemaViolation(e) OperationError::SchemaViolation(e)
}) })
// seal if it worked. // seal if it worked.
.map(Entry::seal) .map(|e| e.seal(&self.schema))
}) })
.collect(); .collect();
let del_cand: Vec<Entry<_, _>> = res?; let del_cand: Vec<Entry<_, _>> = res?;
self.be_txn self.be_txn
.modify(&pre_candidates, &del_cand) .modify(&self.cid, &pre_candidates, &del_cand)
.map_err(|e| { .map_err(|e| {
// be_txn is dropped, ie aborted here. // be_txn is dropped, ie aborted here.
admin_error!("Delete operation failed (backend), {:?}", e); admin_error!("Delete operation failed (backend), {:?}", e);
e e
})?; })?;
// Post delete plugs // Post delete plugins
Plugins::run_post_delete(self, &del_cand, de).map_err(|e| { Plugins::run_post_delete(self, &del_cand, de).map_err(|e| {
admin_error!("Delete operation failed (plugin), {:?}", e); admin_error!("Delete operation failed (plugin), {:?}", e);
e e
@ -1432,24 +1470,15 @@ impl<'a> QueryServerWriteTransaction<'a> {
pub fn purge_tombstones(&self) -> Result<(), OperationError> { pub fn purge_tombstones(&self) -> Result<(), OperationError> {
spanned!("server::purge_tombstones", { spanned!("server::purge_tombstones", {
// delete everything that is a tombstone. // purge everything that is a tombstone.
let cid = self.cid.sub_secs(CHANGELOG_MAX_AGE).map_err(|e| { let cid = self.cid.sub_secs(CHANGELOG_MAX_AGE).map_err(|e| {
admin_error!("Unable to generate search cid {:?}", e); admin_error!("Unable to generate search cid {:?}", e);
e e
})?; })?;
let ts = self.internal_search(filter_all!(f_and!([
f_eq("class", PVCLASS_TOMBSTONE.clone()),
f_lt("last_modified_cid", PartialValue::new_cid(cid)),
])))?;
if ts.is_empty() {
admin_info!("No Tombstones present - purge operation success");
return Ok(());
}
// Delete them - this is a TRUE delete, no going back now! // Delete them - this is a TRUE delete, no going back now!
self.be_txn self.be_txn
.delete(&ts) .reap_tombstones(&cid)
.map_err(|e| { .map_err(|e| {
admin_error!(err = ?e, "Tombstone purge operation failed (backend)"); admin_error!(err = ?e, "Tombstone purge operation failed (backend)");
e e
@ -1489,7 +1518,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
OperationError::SchemaViolation(e) OperationError::SchemaViolation(e)
}) })
// seal if it worked. // seal if it worked.
.map(Entry::seal) .map(|e| e.seal(&self.schema))
}) })
.collect(); .collect();
@ -1497,7 +1526,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
// Backend Modify // Backend Modify
self.be_txn self.be_txn
.modify(&rc, &tombstone_cand) .modify(&self.cid, &rc, &tombstone_cand)
.map_err(|e| { .map_err(|e| {
admin_error!("Purge recycled operation failed (backend), {:?}", e); admin_error!("Purge recycled operation failed (backend), {:?}", e);
e e
@ -1511,13 +1540,174 @@ impl<'a> QueryServerWriteTransaction<'a> {
// Should this take a revive event? // Should this take a revive event?
pub fn revive_recycled(&self, re: &ReviveRecycledEvent) -> Result<(), OperationError> { pub fn revive_recycled(&self, re: &ReviveRecycledEvent) -> Result<(), OperationError> {
spanned!("server::revive_recycled", { spanned!("server::revive_recycled", {
// Revive an entry to live. This is a specialised (limited) // Revive an entry to live. This is a specialised function, and draws a lot of
// modify proxy. // inspiration from modify.
// //
// impersonate modify will require ability to search the class=recycled // Access is granted by the ability to search the class=recycled
// and the ability to remove that from the object. // and the ability to modify + remove that class from the object.
if !re.ident.is_internal() {
security_info!(name = %re.ident, "revive initiator");
}
// create the modify // Get the list of pre_candidates, using impersonate search.
let pre_candidates =
self.impersonate_search_valid(re.filter.clone(), re.filter.clone(), &re.ident)?;
// Is the list empty?
if pre_candidates.is_empty() {
if re.ident.is_internal() {
trace!(
"revive: no candidates match filter ... continuing {:?}",
re.filter
);
return Ok(());
} else {
request_error!(
"revive: no candidates match filter, failure {:?}",
re.filter
);
return Err(OperationError::NoMatchingEntries);
}
};
trace!("revive: pre_candidates -> {:?}", pre_candidates);
// Check access against a "fake" modify.
let modlist = ModifyList::new_list(vec![Modify::Removed(
AttrString::from("class"),
PVCLASS_RECYCLED.clone(),
)]);
let m_valid = modlist.validate(self.get_schema()).map_err(|e| {
admin_error!("revive recycled modlist Schema Violation {:?}", e);
OperationError::SchemaViolation(e)
})?;
let me = ModifyEvent::new_impersonate(
&re.ident,
re.filter.clone(),
re.filter.clone(),
m_valid,
);
let access = self.get_accesscontrols();
let op_allow = access
.modify_allow_operation(&me, &pre_candidates)
.map_err(|e| {
admin_error!("Unable to check modify access {:?}", e);
e
})?;
if !op_allow {
return Err(OperationError::AccessDenied);
}
// Are the entries actually recycled? Refuse if every candidate is already live.
if pre_candidates.iter().all(|e| e.mask_recycled().is_some()) {
admin_warn!("Refusing to revive entries that are already live!");
return Err(OperationError::AccessDenied);
}
// Build the list of mods from directmemberof, to revive memberships.
let mut dm_mods: HashMap<Uuid, ModifyList<ModifyInvalid>> =
HashMap::with_capacity(pre_candidates.len());
for e in &pre_candidates {
// Get this entries uuid.
let u: Uuid = e.get_uuid();
if let Some(riter) = e.get_ava_as_refuuid("directmemberof") {
for g_uuid in riter {
dm_mods
.entry(g_uuid)
.and_modify(|mlist| {
let m = Modify::Present(
AttrString::from("member"),
Value::new_refer_r(&u),
);
mlist.push_mod(m);
})
.or_insert({
let m = Modify::Present(
AttrString::from("member"),
Value::new_refer_r(&u),
);
ModifyList::new_list(vec![m])
});
}
}
}
// clone the writeable entries.
let mut candidates: Vec<Entry<EntryInvalid, EntryCommitted>> = pre_candidates
.iter()
.map(|er| er.as_ref().clone().invalidate(self.cid.clone()))
// Mutate to apply the revive.
.map(|er| er.to_revived())
.collect();
// Are they all revived?
if candidates.iter().all(|e| e.mask_recycled().is_none()) {
admin_error!("Not all candidates were correctly revived, unable to proceed");
return Err(OperationError::InvalidEntryState);
}
// Do we need to apply pre-mod?
// Very likely, in case the domain has renamed etc.
Plugins::run_pre_modify(self, &mut candidates, &me).map_err(|e| {
admin_error!("Revive operation failed (plugin), {:?}", e);
e
})?;
// Schema validate
let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> = candidates
.into_iter()
.map(|e| {
e.validate(&self.schema)
.map_err(|e| {
admin_error!("Schema Violation {:?}", e);
OperationError::SchemaViolation(e)
})
.map(|e| e.seal(&self.schema))
})
.collect();
let norm_cand: Vec<Entry<_, _>> = res?;
// build the mod partial
let mp = ModifyPartial {
norm_cand,
pre_candidates,
me: &me,
};
// Call modify_apply
self.modify_apply(mp)?;
// If and only if that succeeds, apply the direct membership modifications
// if possible.
for (g, mods) in dm_mods {
// I think the filter/filter_all shouldn't matter here because the only
// valid direct memberships should still be valid/live references, as refint
// removes anything that was deleted even from recycled entries.
let f = filter_all!(f_eq("uuid", PartialValue::new_uuid(g)));
self.internal_modify(&f, &mods)?;
}
Ok(())
})
}
// Should this take a revive event?
pub fn revive_recycled_legacy(&self, re: &ReviveRecycledEvent) -> Result<(), OperationError> {
spanned!("server::revive_recycled", {
// Revive an entry to live. This is a specialised function, and draws a lot of
// inspiration from modify.
//
//
// Access is granted by the ability to search the class=recycled
// and the ability to modify + remove that class from the object.
// create the modify for access testing.
// tl;dr, remove the class=recycled // tl;dr, remove the class=recycled
let modlist = ModifyList::new_list(vec![Modify::Removed( let modlist = ModifyList::new_list(vec![Modify::Removed(
AttrString::from("class"), AttrString::from("class"),
@ -1668,6 +1858,12 @@ impl<'a> QueryServerWriteTransaction<'a> {
trace!("modify: candidates -> {:?}", candidates); trace!("modify: candidates -> {:?}", candidates);
// Did any of the candidates now become masked?
if candidates.iter().any(|e| e.mask_recycled_ts().is_none()) {
admin_warn!("Refusing to apply modifications that are attempting to bypass replication state machine.");
return Err(OperationError::AccessDenied);
}
// Pre mod plugins // Pre mod plugins
// We should probably supply the pre-post cands here. // We should probably supply the pre-post cands here.
Plugins::run_pre_modify(self, &mut candidates, me).map_err(|e| { Plugins::run_pre_modify(self, &mut candidates, me).map_err(|e| {
@ -1693,7 +1889,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
); );
OperationError::SchemaViolation(e) OperationError::SchemaViolation(e)
}) })
.map(Entry::seal) .map(|e| e.seal(&self.schema))
}) })
.collect(); .collect();
@ -1717,7 +1913,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
// Backend Modify // Backend Modify
self.be_txn self.be_txn
.modify(&pre_candidates, &norm_cand) .modify(&self.cid, &pre_candidates, &norm_cand)
.map_err(|e| { .map_err(|e| {
admin_error!("Modify operation failed (backend), {:?}", e); admin_error!("Modify operation failed (backend), {:?}", e);
e e
@ -1798,7 +1994,6 @@ impl<'a> QueryServerWriteTransaction<'a> {
}) })
} }
#[allow(clippy::cognitive_complexity)]
pub fn modify(&self, me: &ModifyEvent) -> Result<(), OperationError> { pub fn modify(&self, me: &ModifyEvent) -> Result<(), OperationError> {
spanned!("server::modify", { spanned!("server::modify", {
let mp = unsafe { self.modify_pre_apply(me)? }; let mp = unsafe { self.modify_pre_apply(me)? };
@ -1867,7 +2062,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
); );
OperationError::SchemaViolation(e) OperationError::SchemaViolation(e)
}) })
.map(Entry::seal) .map(|e| e.seal(&self.schema))
}) })
.collect(); .collect();
@ -1889,7 +2084,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
// Backend Modify // Backend Modify
self.be_txn self.be_txn
.modify(&pre_candidates, &norm_cand) .modify(&self.cid, &pre_candidates, &norm_cand)
.map_err(|e| { .map_err(|e| {
admin_error!("Modify operation failed (backend), {:?}", e); admin_error!("Modify operation failed (backend), {:?}", e);
e e
@ -1975,23 +2170,31 @@ impl<'a> QueryServerWriteTransaction<'a> {
.collect(); .collect();
candidates.iter_mut().try_for_each(|er| { candidates.iter_mut().try_for_each(|er| {
if let Some(vs) = er.get_ava_mut("name") { let nvs = if let Some(vs) = er.get_ava_set("name") {
if let Some(mut nvs) = vs.migrate_iutf8_iname()? { vs.migrate_iutf8_iname()?
std::mem::swap(&mut nvs, vs) } else {
} None
}; };
if let Some(vs) = er.get_ava_mut("domain_name") { if let Some(nvs) = nvs {
if let Some(mut nvs) = vs.migrate_iutf8_iname()? { er.set_ava_set("name", nvs)
std::mem::swap(&mut nvs, vs) }
}
let nvs = if let Some(vs) = er.get_ava_set("domain_name") {
vs.migrate_iutf8_iname()?
} else {
None
}; };
if let Some(nvs) = nvs {
er.set_ava_set("domain_name", nvs)
}
Ok(()) Ok(())
})?; })?;
// Schema check all. // Schema check all.
let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, SchemaError> = candidates let res: Result<Vec<Entry<EntrySealed, EntryCommitted>>, SchemaError> = candidates
.into_iter() .into_iter()
.map(|e| e.validate(&self.schema).map(Entry::seal)) .map(|e| e.validate(&self.schema).map(|e| e.seal(&self.schema)))
.collect(); .collect();
let norm_cand: Vec<Entry<_, _>> = match res { let norm_cand: Vec<Entry<_, _>> = match res {
@ -2004,12 +2207,11 @@ impl<'a> QueryServerWriteTransaction<'a> {
// Write them back. // Write them back.
self.be_txn self.be_txn
.modify(&pre_candidates, &norm_cand) .modify(&self.cid, &pre_candidates, &norm_cand)
.map_err(|e| { .map_err(|e| {
admin_error!("migrate_2_to_3 modification failure -> {:?}", e); admin_error!("migrate_2_to_3 modification failure -> {:?}", e);
e e
}) })
// Complete // Complete
}) })
} }
@ -3229,6 +3431,7 @@ mod tests {
// First we setup some timestamps // First we setup some timestamps
let time_p1 = duration_from_epoch_now(); let time_p1 = duration_from_epoch_now();
let time_p2 = time_p1 + Duration::from_secs(CHANGELOG_MAX_AGE * 2); let time_p2 = time_p1 + Duration::from_secs(CHANGELOG_MAX_AGE * 2);
let time_p3 = time_p2 + Duration::from_secs(CHANGELOG_MAX_AGE * 2);
let server_txn = server.write(time_p1); let server_txn = server.write(time_p1);
let admin = server_txn let admin = server_txn
@ -3254,20 +3457,40 @@ mod tests {
unsafe { DeleteEvent::new_impersonate_entry(admin.clone(), filt_i_ts.clone()) }; unsafe { DeleteEvent::new_impersonate_entry(admin.clone(), filt_i_ts.clone()) };
let se_ts = unsafe { SearchEvent::new_ext_impersonate_entry(admin, filt_i_ts.clone()) }; let se_ts = unsafe { SearchEvent::new_ext_impersonate_entry(admin, filt_i_ts.clone()) };
// First, create a tombstone // First, create an entry, then push it through the lifecycle.
let e_ts = entry_init!( let e_ts = entry_init!(
("class", Value::new_class("object")), ("class", Value::new_class("object")),
("class", Value::new_class("tombstone")), ("class", Value::new_class("person")),
("name", Value::new_iname("testperson1")),
( (
"uuid", "uuid",
Value::new_uuids("9557f49c-97a5-4277-a9a5-097d17eb8317").expect("uuid") Value::new_uuids("9557f49c-97a5-4277-a9a5-097d17eb8317").expect("uuid")
) ),
("description", Value::new_utf8s("testperson1")),
("displayname", Value::new_utf8s("testperson1"))
); );
let ce = CreateEvent::new_internal(vec![e_ts]); let ce = CreateEvent::new_internal(vec![e_ts]);
let cr = server_txn.create(&ce); let cr = server_txn.create(&ce);
assert!(cr.is_ok()); assert!(cr.is_ok());
let de_sin = unsafe {
DeleteEvent::new_internal_invalid(filter!(f_or!([f_eq(
"name",
PartialValue::new_iname("testperson1")
)])))
};
assert!(server_txn.delete(&de_sin).is_ok());
// Commit
assert!(server_txn.commit().is_ok());
// Now, establish enough time for the recycled items to be purged.
let server_txn = server.write(time_p2);
assert!(server_txn.purge_recycled().is_ok());
// Now test the tombstone properties.
// Can it be seen (external search) // Can it be seen (external search)
let r1 = server_txn.search(&se_ts).expect("search failed"); let r1 = server_txn.search(&se_ts).expect("search failed");
assert!(r1.is_empty()); assert!(r1.is_empty());
@ -3299,7 +3522,7 @@ mod tests {
assert!(server_txn.commit().is_ok()); assert!(server_txn.commit().is_ok());
// New txn, push the cid forward. // New txn, push the cid forward.
let server_txn = server.write(time_p2); let server_txn = server.write(time_p3);
// Now purge // Now purge
assert!(server_txn.purge_tombstones().is_ok()); assert!(server_txn.purge_tombstones().is_ok());
@ -3365,7 +3588,6 @@ mod tests {
let e1 = entry_init!( let e1 = entry_init!(
("class", Value::new_class("object")), ("class", Value::new_class("object")),
("class", Value::new_class("person")), ("class", Value::new_class("person")),
("class", Value::new_class("recycled")),
("name", Value::new_iname("testperson1")), ("name", Value::new_iname("testperson1")),
( (
"uuid", "uuid",
@ -3378,7 +3600,6 @@ mod tests {
let e2 = entry_init!( let e2 = entry_init!(
("class", Value::new_class("object")), ("class", Value::new_class("object")),
("class", Value::new_class("person")), ("class", Value::new_class("person")),
("class", Value::new_class("recycled")),
("name", Value::new_iname("testperson2")), ("name", Value::new_iname("testperson2")),
( (
"uuid", "uuid",
@ -3392,6 +3613,15 @@ mod tests {
let cr = server_txn.create(&ce); let cr = server_txn.create(&ce);
assert!(cr.is_ok()); assert!(cr.is_ok());
// Now we immediately delete these to force them to the correct state.
let de_sin = unsafe {
DeleteEvent::new_internal_invalid(filter!(f_or!([
f_eq("name", PartialValue::new_iname("testperson1")),
f_eq("name", PartialValue::new_iname("testperson2")),
])))
};
assert!(server_txn.delete(&de_sin).is_ok());
// Can it be seen (external search) // Can it be seen (external search)
let r1 = server_txn.search(&se_rc).expect("search failed"); let r1 = server_txn.search(&se_rc).expect("search failed");
assert!(r1.is_empty()); assert!(r1.is_empty());
View file
@ -132,17 +132,14 @@ impl ValueSetT for ValueSetCredential {
) )
} }
fn equal(&self, _other: &ValueSet) -> bool { fn equal(&self, other: &ValueSet) -> bool {
// Looks like we may not need this? // Looks like we may not need this?
false
/*
if let Some(other) = other.as_credential_map() { if let Some(other) = other.as_credential_map() {
&self.map == other &self.map == other
} else { } else {
debug_assert!(false); // debug_assert!(false);
false false
} }
*/
} }
fn merge(&mut self, other: &ValueSet) -> Result<(), OperationError> { fn merge(&mut self, other: &ValueSet) -> Result<(), OperationError> {
View file
@ -174,8 +174,12 @@ impl ValueSetRefer {
where where
T: IntoIterator<Item = Uuid>, T: IntoIterator<Item = Uuid>,
{ {
let set = iter.into_iter().collect(); let set: BTreeSet<_> = iter.into_iter().collect();
Some(Box::new(ValueSetRefer { set })) if set.is_empty() {
None
} else {
Some(Box::new(ValueSetRefer { set }))
}
} }
} }