20221219 sync polish (#1284)

Firstyear 2022-12-21 09:53:57 +10:00 committed by GitHub
parent 9bf6a32a2a
commit 3760615ddf
25 changed files with 1409 additions and 107 deletions

Cargo.lock (generated)

@@ -121,6 +121,15 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
[[package]]
name = "ascii-canvas"
version = "3.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6"
dependencies = [
"term",
]
[[package]]
name = "asn1-rs"
version = "0.3.1"
@@ -975,6 +984,17 @@ dependencies = [
"itertools 0.10.5",
]
[[package]]
name = "cron"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ff76b51e4c068c52bfd2866e1567bee7c567ae8f24ada09fd4307019e25eab7"
dependencies = [
"chrono",
"nom 7.1.1",
"once_cell",
]
[[package]]
name = "crossbeam"
version = "0.8.2"
@@ -1042,6 +1062,12 @@ dependencies = [
"cfg-if 1.0.0",
]
[[package]]
name = "crunchy"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
[[package]]
name = "crypto-common"
version = "0.1.6"
@@ -1283,6 +1309,12 @@ dependencies = [
"zeroize",
]
[[package]]
name = "diff"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
[[package]]
name = "digest"
version = "0.8.1"
@@ -1320,6 +1352,16 @@ dependencies = [
"dirs-sys",
]
[[package]]
name = "dirs-next"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1"
dependencies = [
"cfg-if 1.0.0",
"dirs-sys-next",
]
[[package]]
name = "dirs-sys"
version = "0.3.7"
@@ -1331,6 +1373,17 @@ dependencies = [
"winapi",
]
[[package]]
name = "dirs-sys-next"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d"
dependencies = [
"libc",
"redox_users",
"winapi",
]
[[package]]
name = "discard"
version = "1.0.4"
@@ -1360,6 +1413,15 @@ version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797"
[[package]]
name = "ena"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7402b94a93c24e742487327a7cd839dc9d36fec9de9fb25b09f2dae459f36c3"
dependencies = [
"log",
]
[[package]]
name = "encode_unicode"
version = "0.3.6"
@@ -1468,6 +1530,12 @@ dependencies = [
"windows-sys 0.42.0",
]
[[package]]
name = "fixedbitset"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
[[package]]
name = "flate2"
version = "1.0.24"
@@ -2247,8 +2315,10 @@ name = "kanidm-ipa-sync"
version = "1.1.0-alpha.11-dev"
dependencies = [
"base64urlsafedata",
"chrono",
"clap",
"clap_complete",
"cron",
"kanidm_client",
"kanidm_proto",
"kanidmd_lib",
@@ -2515,6 +2585,38 @@ dependencies = [
"log",
]
[[package]]
name = "lalrpop"
version = "0.19.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b30455341b0e18f276fa64540aff54deafb54c589de6aca68659c63dd2d5d823"
dependencies = [
"ascii-canvas",
"atty",
"bit-set",
"diff",
"ena",
"itertools 0.10.5",
"lalrpop-util",
"petgraph",
"pico-args",
"regex",
"regex-syntax",
"string_cache",
"term",
"tiny-keccak",
"unicode-xid",
]
[[package]]
name = "lalrpop-util"
version = "0.19.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcf796c978e9b4d983414f4caedc9273aa33ee214c5b887bd55fde84c85d2dc4"
dependencies = [
"regex",
]
[[package]]
name = "last-git-commit"
version = "0.2.0"
@@ -2544,7 +2646,7 @@ dependencies = [
[[package]]
name = "ldap3_client"
version = "0.3.0"
source = "git+https://github.com/kanidm/ldap3.git#00188ddc176adefd93ab16f3d004b13326716e07"
source = "git+https://github.com/kanidm/ldap3.git#5149451559dc027a7101d3d55b03b4c206c7a40d"
dependencies = [
"base64 0.13.1",
"base64urlsafedata",
@@ -2563,11 +2665,13 @@ dependencies = [
[[package]]
name = "ldap3_proto"
version = "0.3.0"
source = "git+https://github.com/kanidm/ldap3.git#00188ddc176adefd93ab16f3d004b13326716e07"
source = "git+https://github.com/kanidm/ldap3.git#5149451559dc027a7101d3d55b03b4c206c7a40d"
dependencies = [
"bytes",
"lalrpop",
"lalrpop-util",
"lber",
"nom 7.1.1",
"regex",
"tokio-util",
"tracing",
"uuid",
@@ -2836,6 +2940,12 @@ dependencies = [
"tempfile",
]
[[package]]
name = "new_debug_unreachable"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e4a24736216ec316047a1fc4252e27dabb04218aa4a3f37c6e7ddbf1f9782b54"
[[package]]
name = "nom"
version = "2.2.1"
@@ -3179,6 +3289,25 @@ version = "2.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e"
[[package]]
name = "petgraph"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143"
dependencies = [
"fixedbitset",
"indexmap",
]
[[package]]
name = "phf_shared"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096"
dependencies = [
"siphasher",
]
[[package]]
name = "phonenumber"
version = "0.3.1+8.12.9"
@@ -3199,6 +3328,12 @@ dependencies = [
"thiserror",
]
[[package]]
name = "pico-args"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db8bcd96cb740d03149cbad5518db9fd87126a10ab519c011893b1754134c468"
[[package]]
name = "pin-project"
version = "1.0.12"
@@ -3302,6 +3437,12 @@ version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de"
[[package]]
name = "precomputed-hash"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
[[package]]
name = "proc-macro-crate"
version = "1.2.1"
@@ -3716,6 +3857,12 @@ dependencies = [
"nom 7.1.1",
]
[[package]]
name = "rustversion"
version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70"
[[package]]
name = "ryu"
version = "1.0.11"
@@ -4058,6 +4205,12 @@ dependencies = [
"event-listener",
]
[[package]]
name = "siphasher"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bd3e3206899af3f8b12af284fafc038cc1dc2b41d1b89dd17297221c5d225de"
[[package]]
name = "sketching"
version = "1.1.0-alpha.11-dev"
@@ -4200,6 +4353,19 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0"
[[package]]
name = "string_cache"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "213494b7a2b503146286049378ce02b482200519accc31872ee8be91fa820a08"
dependencies = [
"new_debug_unreachable",
"once_cell",
"parking_lot",
"phf_shared",
"precomputed-hash",
]
[[package]]
name = "strsim"
version = "0.10.0"
@@ -4258,6 +4424,17 @@ dependencies = [
"winapi",
]
[[package]]
name = "term"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f"
dependencies = [
"dirs-next",
"rustversion",
"winapi",
]
[[package]]
name = "termcolor"
version = "1.1.3"
@@ -4471,6 +4648,15 @@ dependencies = [
"syn",
]
[[package]]
name = "tiny-keccak"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
dependencies = [
"crunchy",
]
[[package]]
name = "tinytemplate"
version = "1.2.1"


@@ -45,12 +45,13 @@ base64urlsafedata = "0.1.2"
bytes = "^1.3.0"
clap = { version = "^3.2", features = ["derive"] }
clap_complete = "^3.2.5"
# Forced by saffron
# Forced by saffron/cron
chrono = "^0.4.23"
compact_jwt = "^0.2.3"
# compact_jwt = { path = "../compact_jwt" }
concread = "^0.4.0"
# concread = { path = "../concread" }
cron = "0.12.0"
crossbeam = "0.8.1"
criterion = "^0.4.0"
csv = "1.1.6"

examples/kanidm-ipa-sync (new file)

@@ -0,0 +1,38 @@
# The sync account token as generated by "system sync generate-token".
sync_token = "eyJhb..."
# A cron-like expression of when to run in scheduled mode. The format is:
# sec min hour day-of-month month day-of-week year
#
# The default is "0 */5 * * * * *", which means "run every 5 minutes".
# schedule = ""
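#
# An illustrative (hypothetical) value, not part of the shipped defaults: to run
# hourly at the top of the hour, you could set:
# schedule = "0 0 * * * * *"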
# If you want to monitor the status of the scheduled sync tool (you should),
# then you can set a bind address here.
#
# If not set, defaults to no status listener.
# status_bind = ""
# The LDAP URI to FreeIPA. This MUST be LDAPS. You should connect to a single, specific
# server in the IPA topology rather than via a load balancer or DNS SRV records. This
# is to prevent replication conflicts and issues due to how 389-ds content sync works.
ipa_uri = "ldaps://specific-server.ipa.dev.kanidm.com"
# Path to the IPA CA certificate in PEM format.
ipa_ca = "/path/to/kanidm-ipa-ca.pem"
# The DN of an account with content sync rights. By default cn=Directory Manager has
# this access.
ipa_sync_dn = "cn=Directory Manager"
ipa_sync_pw = "pi9aix6balaqu8Maerah"
# The basedn to examine.
ipa_sync_base_dn = "dc=ipa,dc=dev,dc=kanidm,dc=com"
# The sync tool can alter or exclude entries. These are mapped by their syncuuid
# (not their ipa-object-uuid). The syncuuid is derived from nsUniqueId in 389-ds.
# It is chosen over the DN because DNs can change with modrdn, while nsUniqueId is
# immutable and can only change if an entry is deleted and recreated.
[ac60034b-3498-11ed-a50d-919b4b1a5ec0]
# my-problematic-entry
exclude = true


@@ -1,4 +1,4 @@
dn: cn=Retro Changelog Plugin,cn=plugins,cn=config
changetype: modify
add: nsslapd-include-suffix
nsslapd-include-suffix: dc=dev,dc=kanidm,dc=com
nsslapd-include-suffix: dc=ipa,dc=dev,dc=kanidm,dc=com


@@ -14,9 +14,11 @@ repository.workspace = true
[dependencies]
base64urlsafedata.workspace = true
clap = { workspace = true, features = ["derive", "env"] }
chrono.workspace = true
cron.workspace = true
kanidm_client.workspace = true
kanidm_proto.workspace = true
tokio = { workspace = true, features = ["rt", "macros"] }
tokio = { workspace = true, features = ["rt", "macros", "net"] }
tracing.workspace = true
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }


@@ -6,6 +6,8 @@ use uuid::Uuid;
#[derive(Debug, Deserialize)]
pub struct Config {
pub sync_token: String,
pub schedule: Option<String>,
pub status_bind: Option<String>,
pub ipa_uri: Url,
pub ipa_ca: String,
pub ipa_sync_dn: String,


@@ -0,0 +1,10 @@
#[derive(Clone, Debug)]
pub enum SyncError {
ClientConfig,
LdapConn,
LdapAuth,
LdapSyncrepl,
SyncStatus,
SyncUpdate,
Preprocess,
}


@@ -12,12 +12,16 @@
#![allow(clippy::expect_used)]
mod config;
mod error;
#[cfg(test)]
mod tests;
use crate::config::{Config, EntryConfig};
use crate::error::SyncError;
use chrono::Utc;
use clap::Parser;
use cron::Schedule;
use std::collections::HashMap;
use std::fs::metadata;
use std::fs::File;
@@ -25,8 +29,17 @@ use std::io::Read;
use std::os::unix::fs::MetadataExt;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpListener;
use tokio::runtime;
use tokio::sync::broadcast;
use tokio::time::sleep;
use tracing::{debug, error, info, warn};
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};
@@ -84,14 +97,182 @@ async fn driver_main(opt: Opt) {
}
};
// Do we need this?
// let cb = cb.connect_timeout(cfg.conn_timeout);
let expression = sync_config.schedule.as_deref().unwrap_or("0 */5 * * * * *");
let schedule = match Schedule::from_str(expression) {
Ok(s) => s,
Err(_) => {
error!("Failed to parse cron schedule expression");
return;
}
};
if opt.schedule {
let last_op_status = Arc::new(AtomicBool::new(true));
let (broadcast_tx, mut broadcast_rx) = broadcast::channel(4);
let last_op_status_c = last_op_status.clone();
// Can we setup the socket for status?
let status_handle = if let Some(sb) = sync_config.status_bind.as_deref() {
// Can we bind?
let listener = match TcpListener::bind(sb).await {
Ok(l) => l,
Err(e) => {
error!(?e, "Failed to bind status socket");
return;
}
};
info!("Status listener is started on {:?}", sb);
// Detach a status listener.
let status_rx = broadcast_tx.subscribe();
Some(tokio::spawn(async move {
status_task(listener, status_rx, last_op_status_c).await
}))
} else {
warn!("No status listener configured, this will prevent you monitoring the sync tool");
None
};
// main driver loop
let driver_handle = tokio::spawn(async move {
loop {
let now = Utc::now();
let next_time = match schedule.after(&now).next() {
Some(v) => v,
None => {
error!("Failed to access any future scheduled events, terminating.");
break;
}
};
// If we don't do 1 + here we can trigger the event multiple times
// rapidly since we are in the same second.
let wait_seconds = 1 + (next_time - now).num_seconds() as u64;
info!("next sync on {}, wait_time = {}s", next_time, wait_seconds);
tokio::select! {
_ = broadcast_rx.recv() => {
// stop the event loop!
break;
}
_ = sleep(Duration::from_secs(wait_seconds)) => {
info!("starting sync ...");
match run_sync(cb.clone(), &sync_config, &opt).await {
Ok(_) => last_op_status.store(true, Ordering::Relaxed),
Err(e) => {
error!(?e, "sync completed with error");
last_op_status.store(false, Ordering::Relaxed)
}
};
}
}
}
info!("Stopped sync driver");
});
// Block on signals now.
loop {
tokio::select! {
Ok(()) = tokio::signal::ctrl_c() => {
break
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::terminate();
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
break
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::alarm();
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::hangup();
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::user_defined1();
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::user_defined2();
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
}
}
broadcast_tx
.send(true)
.expect("Failed to trigger a clean shutdown!");
let _ = driver_handle.await;
if let Some(sh) = status_handle {
let _ = sh.await;
}
} else {
if let Err(e) = run_sync(cb, &sync_config, &opt).await {
error!(?e, "Sync completed with error");
};
}
}
async fn status_task(
listener: TcpListener,
mut status_rx: broadcast::Receiver<bool>,
last_op_status: Arc<AtomicBool>,
) {
loop {
tokio::select! {
_ = status_rx.recv() => {
break;
}
maybe_sock = listener.accept() => {
let mut stream = match maybe_sock {
Ok((sock, addr)) => {
debug!("accept from {:?}", addr);
sock
}
Err(e) => {
error!(?e, "Failed to accept status connection");
continue;
}
};
let sr = if last_op_status.load(Ordering::Relaxed) {
stream.write_all(b"Ok\n").await
} else {
stream.write_all(b"Err\n").await
};
if let Err(e) = sr {
error!(?e, "Failed to send status");
}
}
}
}
info!("Stopped status task");
}
async fn run_sync(
cb: KanidmClientBuilder,
sync_config: &Config,
opt: &Opt,
) -> Result<(), SyncError> {
let rsclient = match cb.build() {
Ok(rsc) => rsc,
Err(_e) => {
error!("Failed to build async client");
return;
return Err(SyncError::ClientConfig);
}
};
@@ -99,7 +280,6 @@ async fn driver_main(opt: Opt) {
// Preflight check.
// * can we connect to ipa?
let mut ipa_client = match LdapClientBuilder::new(&sync_config.ipa_uri)
.add_tls_ca(&sync_config.ipa_ca)
.build()
@@ -108,7 +288,7 @@ async fn driver_main(opt: Opt) {
Ok(lc) => lc,
Err(e) => {
error!(?e, "Failed to connect to freeipa");
return;
return Err(SyncError::LdapConn);
}
};
@@ -124,7 +304,7 @@ async fn driver_main(opt: Opt) {
}
Err(e) => {
error!(?e, "Failed to bind (authenticate) to freeipa");
return;
return Err(SyncError::LdapAuth);
}
};
@@ -134,7 +314,7 @@ async fn driver_main(opt: Opt) {
Ok(s) => s,
Err(e) => {
error!(?e, "Failed to access scim sync status");
return;
return Err(SyncError::SyncStatus);
}
};
@@ -184,13 +364,13 @@ async fn driver_main(opt: Opt) {
debug!(ipa_sync_base_dn = ?sync_config.ipa_sync_base_dn, ?cookie, ?mode, ?filter);
let sync_result = match ipa_client
.syncrepl(sync_config.ipa_sync_base_dn, filter, cookie, mode)
.syncrepl(sync_config.ipa_sync_base_dn.clone(), filter, cookie, mode)
.await
{
Ok(results) => results,
Err(e) => {
error!(?e, "Failed to perform syncrepl from ipa");
return;
return Err(SyncError::LdapSyncrepl);
}
};
@@ -211,7 +391,7 @@ async fn driver_main(opt: Opt) {
.await
{
Ok(ssr) => ssr,
Err(()) => return,
Err(()) => return Err(SyncError::Preprocess),
};
if opt.proto_dump {
@@ -220,17 +400,21 @@ async fn driver_main(opt: Opt) {
if let Err(e) = serde_json::to_writer_pretty(stdout, &scim_sync_request) {
error!(?e, "Failed to serialise scim sync request");
};
Ok(())
} else if opt.dry_run {
info!("dry-run complete");
info!("Success!");
Ok(())
} else {
if let Err(e) = rsclient.scim_v1_sync_update(&scim_sync_request).await {
error!(
?e,
"Failed to submit scim sync update - see the kanidmd server log for more details."
);
Err(SyncError::SyncUpdate)
} else {
info!("Success!");
Ok(())
}
}
// done!


@@ -31,6 +31,11 @@ pub struct Opt {
#[clap(short = 'n')]
pub dry_run: bool,
/// Run in scheduled mode, where the sync tool will periodically attempt to sync between
/// FreeIPA and Kanidm.
#[clap(long = "schedule")]
pub schedule: bool,
/// Skip the root user permission check.
#[clap(short, long, hide = true)]
pub skip_root_check: bool,


@@ -31,6 +31,11 @@
- [RADIUS](integrations/radius.md)
- [LDAP](integrations/ldap.md)
# Synchronisation
- [Concepts](sync/concepts.md)
- [FreeIPA](sync/freeipa.md)
# Integration Examples
- [Kubernetes Ingress](examples/k8s_ingress_example.md)


@@ -1,8 +1,7 @@
# Client tools
To interact with Kanidm as an administrator, you'll need to use our command
line tools. If you haven't installed them yet,
[install them now](installing_client_tools.mdc).
line tools. If you haven't installed them yet, [install them now](installing_client_tools.md).
## Kanidm configuration

View file

@@ -0,0 +1,117 @@
# Synchronisation Concepts
## Introduction
In some environments, Kanidm may be the first Identity Management (IDM) system introduced. However,
many environments already have well-established IDM systems in use. To allow Kanidm to work with
these, it is possible to synchronise data between the systems.
Currently Kanidm can consume (import) data from another IDM system. There are two major use cases
for this:
* Running Kanidm in parallel with another IDM system
* Migrating from an existing IDM to Kanidm
An incoming IDM data source is bound to Kanidm by a sync account. All synchronised entries will
have a reference to the sync account that they came from, defined by their "sync parent uuid".
While an entry is owned by a sync account, we refer to the sync account as having authority over
the content of that entry.
The sync process is driven by a sync tool. This tool extracts the current sync state from
Kanidm, requests the set of changes (differences) from the IDM source, and then submits these
changes to Kanidm. Kanidm will apply these changes and commit the new sync state on
success.
In the event of a conflict or data import error, Kanidm will halt and roll back the synchronisation
to the last good state. The sync tool should then be reconfigured to exclude the conflicting entry or
to remap its properties to resolve the conflict. The operation can then be retried.
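
As a sketch of how an exclusion looks with the FreeIPA sync tool, a conflicting entry can be
excluded by adding a stanza keyed by its syncuuid to the tool's configuration (this mirrors the
example config shipped with the tool; the uuid shown is illustrative):

```toml
# Entries are mapped by their syncuuid (not their source object uuid).
[ac60034b-3498-11ed-a50d-919b4b1a5ec0]
# my-problematic-entry
exclude = true
```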
This process can continue long term to allow Kanidm to operate in parallel to another IDM system. If
this is for a migration, however, the sync account can be finalised. This terminates the sync account
and removes the sync parent uuid from all synchronised entries, moving authority over the entries into
Kanidm.

Alternatively, the sync account can be terminated, which removes all synchronised content that was submitted.
## Creating a Sync Account
Creating a sync account requires administration permissions. By default this is available to
members of the "system\_admins" group, of which "admin" is a member by default.
kanidm system sync create <sync account name>
kanidm system sync create ipasync
Once the sync account is created, you can generate the sync token that identifies the
sync tool.
kanidm system sync generate-token <sync account name> <token label>
kanidm system sync generate-token ipasync mylabel
token: eyJhbGci...
{{#template
../templates/kani-warning.md
imagepath=../images
title=Warning!
text=The sync account token has a high level of privilege, able to create new accounts and groups. It should be treated carefully as a result!
}}
If you need to revoke the token, you can do so with:
kanidm system sync destroy-token <sync account name>
kanidm system sync destroy-token ipasync
Destroying the token does NOT affect the state of the sync account and its synchronised entries. Creating
a new token and providing that to the sync tool will continue the sync process.
## Operating the Sync Tool
The sync tool can now be run to replicate entries from the external IDM system into Kanidm.
You should refer to the chapter for the specific external IDM system you are using for details on
the sync tool configuration.
The sync tool runs in batches, meaning that changes from the source IDM service will take some time
to appear in Kanidm. The delay depends on how frequently you choose to run the sync tool.
If the sync tool fails, you can investigate details in the Kanidmd server output.
The sync tool can run indefinitely if you wish for Kanidm to continuously import data from the external
source.
## Finalising the Sync Account
If you are performing a migration from an external IDM to Kanidm, when that migration is completed
you can nominate that Kanidm now owns all of the imported data. This is achieved by finalising the
sync account.
{{#template
../templates/kani-warning.md
imagepath=../images
title=Warning!
text=You can not undo this operation. Once you have finalised an agreement, Kanidm owns all of the synchronised data, and you can not resume synchronisation.
}}
kanidm system sync finalise <sync account name>
kanidm system sync finalise ipasync
# Do you want to continue? This operation can NOT be undone. [y/N]
Once finalised, imported accounts can now be fully managed by Kanidm.
## Terminating the Sync Account
If you decide to cease importing accounts, or need to remove all imported accounts from a sync
account, you can choose to terminate the agreement, removing all data that was imported.
{{#template
../templates/kani-warning.md
imagepath=../images
title=Warning!
text=You can not undo this operation. Once you have terminated an agreement, Kanidm deletes all of the synchronised data, and you can not resume synchronisation.
}}
kanidm system sync terminate <sync account name>
kanidm system sync terminate ipasync
# Do you want to continue? This operation can NOT be undone. [y/N]
Once terminated, all imported data will be deleted by Kanidm.


@@ -0,0 +1,68 @@
# Synchronising from FreeIPA
FreeIPA is a popular open source LDAP and Kerberos provider, aiming to be an "Active Directory" for
Linux.
Kanidm is able to synchronise from FreeIPA for the purposes of coexistence or migration.
## Installing the FreeIPA Sync Tool
See [installing the client tools](../installing_client_tools.md).
## Configure the FreeIPA Sync Tool
The sync tool is a bridge between FreeIPA and Kanidm, meaning that the tool must be configured to
communicate with both sides.

Like other components of Kanidm, the FreeIPA sync tool will read your /etc/kanidm/config, if present,
to understand how to connect to Kanidm.

The sync-tool-specific settings are configured in its own configuration file.
```
{{#rustdoc_include ../../../examples/kanidm-ipa-sync}}
```
This example is located in [examples/kanidm-ipa-sync](https://github.com/kanidm/kanidm/blob/master/examples/kanidm-ipa-sync).
In addition to this, you must make some configuration changes to FreeIPA to enable synchronisation.
You must modify the retro changelog plugin to include the full scope of the database suffix.
```
{{#rustdoc_include ../../../iam_migrations/freeipa/00config-mod.ldif}}
```
You must then restart your FreeIPA server.
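
As a sketch (the hostname and file path are illustrative), the modification can be applied with
ldapmodify, and the server restarted with ipactl:

    ldapmodify -H ldaps://specific-server.ipa.dev.kanidm.com -D "cn=Directory Manager" -W -f 00config-mod.ldif
    ipactl restart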
## Running the Sync Tool Manually
You can perform a dry run with the sync tool manually to check that your configuration is
correct.
kanidm-ipa-sync [-c /path/to/kanidm/config] -i /path/to/kanidm-ipa-sync -n
kanidm-ipa-sync -i /etc/kanidm/ipa-sync -n
## Running the Sync Tool Automatically
The sync tool can be run on a schedule if you configure the `schedule` parameter and provide
the option `--schedule` on the CLI:
kanidm-ipa-sync [-c /path/to/kanidm/config] -i /path/to/kanidm-ipa-sync --schedule
## Monitoring the Sync Tool
When running in schedule mode, you may wish to monitor the sync tool for failures. Since failures
block the sync process, this is important for ensuring a smooth and reliable synchronisation process.
You can configure a status listener that can be monitored via tcp with the parameter `status_bind`.
An example of monitoring this with netcat is:
# status_bind = "[::1]:12345"
# nc ::1 12345
Ok
It's important to note that no details are revealed via the status socket; it reports only the Ok or Err
status of the last sync.
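
As a minimal sketch of automating this (assuming the `status_bind` address above and that `nc`
is available), a check suitable for a cron job or monitoring agent could look like:

```sh
#!/bin/sh
# Query the sync tool status socket; exit non-zero if the last sync failed.
STATUS=$(nc ::1 12345)
if [ "$STATUS" = "Ok" ]; then
    exit 0
else
    echo "last sync failed: ${STATUS}" >&2
    exit 1
fi
```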


@@ -63,4 +63,14 @@ impl KanidmClient {
self.perform_patch_request(format!("/v1/sync_account/{}", id).as_str(), update_entry)
.await
}
pub async fn idm_sync_account_finalise(&self, id: &str) -> Result<(), ClientError> {
self.perform_get_request(format!("/v1/sync_account/{}/_finalise", id).as_str())
.await
}
pub async fn idm_sync_account_terminate(&self, id: &str) -> Result<(), ClientError> {
self.perform_get_request(format!("/v1/sync_account/{}/_terminate", id).as_str())
.await
}
}


@@ -48,11 +48,18 @@ fi
WORKDIR /usr/src/kanidm/
# build the CLI
RUN if [ -z "${KANIDM_FEATURES}" ]; then \
cargo build -p kanidm_tools --bin kanidm ${KANIDM_BUILD_OPTIONS} \
cargo build --bin kanidm ${KANIDM_BUILD_OPTIONS} \
--target-dir="/usr/src/kanidm/target/" \
--release; \
cargo build --bin kanidm-ipa-sync ${KANIDM_BUILD_OPTIONS} \
--target-dir="/usr/src/kanidm/target/" \
--release; \
else \
cargo build -p kanidm_tools --bin kanidm ${KANIDM_BUILD_OPTIONS} \
cargo build --bin kanidm ${KANIDM_BUILD_OPTIONS} \
--target-dir="/usr/src/kanidm/target/" \
--features="${KANIDM_FEATURES}" \
--release; \
cargo build --bin kanidm-ipa-sync ${KANIDM_BUILD_OPTIONS} \
--target-dir="/usr/src/kanidm/target/" \
--features="${KANIDM_FEATURES}" \
--release; \
@@ -69,7 +76,9 @@ RUN zypper install -y timezone busybox-adduser && \
zypper clean -a
COPY --from=builder /usr/src/kanidm/target/release/kanidm /sbin/
COPY --from=builder /usr/src/kanidm/target/release/kanidm-ipa-sync /sbin/
RUN chmod +x /sbin/kanidm
RUN chmod +x /sbin/kanidm-ipa-sync
ENV RUST_BACKTRACE 1
RUN adduser -D -H kanidm


@@ -1,4 +1,5 @@
use crate::SynchOpt;
use dialoguer::Confirm;
impl SynchOpt {
pub fn debug(&self) -> bool {
@@ -8,7 +9,9 @@ impl SynchOpt {
SynchOpt::Create { copt, .. }
| SynchOpt::GenerateToken { copt, .. }
| SynchOpt::DestroyToken { copt, .. }
| SynchOpt::ForceRefresh { copt, .. } => copt.debug,
| SynchOpt::ForceRefresh { copt, .. }
| SynchOpt::Finalise { copt, .. }
| SynchOpt::Terminate { copt, .. } => copt.debug,
}
}
@@ -71,6 +74,40 @@ impl SynchOpt {
Err(e) => error!("Error -> {:?}", e),
}
}
SynchOpt::Finalise { account_id, copt } => {
if !Confirm::new()
.default(false)
.with_prompt("Do you want to continue? This operation can NOT be undone.")
.interact()
.unwrap()
{
info!("No changes were made");
return;
}
let client = copt.to_client().await;
match client.idm_sync_account_finalise(account_id).await {
Ok(()) => println!("Success"),
Err(e) => error!("Error -> {:?}", e),
}
}
SynchOpt::Terminate { account_id, copt } => {
if !Confirm::new()
.default(false)
.with_prompt("Do you want to continue? This operation can NOT be undone.")
.interact()
.unwrap()
{
info!("No changes were made");
return;
}
let client = copt.to_client().await;
match client.idm_sync_account_terminate(account_id).await {
Ok(()) => println!("Success"),
Err(e) => error!("Error -> {:?}", e),
}
}
}
}
}


@@ -759,6 +759,7 @@ pub enum SynchOpt {
#[clap(name = "description")]
description: Option<String>,
},
/// Generate a bearer token for an IDM sync account
#[clap(name = "generate-token")]
GenerateToken {
#[clap()]
@@ -768,6 +769,7 @@ pub enum SynchOpt {
#[clap(flatten)]
copt: CommonOpt,
},
/// Destroy (revoke) the bearer token for an IDM sync account
#[clap(name = "destroy-token")]
DestroyToken {
#[clap()]
@@ -784,7 +786,31 @@ pub enum SynchOpt {
account_id: String,
#[clap(flatten)]
copt: CommonOpt,
}
},
/// Finalise and remove this sync account. This will transfer all synchronised entries into
/// the authority of Kanidm. This signals the end of a migration from an external IDM into
/// Kanidm. ⚠️ This action can NOT be undone. Once complete, it is most likely
/// that attempting to recreate a sync account from the same IDM will fail due to conflicting
/// entries that Kanidm now owns.
#[clap(name = "finalise")]
Finalise {
#[clap()]
account_id: String,
#[clap(flatten)]
copt: CommonOpt,
},
/// Terminate and remove this sync account. This will DELETE all entries that were imported
/// from the external IDM source. ⚠️ This action can NOT be undone, and will require you to
/// recreate the sync account if you wish to re-import data. Recreating the sync account
/// may fail until the recycle bin and tombstones are purged.
#[clap(name = "terminate")]
Terminate {
#[clap()]
account_id: String,
#[clap(flatten)]
copt: CommonOpt,
},
}
#[derive(Debug, Subcommand)] #[derive(Debug, Subcommand)]
@@ -807,7 +833,8 @@ pub enum SystemOpt {
#[clap(subcommand)]
commands: DomainOpt,
},
#[clap(name = "sync", hide = true)]
#[clap(name = "sync")]
/// Configure synchronisation from an external IDM system
Synch {
#[clap(subcommand)]
commands: SynchOpt,


@@ -1,7 +1,9 @@
use kanidmd_lib::prelude::*;
use crate::{QueryServerReadV1, QueryServerWriteV1};
use kanidmd_lib::idm::scim::{GenerateScimSyncTokenEvent, ScimSyncUpdateEvent};
use kanidmd_lib::idm::scim::{
GenerateScimSyncTokenEvent, ScimSyncFinaliseEvent, ScimSyncTerminateEvent, ScimSyncUpdateEvent,
};
use kanidmd_lib::idm::server::IdmServerTransaction;
use kanidm_proto::scim_v1::{ScimSyncRequest, ScimSyncState};
@@ -80,6 +82,76 @@ impl QueryServerWriteV1 {
.and_then(|r| idms_prox_write.commit().map(|_| r))
}
#[instrument(
level = "info",
skip_all,
fields(uuid = ?eventid)
)]
pub async fn handle_sync_account_finalise(
&self,
uat: Option<String>,
uuid_or_name: String,
eventid: Uuid,
) -> Result<(), OperationError> {
let ct = duration_from_epoch_now();
let mut idms_prox_write = self.idms.proxy_write(ct).await;
let ident = idms_prox_write
.validate_and_parse_token_to_ident(uat.as_deref(), ct)
.map_err(|e| {
admin_error!(err = ?e, "Invalid identity");
e
})?;
let target = idms_prox_write
.qs_write
.name_to_uuid(uuid_or_name.as_str())
.map_err(|e| {
admin_error!(err = ?e, "Error resolving id to target");
e
})?;
let sfe = ScimSyncFinaliseEvent { ident, target };
idms_prox_write
.scim_sync_finalise(&sfe)
.and_then(|r| idms_prox_write.commit().map(|_| r))
}
#[instrument(
level = "info",
skip_all,
fields(uuid = ?eventid)
)]
pub async fn handle_sync_account_terminate(
&self,
uat: Option<String>,
uuid_or_name: String,
eventid: Uuid,
) -> Result<(), OperationError> {
let ct = duration_from_epoch_now();
let mut idms_prox_write = self.idms.proxy_write(ct).await;
let ident = idms_prox_write
.validate_and_parse_token_to_ident(uat.as_deref(), ct)
.map_err(|e| {
admin_error!(err = ?e, "Invalid identity");
e
})?;
let target = idms_prox_write
.qs_write
.name_to_uuid(uuid_or_name.as_str())
.map_err(|e| {
admin_error!(err = ?e, "Error resolving id to target");
e
})?;
let ste = ScimSyncTerminateEvent { ident, target };
idms_prox_write
.scim_sync_terminate(&ste)
.and_then(|r| idms_prox_write.commit().map(|_| r))
}
#[instrument(
level = "info",
skip_all,


@@ -4,9 +4,7 @@ use kanidm_proto::scim_v1::ScimSyncRequest;
use kanidm_proto::v1::Entry as ProtoEntry;
use kanidmd_lib::prelude::*;
use super::v1::{
json_rest_event_delete_id, json_rest_event_get, json_rest_event_get_id, json_rest_event_post,
};
use super::v1::{json_rest_event_get, json_rest_event_get_id, json_rest_event_post};
pub async fn sync_account_get(req: tide::Request<AppState>) -> tide::Result {
let filter = filter_all!(f_eq("class", PartialValue::new_class("sync_account")));
@@ -23,11 +21,6 @@ pub async fn sync_account_id_get(req: tide::Request<AppState>) -> tide::Result {
json_rest_event_get_id(req, filter, None).await
}
pub async fn sync_account_id_delete(req: tide::Request<AppState>) -> tide::Result {
let filter = filter_all!(f_eq("class", PartialValue::new_class("sync_account")));
json_rest_event_delete_id(req, filter).await
}
pub async fn sync_account_id_patch(mut req: tide::Request<AppState>) -> tide::Result {
// Update a value / attrs
let uat = req.get_current_uat();
@@ -48,8 +41,7 @@ pub async fn sync_account_id_patch(mut req: tide::Request<AppState>) -> tide::Re
to_tide_response(res, hvalue)
}
/*
pub async fn sync_account_token_get(req: tide::Request<AppState>) -> tide::Result {
pub async fn sync_account_id_get_finalise(req: tide::Request<AppState>) -> tide::Result {
let uat = req.get_current_uat();
let uuid_or_name = req.get_url_param("id")?;
@@ -57,12 +49,25 @@ pub async fn sync_account_token_get(req: tide::Request<AppState>) -> tide::Resul
let res = req
.state()
.qe_r_ref
.qe_w_ref
.handle_service_account_api_token_get(uat, uuid_or_name, eventid)
.handle_sync_account_finalise(uat, uuid_or_name, eventid)
.await;
to_tide_response(res, hvalue)
}
pub async fn sync_account_id_get_terminate(req: tide::Request<AppState>) -> tide::Result {
let uat = req.get_current_uat();
let uuid_or_name = req.get_url_param("id")?;
let (eventid, hvalue) = req.new_eventid();
let res = req
.state()
.qe_w_ref
.handle_sync_account_terminate(uat, uuid_or_name, eventid)
.await;
to_tide_response(res, hvalue)
}
*/
pub async fn sync_account_token_post(mut req: tide::Request<AppState>) -> tide::Result {
let uat = req.get_current_uat();
@@ -257,8 +262,15 @@ pub fn scim_route_setup(appserver: &mut tide::Route<'_, AppState>, routemap: &mu
sync_account_route
.at("/:id")
.mapped_get(routemap, sync_account_id_get)
.mapped_patch(routemap, sync_account_id_patch)
.mapped_delete(routemap, sync_account_id_delete);
.mapped_patch(routemap, sync_account_id_patch);
sync_account_route
.at("/:id/_finalise")
.mapped_get(routemap, sync_account_id_get_finalise);
sync_account_route
.at("/:id/_terminate")
.mapped_get(routemap, sync_account_id_get_terminate);
sync_account_route
.at("/:id/_sync_token")


@@ -5,8 +5,10 @@ use std::fs;
use std::path::Path;
use chrono::Utc;
use saffron::parse::{CronExpr, English};
use saffron::Cron;
use tokio::sync::broadcast;
use tokio::time::{interval, sleep, Duration}; use tokio::time::{interval, sleep, Duration};


@@ -366,6 +366,7 @@ pub struct AccessEffectivePermission {
// I don't think we need this? The ident is implied by the requestor.
// ident: Uuid,
pub target: Uuid,
pub delete: bool,
pub search: BTreeSet<AttrString>,
pub modify_pres: BTreeSet<AttrString>,
pub modify_rem: BTreeSet<AttrString>,
@@ -538,41 +539,41 @@ pub trait AccessControlsTransaction<'a> {
// For each entry
let allowed_entries: Vec<Arc<EntrySealedCommitted>> =
entries
.into_iter()
.filter(|e| {
// For each acp
let allowed_attrs: BTreeSet<&str> = related_acp
.iter()
.filter_map(|(acs, f_res)| {
// if it applies
if e.entry_match_no_index(f_res) {
security_access!(entry = ?e.get_uuid(), acs = %acs.acp.name, "entry matches acs");
// add search_attrs to allowed.
Some(acs.attrs.iter().map(|s| s.as_str()))
} else {
trace!(entry = ?e.get_uuid(), acs = %acs.acp.name, "entry DOES NOT match acs"); // should this be `security_access`?
None
}
})
.flatten()
.collect();
security_access!(
requested = ?requested_attrs,
allows = ?allowed_attrs,
"attributes",
);
// is attr set a subset of allowed set?
// true -> entry is allowed in result set
// false -> the entry is not allowed to be searched by this entity, so is
// excluded.
let decision = requested_attrs.is_subset(&allowed_attrs);
security_access!(?decision, "search attr decision");
decision
})
.collect();
if allowed_entries.is_empty() {
security_access!("denied ❌");
@@ -1104,6 +1105,44 @@ pub trait AccessControlsTransaction<'a> {
Ok(r)
}
#[instrument(level = "debug", name = "access::delete_related_acp", skip_all)]
fn delete_related_acp<'b>(
&'b self,
ident: &Identity,
) -> Vec<(&'b AccessControlDelete, Filter<FilterValidResolved>)> {
// Some useful references we'll use for the remainder of the operation
let delete_state = self.get_delete();
let acp_resolve_filter_cache = self.get_acp_resolve_filter_cache();
let related_acp: Vec<(&AccessControlDelete, _)> = delete_state
.iter()
.filter_map(|acs| {
if let Some(receiver) = acs.acp.receiver {
if ident.is_memberof(receiver) {
acs.acp
.targetscope
.resolve(ident, None, Some(acp_resolve_filter_cache))
.map_err(|e| {
admin_error!(
"A internal filter/event was passed for resolution!?!? {:?}",
e
);
e
})
.ok()
.map(|f_res| (acs, f_res))
} else {
None
}
} else {
None
}
})
.collect();
related_acp
}
#[instrument(level = "debug", name = "access::delete_allow_operation", skip_all)]
fn delete_allow_operation(
&self,
@@ -1134,36 +1173,8 @@ pub trait AccessControlsTransaction<'a> {
}
};
// Some useful references we'll use for the remainder of the operation
let delete_state = self.get_delete();
let acp_resolve_filter_cache = self.get_acp_resolve_filter_cache();
// Find the acps that relate to the caller.
let related_acp: Vec<(&AccessControlDelete, _)> = delete_state
let related_acp = self.delete_related_acp(&de.ident);
.iter()
.filter_map(|acs| {
if let Some(receiver) = acs.acp.receiver {
if de.ident.is_memberof(receiver) {
acs.acp
.targetscope
.resolve(&de.ident, None, Some(acp_resolve_filter_cache))
.map_err(|e| {
admin_error!(
"A internal filter/event was passed for resolution!?!? {:?}",
e
);
e
})
.ok()
.map(|f_res| (acs, f_res))
} else {
None
}
} else {
None
}
})
.collect();
/*
related_acp.iter().for_each(|racp| {
@@ -1256,7 +1267,8 @@ pub trait AccessControlsTransaction<'a> {
// == modify ==
let modify_related_acp: Vec<(&AccessControlModify, _)> = self.modify_related_acp(ident);
let modify_related_acp = self.modify_related_acp(ident);
let delete_related_acp = self.delete_related_acp(ident);
/*
modify_related_acp.iter().for_each(|(racp, _)| {
@@ -1323,8 +1335,23 @@ pub trait AccessControlsTransaction<'a> {
.flat_map(|acp| acp.classes.iter().cloned())
.collect();
// == delete ==
let delete = delete_related_acp.iter().any(|(acd, f_res)| {
if e.entry_match_no_index(f_res) {
security_access!(
entry_uuid = ?e.get_uuid(),
acs = %acd.acp.name,
"entry matches acd"
);
true
} else {
false
}
});
AccessEffectivePermission {
target: e.get_uuid(),
delete,
search: search_effective,
modify_pres,
modify_rem,
@@ -2859,6 +2886,7 @@ mod tests {
vec![],
&r_set,
vec![AccessEffectivePermission {
delete: false,
target: uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"),
search: btreeset![AttrString::from("name")],
modify_pres: BTreeSet::new(),
@@ -2899,6 +2927,7 @@ mod tests {
}],
&r_set,
vec![AccessEffectivePermission {
delete: false,
target: uuid!("cc8e95b4-c24f-4d68-ba54-8bed76f63930"),
search: BTreeSet::new(),
modify_pres: btreeset![AttrString::from("name")],


@@ -12,6 +12,7 @@ use crate::idm::server::{IdmServerProxyReadTransaction, IdmServerProxyWriteTrans
use crate::prelude::*;
use crate::value::Session;
use crate::access::AccessControlsTransaction;
use crate::schema::{SchemaClass, SchemaTransaction};
// Internals of a Scim Sync token
@@ -227,6 +228,257 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
}
}
pub struct ScimSyncFinaliseEvent {
pub ident: Identity,
pub target: Uuid,
}
impl<'a> IdmServerProxyWriteTransaction<'a> {
pub fn scim_sync_finalise(
&mut self,
sfe: &ScimSyncFinaliseEvent,
) -> Result<(), OperationError> {
// Get the target and ensure it's really a sync account
let entry = self
.qs_write
.internal_search_uuid(sfe.target)
.map_err(|e| {
admin_error!(?e, "Failed to search sync account");
e
})?;
let sync_account = SyncAccount::try_from_entry_rw(&entry).map_err(|e| {
admin_error!(?e, "Failed to covert sync account");
e
})?;
let sync_uuid = sync_account.uuid;
// Do we have permission to delete it?
let effective_perms = self
.qs_write
.get_accesscontrols()
.effective_permission_check(&sfe.ident, Some(BTreeSet::default()), &[entry])?;
let eperm = effective_perms.get(0).ok_or_else(|| {
admin_error!("Effective Permission check returned no results");
OperationError::InvalidState
})?;
if eperm.target != sync_account.uuid {
admin_error!("Effective Permission check target differs from requested entry uuid");
return Err(OperationError::InvalidEntryState);
}
// ⚠️ Assume that anything before this line is unauthorised, and after this line IS
// authorised!
//
// We do this check via effective permissions because a lot of the operations that
// follow will require permissions beyond what system admins have.
if !eperm.delete {
security_info!(
"Requestor {} does not have permission to delete sync account {}",
sfe.ident,
sync_account.name
);
return Err(OperationError::NotAuthorised);
}
// Referential integrity tries to assert that the reference to sync_parent_uuid is valid
// from within the recycle bin. To prevent this, we have to "finalise" first, transfer
// authority to kanidm, THEN we do the delete which breaks the reference requirement.
//
// Importantly, we have to do this for items that are in the recycle bin!
// First, get the set of uuids that exist. We need this so we have the set of uuids we'll
// be deleting *at the end*.
let f_all_sync = filter_all!(f_and!([
f_eq("class", PVCLASS_SYNC_OBJECT.clone()),
f_eq("sync_parent_uuid", PartialValue::Refer(sync_uuid))
]));
// TODO: This could benefit from a search that only grabs uuids?
let existing_entries = self
.qs_write
.internal_search(f_all_sync.clone())
.map_err(|e| {
error!("Failed to determine existing entries set");
e
})?;
// This is the delete filter we need later.
let filter_or: Vec<_> = existing_entries
.iter()
.map(|e| f_eq("uuid", PartialValue::Uuid(e.get_uuid())))
.collect();
// We only need to delete the sync account itself.
let delete_filter = filter!(f_eq("uuid", PartialValue::Uuid(sync_uuid)));
if !filter_or.is_empty() {
// Now modify these to remove their sync related attributes.
let schema = self.qs_write.get_schema();
let sync_class = schema.get_classes().get("sync_object").ok_or_else(|| {
error!("Failed to access sync_object class, schema corrupt");
OperationError::InvalidState
})?;
let modlist = std::iter::once(Modify::Removed(
"class".into(),
PartialValue::new_class("sync_object"),
))
.chain(
sync_class
.may_iter()
.map(|aname| Modify::Purged(aname.clone())),
)
.collect();
let mods = ModifyList::new_list(modlist);
self.qs_write
.internal_modify(&f_all_sync, &mods)
.map_err(|e| {
error!("Failed to modify sync objects to grant authority to kanidm");
e
})?;
};
self.qs_write.internal_delete(&delete_filter).map_err(|e| {
error!(?e, "Failed to terminate sync account");
e
})
}
}
pub struct ScimSyncTerminateEvent {
pub ident: Identity,
pub target: Uuid,
}
impl<'a> IdmServerProxyWriteTransaction<'a> {
pub fn scim_sync_terminate(
&mut self,
ste: &ScimSyncTerminateEvent,
) -> Result<(), OperationError> {
// Get the target and ensure it's really a sync account
let entry = self
.qs_write
.internal_search_uuid(ste.target)
.map_err(|e| {
admin_error!(?e, "Failed to search sync account");
e
})?;
let sync_account = SyncAccount::try_from_entry_rw(&entry).map_err(|e| {
admin_error!(?e, "Failed to covert sync account");
e
})?;
let sync_uuid = sync_account.uuid;
// Do we have permission to delete it?
let effective_perms = self
.qs_write
.get_accesscontrols()
.effective_permission_check(&ste.ident, Some(BTreeSet::default()), &[entry])?;
let eperm = effective_perms.get(0).ok_or_else(|| {
admin_error!("Effective Permission check returned no results");
OperationError::InvalidState
})?;
if eperm.target != sync_account.uuid {
admin_error!("Effective Permission check target differs from requested entry uuid");
return Err(OperationError::InvalidEntryState);
}
// ⚠️ Assume that anything before this line is unauthorised, and after this line IS
// authorised!
//
// We do this check via effective permissions because a lot of the operations that
// follow will require permissions beyond what system admins have.
if !eperm.delete {
security_info!(
"Requestor {} does not have permission to delete sync account {}",
ste.ident,
sync_account.name
);
return Err(OperationError::NotAuthorised);
}
// Referential integrity tries to assert that the reference to sync_parent_uuid is valid
// from within the recycle bin. To prevent this, we have to "finalise" first, transfer
// authority to kanidm, THEN we do the delete which breaks the reference requirement.
//
// Importantly, we have to do this for items that are in the recycle bin!
// First, get the set of uuids that exist. We need this so we have the set of uuids we'll
// be deleting *at the end*.
let f_all_sync = filter_all!(f_and!([
f_eq("class", PVCLASS_SYNC_OBJECT.clone()),
f_eq("sync_parent_uuid", PartialValue::Refer(sync_uuid))
]));
// TODO: This could benefit from a search that only grabs uuids?
let existing_entries = self
.qs_write
.internal_search(f_all_sync.clone())
.map_err(|e| {
error!("Failed to determine existing entries set");
e
})?;
// This is the delete filter we need later.
let filter_or: Vec<_> = existing_entries
.iter()
.map(|e| f_eq("uuid", PartialValue::Uuid(e.get_uuid())))
.collect();
let delete_filter = if filter_or.is_empty() {
// We only need to delete the sync account itself.
filter!(f_eq("uuid", PartialValue::Uuid(sync_uuid)))
} else {
// Now modify these to remove their sync related attributes.
let schema = self.qs_write.get_schema();
let sync_class = schema.get_classes().get("sync_object").ok_or_else(|| {
error!("Failed to access sync_object class, schema corrupt");
OperationError::InvalidState
})?;
let modlist = std::iter::once(Modify::Removed(
"class".into(),
PartialValue::new_class("sync_object"),
))
.chain(
sync_class
.may_iter()
.map(|aname| Modify::Purged(aname.clone())),
)
.collect();
let mods = ModifyList::new_list(modlist);
self.qs_write
.internal_modify(&f_all_sync, &mods)
.map_err(|e| {
error!("Failed to modify sync objects to grant authority to kanidm");
e
})?;
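// Delete the sync account and every entry it provided in a single operation.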
filter!(f_or!([
f_eq("uuid", PartialValue::Uuid(sync_uuid)),
f_or(filter_or)
]))
};
self.qs_write.internal_delete(&delete_filter).map_err(|e| {
error!(?e, "Failed to terminate sync account");
e
})
}
}
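// Contrast with finalise above: finalise strips the sync_object class and hands the
// synced entries over to kanidm, while terminate performs the same strip (to satisfy
// referential integrity) and then deletes the sync account and everything it provided.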
pub struct ScimSyncUpdateEvent {
pub ident: Identity,
}
@@ -991,7 +1243,10 @@ mod tests {
use std::sync::Arc;
use std::time::Duration;

- use super::{GenerateScimSyncTokenEvent, ScimSyncToken, ScimSyncUpdateEvent};
+ use super::{
+     GenerateScimSyncTokenEvent, ScimSyncFinaliseEvent, ScimSyncTerminateEvent, ScimSyncToken,
+     ScimSyncUpdateEvent,
+ };

use async_std::task;
@@ -2042,6 +2297,225 @@ mod tests {
})
}
#[test]
fn test_idm_scim_sync_finalise_1() {
run_idm_test!(|_qs: &QueryServer,
idms: &IdmServer,
_idms_delayed: &mut IdmServerDelayed| {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let mut idms_prox_write = task::block_on(idms.proxy_write(ct));
let (sync_uuid, ident) = test_scim_sync_apply_setup_ident(&mut idms_prox_write, ct);
let sse = ScimSyncUpdateEvent { ident };
let changes =
serde_json::from_str(TEST_SYNC_SCIM_IPA_1).expect("failed to parse scim sync");
assert!(idms_prox_write.scim_sync_apply(&sse, &changes, ct).is_ok());
assert!(idms_prox_write.commit().is_ok());
// Finalise the sync account.
let mut idms_prox_write = task::block_on(idms.proxy_write(ct));
let ident = idms_prox_write
.qs_write
.internal_search_uuid(UUID_ADMIN)
.map(Identity::from_impersonate_entry_readwrite)
.expect("Failed to get admin");
let sfe = ScimSyncFinaliseEvent {
ident,
target: sync_uuid,
};
idms_prox_write
.scim_sync_finalise(&sfe)
.expect("Failed to finalise sync account");
// Check that the entries still exist, but no longer have the sync_object class attached.
let testgroup = get_single_entry("testgroup", &mut idms_prox_write);
assert!(!testgroup.attribute_equality("class", &PVCLASS_SYNC_OBJECT));
let testposix = get_single_entry("testposix", &mut idms_prox_write);
assert!(!testposix.attribute_equality("class", &PVCLASS_SYNC_OBJECT));
let testexternal = get_single_entry("testexternal", &mut idms_prox_write);
assert!(!testexternal.attribute_equality("class", &PVCLASS_SYNC_OBJECT));
let testuser = get_single_entry("testuser", &mut idms_prox_write);
assert!(!testuser.attribute_equality("class", &PVCLASS_SYNC_OBJECT));
assert!(idms_prox_write.commit().is_ok());
})
}
#[test]
fn test_idm_scim_sync_finalise_2() {
run_idm_test!(|_qs: &QueryServer,
idms: &IdmServer,
_idms_delayed: &mut IdmServerDelayed| {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let mut idms_prox_write = task::block_on(idms.proxy_write(ct));
let (sync_uuid, ident) = test_scim_sync_apply_setup_ident(&mut idms_prox_write, ct);
let sse = ScimSyncUpdateEvent { ident };
let changes =
serde_json::from_str(TEST_SYNC_SCIM_IPA_1).expect("failed to parse scim sync");
assert!(idms_prox_write.scim_sync_apply(&sse, &changes, ct).is_ok());
// The difference in this test is that the refresh deletes some entries, so the
// recycle bin case needs to be handled.
let changes = serde_json::from_str(TEST_SYNC_SCIM_IPA_REFRESH_1)
.expect("failed to parse scim sync");
assert!(idms_prox_write.scim_sync_apply(&sse, &changes, ct).is_ok());
assert!(idms_prox_write.commit().is_ok());
// Finalise the sync account.
let mut idms_prox_write = task::block_on(idms.proxy_write(ct));
let ident = idms_prox_write
.qs_write
.internal_search_uuid(UUID_ADMIN)
.map(Identity::from_impersonate_entry_readwrite)
.expect("Failed to get admin");
let sfe = ScimSyncFinaliseEvent {
ident,
target: sync_uuid,
};
idms_prox_write
.scim_sync_finalise(&sfe)
.expect("Failed to finalise sync account");
// Check that the entries still exist, but no longer have the sync_object class attached.
let testgroup = get_single_entry("testgroup", &mut idms_prox_write);
assert!(!testgroup.attribute_equality("class", &PVCLASS_SYNC_OBJECT));
let testuser = get_single_entry("testuser", &mut idms_prox_write);
assert!(!testuser.attribute_equality("class", &PVCLASS_SYNC_OBJECT));
for iname in ["testposix", "testexternal"] {
trace!(%iname);
assert!(idms_prox_write
.qs_write
.internal_search(filter!(f_eq("name", PartialValue::new_iname(iname))))
.unwrap()
.is_empty());
}
assert!(idms_prox_write.commit().is_ok());
})
}
#[test]
fn test_idm_scim_sync_terminate_1() {
run_idm_test!(|_qs: &QueryServer,
idms: &IdmServer,
_idms_delayed: &mut IdmServerDelayed| {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let mut idms_prox_write = task::block_on(idms.proxy_write(ct));
let (sync_uuid, ident) = test_scim_sync_apply_setup_ident(&mut idms_prox_write, ct);
let sse = ScimSyncUpdateEvent { ident };
let changes =
serde_json::from_str(TEST_SYNC_SCIM_IPA_1).expect("failed to parse scim sync");
assert!(idms_prox_write.scim_sync_apply(&sse, &changes, ct).is_ok());
assert!(idms_prox_write.commit().is_ok());
// Terminate the sync account
let mut idms_prox_write = task::block_on(idms.proxy_write(ct));
let ident = idms_prox_write
.qs_write
.internal_search_uuid(UUID_ADMIN)
.map(Identity::from_impersonate_entry_readwrite)
.expect("Failed to get admin");
let sfe = ScimSyncTerminateEvent {
ident,
target: sync_uuid,
};
idms_prox_write
.scim_sync_terminate(&sfe)
.expect("Failed to terminate sync account");
// Check that the entries no longer exist
for iname in ["testgroup", "testposix", "testexternal", "testuser"] {
trace!(%iname);
assert!(idms_prox_write
.qs_write
.internal_search(filter!(f_eq("name", PartialValue::new_iname(iname))))
.unwrap()
.is_empty());
}
assert!(idms_prox_write.commit().is_ok());
})
}
#[test]
fn test_idm_scim_sync_terminate_2() {
run_idm_test!(|_qs: &QueryServer,
idms: &IdmServer,
_idms_delayed: &mut IdmServerDelayed| {
let ct = Duration::from_secs(TEST_CURRENT_TIME);
let mut idms_prox_write = task::block_on(idms.proxy_write(ct));
let (sync_uuid, ident) = test_scim_sync_apply_setup_ident(&mut idms_prox_write, ct);
let sse = ScimSyncUpdateEvent { ident };
let changes =
serde_json::from_str(TEST_SYNC_SCIM_IPA_1).expect("failed to parse scim sync");
assert!(idms_prox_write.scim_sync_apply(&sse, &changes, ct).is_ok());
// The difference in this test is that the refresh deletes some entries, so the
// recycle bin case needs to be handled.
let changes = serde_json::from_str(TEST_SYNC_SCIM_IPA_REFRESH_1)
.expect("failed to parse scim sync");
assert!(idms_prox_write.scim_sync_apply(&sse, &changes, ct).is_ok());
assert!(idms_prox_write.commit().is_ok());
// Terminate the sync account
let mut idms_prox_write = task::block_on(idms.proxy_write(ct));
let ident = idms_prox_write
.qs_write
.internal_search_uuid(UUID_ADMIN)
.map(Identity::from_impersonate_entry_readwrite)
.expect("Failed to get admin");
let sfe = ScimSyncTerminateEvent {
ident,
target: sync_uuid,
};
idms_prox_write
.scim_sync_terminate(&sfe)
.expect("Failed to terminate sync account");
// Check that the entries no longer exist
for iname in ["testgroup", "testposix", "testexternal", "testuser"] {
trace!(%iname);
assert!(idms_prox_write
.qs_write
.internal_search(filter!(f_eq("name", PartialValue::new_iname(iname))))
.unwrap()
.is_empty());
}
assert!(idms_prox_write.commit().is_ok());
})
}
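// The SCIM sync change sets consumed by the tests above; TEST_SYNC_SCIM_IPA_1 is a
// refresh-state payload.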
const TEST_SYNC_SCIM_IPA_1: &str = r#"
{
"from_state": "Refresh",
View file
@@ -1672,7 +1672,6 @@ impl<'a> SchemaWriteTransaction<'a> {
uuid: UUID_SCHEMA_CLASS_SYNC_OBJECT,
description: String::from("A class denoting that an entry is synchronised from an external source. This entry may not be modifiable."),
systemmust: vec![
- AttrString::from("uuid"),
AttrString::from("sync_parent_uuid")
],
systemmay: vec![
View file
@@ -131,7 +131,14 @@ impl<'a> QueryServerWriteTransaction<'a> {
.collect::<Result<Vec<EntryInvalidCommitted>, _>>()?;

// Did any of the candidates now become masked?
- if candidates.iter().any(|e| e.mask_recycled_ts().is_none()) {
+ if std::iter::zip(
+     pre_candidates
+         .iter()
+         .map(|e| e.mask_recycled_ts().is_none()),
+     candidates.iter().map(|e| e.mask_recycled_ts().is_none()),
+ )
+ .any(|(a, b)| a != b)
+ {
admin_warn!("Refusing to apply modifications that are attempting to bypass replication state machine.");
return Err(OperationError::AccessDenied);
}
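Why the pairwise check matters (an editor's sketch, not part of the commit): assuming
mask_recycled_ts() returns None for entries that are recycled or tombstoned, the old
test refused whenever any candidate ended up masked, which would also block legitimate
modifications to entries already sitting in the recycle bin, exactly what the sync
finalise and terminate paths above need to do. Comparing the pre and post states
pairwise refuses only a *transition* across the live/recycled boundary:

// Standalone illustration with plain booleans (true = masked).
let pre = [true, false];
let post = [true, true];
// Position 0 is unchanged: modifying an already-recycled entry is fine.
// Position 1 flipped live -> masked, so the operation must be refused.
let bypass = std::iter::zip(pre, post).any(|(a, b)| a != b);
assert!(bypass);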
View file
@@ -1809,7 +1809,14 @@ impl<'a> QueryServerWriteTransaction<'a> {
trace!("modify: candidates -> {:?}", candidates);

// Did any of the candidates now become masked?
- if candidates.iter().any(|e| e.mask_recycled_ts().is_none()) {
+ if std::iter::zip(
+     pre_candidates
+         .iter()
+         .map(|e| e.mask_recycled_ts().is_none()),
+     candidates.iter().map(|e| e.mask_recycled_ts().is_none()),
+ )
+ .any(|(a, b)| a != b)
+ {
admin_warn!("Refusing to apply modifications that are attempting to bypass replication state machine.");
return Err(OperationError::AccessDenied);
}