mirror of https://github.com/kanidm/kanidm.git (synced 2025-02-23 12:37:00 +01:00)
parent 19f9fde012
commit a91bf55471

Cargo.lock (generated, 14 changed lines)
@@ -2840,6 +2840,7 @@ dependencies = [
 "clap_complete",
 "cron",
 "kanidm_client",
 "kanidm_lib_file_permissions",
 "kanidm_proto",
 "kanidm_utils_users",
 "kanidmd_lib",
@@ -2864,6 +2865,7 @@ dependencies = [
 "clap_complete",
 "cron",
 "kanidm_client",
 "kanidm_lib_file_permissions",
 "kanidm_proto",
 "kanidm_utils_users",
 "kanidmd_lib",
@@ -3045,6 +3047,7 @@ dependencies = [
 "chrono",
 "compact_jwt",
 "cron",
+"filetime",
 "futures",
 "futures-util",
 "http",
@@ -3090,7 +3093,6 @@ dependencies = [
 "dyn-clone",
 "enum-iterator",
 "fernet",
-"filetime",
 "futures",
 "hashbrown 0.14.1",
 "hex",
@@ -3126,7 +3128,6 @@ dependencies = [
 "tokio",
 "tokio-util",
 "toml",
-"touch",
 "tracing",
 "url",
 "urlencoding",
@@ -5367,15 +5368,6 @@ dependencies = [
 "winnow",
 ]
-
-[[package]]
-name = "touch"
-version = "0.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0ff985ccaedc537018a1b6c7f377d25e16d08aa1fcc5f8f4fba984c7e69cf09"
-dependencies = [
- "log",
-]
 
 [[package]]
 name = "tower"
 version = "0.4.13"

@@ -195,7 +195,6 @@ tokio-openssl = "^0.6.3"
 tokio-util = "^0.7.9"
 
 toml = "^0.5.11"
-touch = "^0.0.1"
 tracing = { version = "^0.1.37", features = [
     "max_level_trace",
     "release_max_level_debug",

@@ -22,6 +22,7 @@ bytes = { workspace = true }
 chrono = { workspace = true }
 compact_jwt = { workspace = true }
 cron = { workspace = true }
+filetime = { workspace = true }
 futures = { workspace = true }
 futures-util = { workspace = true }
 http = { workspace = true }

@@ -30,7 +30,6 @@ use kanidmd_lib::{
     idm::server::{IdmServer, IdmServerTransaction},
     idm::serviceaccount::{DestroyApiTokenEvent, GenerateApiTokenEvent},
     modify::{Modify, ModifyInvalid, ModifyList},
-    utils::duration_from_epoch_now,
     value::{PartialValue, Value},
 };
@@ -1568,9 +1567,15 @@ impl QueryServerWriteV1 {
             .qs_write
             .purge_tombstones()
             .and_then(|_| idms_prox_write.commit());
-        admin_info!(?res, "Purge tombstones result");
-        #[allow(clippy::expect_used)]
-        res.expect("Invalid Server State");
+
+        match res {
+            Ok(()) => {
+                debug!("Purge tombstone success");
+            }
+            Err(err) => {
+                error!(?err, "Unable to purge tombstones");
+            }
+        }
     }
 
     #[instrument(
@@ -1586,9 +1591,15 @@ impl QueryServerWriteV1 {
             .qs_write
             .purge_recycled()
             .and_then(|_| idms_prox_write.commit());
-        admin_info!(?res, "Purge recycled result");
-        #[allow(clippy::expect_used)]
-        res.expect("Invalid Server State");
+
+        match res {
+            Ok(()) => {
+                debug!("Purge recyclebin success");
+            }
+            Err(err) => {
+                error!(?err, "Unable to purge recyclebin");
+            }
+        }
     }
 
     pub(crate) async fn handle_delayedaction(&self, da: DelayedAction) {

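The two purge hunks above share one shape: a scheduled task's `Result` is matched and logged instead of being `expect()`ed, so a transient failure no longer aborts the server. A minimal, self-contained sketch of that pattern follows; `purge_tombstones` here is a stand-in for any fallible task body, not the project's real API.

```rust
// Stand-in for a fallible background task body.
fn purge_tombstones() -> Result<(), &'static str> {
    Ok(())
}

fn run_purge_task() {
    let res = purge_tombstones();
    match res {
        // Success is only interesting at debug verbosity.
        Ok(()) => println!("debug: purge success"),
        // Failure is logged, and the scheduler keeps running.
        Err(err) => eprintln!("error: unable to purge: {err:?}"),
    }
}

fn main() {
    run_purge_task();
}
```
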
@@ -69,12 +69,13 @@ pub async fn kopid_middleware<B>(
     request.extensions_mut().insert(KOpId { eventid, uat });
     let mut response = next.run(request).await;
 
-    #[allow(clippy::expect_used)]
-    response.headers_mut().insert(
-        "X-KANIDM-OPID",
-        HeaderValue::from_str(&eventid.as_hyphenated().to_string())
-            .expect("Failed to set X-KANIDM-OPID header in response!"),
-    );
+    // This conversion *should never* fail. If it does, rather than panic, we warn and
+    // just don't put the id in the response.
+    let _ = HeaderValue::from_str(&eventid.as_hyphenated().to_string())
+        .map(|hv| response.headers_mut().insert("X-KANIDM-OPID", hv))
+        .map_err(|err| {
+            warn!(?err, "An invalid operation id was encountered");
+        });
 
     response
 }

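The middleware change turns a panicking header insert into a best-effort one: insert on success, warn and continue on failure. Below is a dependency-free sketch of the same `map`/`map_err` discard pattern; `parse_header_value` is a hypothetical validator standing in for `HeaderValue::from_str`, and the map is a stand-in for a response header set.

```rust
use std::collections::HashMap;

// Hypothetical validator standing in for HeaderValue::from_str.
fn parse_header_value(s: &str) -> Result<String, String> {
    if s.is_ascii() {
        Ok(s.to_string())
    } else {
        Err(format!("invalid header value: {s}"))
    }
}

fn main() {
    let mut headers: HashMap<&str, String> = HashMap::new();
    let eventid = "example-op-id";
    // On success insert the header; on failure warn and continue, never panic.
    let _ = parse_header_value(eventid)
        .map(|hv| headers.insert("X-KANIDM-OPID", hv))
        .map_err(|err| eprintln!("warn: an invalid operation id was encountered: {err}"));
}
```
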
@@ -33,11 +33,13 @@ mod https;
 mod interval;
 mod ldaps;
 mod repl;
+mod utils;
 
 use std::fmt::{Display, Formatter};
 use std::path::Path;
 use std::sync::Arc;
 
+use crate::utils::touch_file_or_quit;
 use compact_jwt::JwsSigner;
 use kanidm_proto::v1::OperationError;
 use kanidmd_lib::be::{Backend, BackendConfig, BackendTransaction, FsType};
@@ -45,7 +47,6 @@ use kanidmd_lib::idm::ldap::LdapServer;
 use kanidmd_lib::prelude::*;
 use kanidmd_lib::schema::Schema;
 use kanidmd_lib::status::StatusActor;
-use kanidmd_lib::utils::{duration_from_epoch_now, touch_file_or_quit};
 #[cfg(not(target_family = "windows"))]
 use libc::umask;
 
@@ -106,7 +107,7 @@ async fn setup_qs_idms(
     config: &Configuration,
 ) -> Result<(QueryServer, IdmServer, IdmServerDelayed, IdmServerAudit), OperationError> {
     // Create a query_server implementation
-    let query_server = QueryServer::new(be, schema, config.domain.clone());
+    let query_server = QueryServer::new(be, schema, config.domain.clone())?;
 
     // TODO #62: Should the IDM parts be broken out to the IdmServer?
     // What's important about this initial setup here is that it also triggers
@@ -134,7 +135,7 @@ async fn setup_qs(
     config: &Configuration,
 ) -> Result<QueryServer, OperationError> {
     // Create a query_server implementation
-    let query_server = QueryServer::new(be, schema, config.domain.clone());
+    let query_server = QueryServer::new(be, schema, config.domain.clone())?;
 
     // TODO #62: Should the IDM parts be broken out to the IdmServer?
     // What's important about this initial setup here is that it also triggers
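Both setup functions change by one character: `QueryServer::new` now returns a `Result`, and the trailing `?` propagates a construction failure to the caller instead of deferring a panic. A minimal sketch of that shape, with stand-in types rather than kanidm's real ones:

```rust
#[derive(Debug)]
struct OperationError;

struct QueryServer;

impl QueryServer {
    // Stand-in fallible constructor.
    fn new(ok: bool) -> Result<Self, OperationError> {
        if ok { Ok(QueryServer) } else { Err(OperationError) }
    }
}

fn setup_qs(ok: bool) -> Result<QueryServer, OperationError> {
    // The `?` converts a construction failure into an early return.
    let query_server = QueryServer::new(ok)?;
    Ok(query_server)
}

fn main() {
    assert!(setup_qs(true).is_ok());
    assert!(setup_qs(false).is_err());
}
```
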
@@ -175,7 +176,13 @@ macro_rules! dbscan_setup_be {
 
 pub fn dbscan_list_indexes_core(config: &Configuration) {
     let be = dbscan_setup_be!(config);
-    let mut be_rotxn = be.read();
+    let mut be_rotxn = match be.read() {
+        Ok(txn) => txn,
+        Err(err) => {
+            error!(?err, "Unable to proceed, backend read transaction failure.");
+            return;
+        }
+    };
 
     match be_rotxn.list_indexes() {
         Ok(mut idx_list) => {
@@ -192,7 +199,13 @@ pub fn dbscan_list_indexes_core(config: &Configuration) {
 
 pub fn dbscan_list_id2entry_core(config: &Configuration) {
     let be = dbscan_setup_be!(config);
-    let mut be_rotxn = be.read();
+    let mut be_rotxn = match be.read() {
+        Ok(txn) => txn,
+        Err(err) => {
+            error!(?err, "Unable to proceed, backend read transaction failure.");
+            return;
+        }
+    };
 
     match be_rotxn.list_id2entry() {
         Ok(mut id_list) => {
@@ -214,7 +227,13 @@ pub fn dbscan_list_index_analysis_core(config: &Configuration) {
 
 pub fn dbscan_list_index_core(config: &Configuration, index_name: &str) {
     let be = dbscan_setup_be!(config);
-    let mut be_rotxn = be.read();
+    let mut be_rotxn = match be.read() {
+        Ok(txn) => txn,
+        Err(err) => {
+            error!(?err, "Unable to proceed, backend read transaction failure.");
+            return;
+        }
+    };
 
     match be_rotxn.list_index_content(index_name) {
         Ok(mut idx_list) => {
@@ -231,7 +250,13 @@ pub fn dbscan_list_index_core(config: &Configuration, index_name: &str) {
 
 pub fn dbscan_get_id2entry_core(config: &Configuration, id: u64) {
     let be = dbscan_setup_be!(config);
-    let mut be_rotxn = be.read();
+    let mut be_rotxn = match be.read() {
+        Ok(txn) => txn,
+        Err(err) => {
+            error!(?err, "Unable to proceed, backend read transaction failure.");
+            return;
+        }
+    };
 
     match be_rotxn.get_id2entry(id) {
         Ok((id, value)) => println!("{:>8}: {}", id, value),
@@ -243,7 +268,16 @@ pub fn dbscan_get_id2entry_core(config: &Configuration, id: u64) {
 
 pub fn dbscan_quarantine_id2entry_core(config: &Configuration, id: u64) {
     let be = dbscan_setup_be!(config);
-    let mut be_wrtxn = be.write();
+    let mut be_wrtxn = match be.write() {
+        Ok(txn) => txn,
+        Err(err) => {
+            error!(
+                ?err,
+                "Unable to proceed, backend write transaction failure."
+            );
+            return;
+        }
+    };
 
     match be_wrtxn
         .quarantine_entry(id)
@@ -260,7 +294,13 @@ pub fn dbscan_quarantine_id2entry_core(config: &Configuration, id: u64) {
 
 pub fn dbscan_list_quarantined_core(config: &Configuration) {
     let be = dbscan_setup_be!(config);
-    let mut be_rotxn = be.read();
+    let mut be_rotxn = match be.read() {
+        Ok(txn) => txn,
+        Err(err) => {
+            error!(?err, "Unable to proceed, backend read transaction failure.");
+            return;
+        }
+    };
 
     match be_rotxn.list_quarantined() {
         Ok(mut id_list) => {
@@ -277,7 +317,16 @@ pub fn dbscan_list_quarantined_core(config: &Configuration) {
 
 pub fn dbscan_restore_quarantined_core(config: &Configuration, id: u64) {
     let be = dbscan_setup_be!(config);
-    let mut be_wrtxn = be.write();
+    let mut be_wrtxn = match be.write() {
+        Ok(txn) => txn,
+        Err(err) => {
+            error!(
+                ?err,
+                "Unable to proceed, backend write transaction failure."
+            );
+            return;
+        }
+    };
 
     match be_wrtxn
         .restore_quarantined(id)
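Every dbscan entry point above (and the backup/restore/reindex functions below) repeats one shape: acquire the now-fallible transaction, and on failure log a message and return rather than unwrap. A stand-alone sketch, where `Backend` and `read` are stand-ins for the real types:

```rust
struct Backend;

impl Backend {
    // Stand-in for the fallible transaction constructor.
    fn read(&self) -> Result<&'static str, &'static str> {
        Ok("txn")
    }
}

fn dbscan_list(be: &Backend) {
    let txn = match be.read() {
        Ok(txn) => txn,
        Err(err) => {
            // A CLI tool prefers a clean error message over a panic backtrace.
            eprintln!("error: unable to proceed, backend read transaction failure: {err}");
            return;
        }
    };
    println!("got {txn}");
}

fn main() {
    dbscan_list(&Backend);
}
```
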
@@ -309,7 +358,14 @@ pub fn backup_server_core(config: &Configuration, dst_path: &str) {
         }
     };
 
-    let mut be_ro_txn = be.read();
+    let mut be_ro_txn = match be.read() {
+        Ok(txn) => txn,
+        Err(err) => {
+            error!(?err, "Unable to proceed, backend read transaction failure.");
+            return;
+        }
+    };
+
     let r = be_ro_txn.backup(dst_path);
     match r {
         Ok(_) => info!("Backup success!"),
@@ -341,7 +397,16 @@ pub async fn restore_server_core(config: &Configuration, dst_path: &str) {
         }
     };
 
-    let mut be_wr_txn = be.write();
+    let mut be_wr_txn = match be.write() {
+        Ok(txn) => txn,
+        Err(err) => {
+            error!(
+                ?err,
+                "Unable to proceed, backend write transaction failure."
+            );
+            return;
+        }
+    };
     let r = be_wr_txn.restore(dst_path).and_then(|_| be_wr_txn.commit());
 
     if r.is_err() {
@@ -397,7 +462,16 @@ pub async fn reindex_server_core(config: &Configuration) {
     };
 
     // Reindex only the core schema attributes to bootstrap the process.
-    let mut be_wr_txn = be.write();
+    let mut be_wr_txn = match be.write() {
+        Ok(txn) => txn,
+        Err(err) => {
+            error!(
+                ?err,
+                "Unable to proceed, backend write transaction failure."
+            );
+            return;
+        }
+    };
     let r = be_wr_txn.reindex().and_then(|_| be_wr_txn.commit());
 
     // Now that's done, setup a minimal qs and reindex from that.
@@ -534,7 +608,14 @@ pub async fn verify_server_core(config: &Configuration) {
             return;
         }
     };
-    let server = QueryServer::new(be, schema_mem, config.domain.clone());
+
+    let server = match QueryServer::new(be, schema_mem, config.domain.clone()) {
+        Ok(qs) => qs,
+        Err(err) => {
+            error!(?err, "Failed to setup query server");
+            return;
+        }
+    };
 
     // Run verifications.
     let r = server.verify().await;

server/core/src/utils.rs (new file, 48 lines)

@@ -0,0 +1,48 @@
+use filetime::FileTime;
+use std::fs::File;
+use std::io::ErrorKind;
+use std::path::PathBuf;
+use std::time::SystemTime;
+
+pub fn touch_file_or_quit(file_path: &str) {
+    /*
+    Attempt to touch the file file_path, will quit the application if it fails for any reason.
+
+    Will also create a new file if it doesn't already exist.
+    */
+    if PathBuf::from(file_path).exists() {
+        let t = FileTime::from_system_time(SystemTime::now());
+        match filetime::set_file_times(file_path, t, t) {
+            Ok(_) => debug!(
+                "Successfully touched existing file {}, can continue",
+                file_path
+            ),
+            Err(e) => {
+                match e.kind() {
+                    ErrorKind::PermissionDenied => {
+                        // we bail here because you won't be able to write them back...
+                        error!("Permission denied writing to {}, quitting.", file_path)
+                    }
+                    _ => {
+                        error!(
+                            "Failed to write to {} due to error: {:?} ... quitting.",
+                            file_path, e
+                        )
+                    }
+                }
+                std::process::exit(1);
+            }
+        }
+    } else {
+        match File::create(file_path) {
+            Ok(_) => debug!("Successfully touched new file {}", file_path),
+            Err(e) => {
+                error!(
+                    "Failed to write to {} due to error: {:?} ... quitting.",
+                    file_path, e
+                );
+                std::process::exit(1);
+            }
+        };
+    }
+}

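The new helper singles out `ErrorKind::PermissionDenied` because a path the server cannot write now will also be unwritable at shutdown, so exiting early gives the clearest failure. A tiny, runnable sketch of that `ErrorKind` dispatch using only the standard library (the messages are illustrative, not the project's exact output):

```rust
use std::io::{Error, ErrorKind};

fn classify(e: &Error) -> &'static str {
    match e.kind() {
        // Bail early: if we can't write now, we won't be able to later either.
        ErrorKind::PermissionDenied => "permission denied, quitting",
        _ => "unexpected io failure, quitting",
    }
}

fn main() {
    let e = Error::new(ErrorKind::PermissionDenied, "denied");
    println!("{}", classify(&e));
}
```
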
@@ -181,8 +181,29 @@ async fn submit_admin_req(path: &str, req: AdminTaskRequest, output_mode: Consol
     }
 }
 
-#[tokio::main(flavor = "multi_thread")]
-async fn main() -> ExitCode {
+fn main() -> ExitCode {
+    let maybe_rt = tokio::runtime::Builder::new_multi_thread()
+        .enable_all()
+        .thread_name("kanidmd-thread-pool")
+        // .thread_stack_size(8 * 1024 * 1024)
+        // If we want a hook for thread start.
+        // .on_thread_start()
+        // In future, we can stop the whole process if a panic occurs.
+        // .unhandled_panic(tokio::runtime::UnhandledPanic::ShutdownRuntime)
+        .build();
+
+    let rt = match maybe_rt {
+        Ok(rt) => rt,
+        Err(err) => {
+            eprintln!("CRITICAL: Unable to start runtime! {:?}", err);
+            return ExitCode::FAILURE;
+        }
+    };
+
+    rt.block_on(kanidm_main())
+}
+
+async fn kanidm_main() -> ExitCode {
     // Read CLI args, determine what the user has asked us to do.
     let opt = KanidmdParser::parse();
 

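Replacing `#[tokio::main]` with a hand-built runtime means a runtime construction failure becomes a clean exit code instead of a panic before any logging exists. A stand-alone sketch of that startup shape, assuming the tokio crate (with the multi-thread runtime feature) is available; the thread name and `async_main` are placeholders:

```rust
use std::process::ExitCode;

fn main() -> ExitCode {
    // Build the runtime explicitly so a failure is a value we can handle.
    let maybe_rt = tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .thread_name("example-thread-pool")
        .build();

    let rt = match maybe_rt {
        Ok(rt) => rt,
        Err(err) => {
            eprintln!("CRITICAL: Unable to start runtime! {err:?}");
            return ExitCode::FAILURE;
        }
    };

    // Only now hand control to the async entry point.
    rt.block_on(async_main())
}

async fn async_main() -> ExitCode {
    ExitCode::SUCCESS
}
```
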
@@ -31,7 +31,6 @@ concread = { workspace = true }
 dyn-clone = { workspace = true }
 enum-iterator = { workspace = true }
 fernet = { workspace = true, features = ["fernet_danger_timestamps"] }
-filetime = { workspace = true }
 # futures-util = { workspace = true }
 hashbrown = { workspace = true }
 idlset = { workspace = true }
@@ -67,7 +66,6 @@ time = { workspace = true, features = ["serde", "std"] }
 tokio = { workspace = true, features = ["net", "sync", "time", "rt"] }
 tokio-util = { workspace = true, features = ["codec"] }
 toml = { workspace = true }
-touch = { workspace = true }
 nonempty = { workspace = true, features = ["serialize"] }
 
 tracing = { workspace = true, features = ["attributes"] }

@@ -1172,7 +1172,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
         self.db.set_db_ts_max(ts)
     }
 
-    pub(crate) fn get_db_index_version(&self) -> i64 {
+    pub(crate) fn get_db_index_version(&self) -> Result<i64, OperationError> {
         self.db.get_db_index_version()
     }
 
@@ -1292,34 +1292,35 @@ impl IdlArcSqlite {
         self.name_cache.try_quiesce();
     }
 
-    pub fn read(&self) -> IdlArcSqliteReadTransaction {
+    pub fn read(&self) -> Result<IdlArcSqliteReadTransaction, OperationError> {
         // IMPORTANT! Always take entrycache FIRST
         let entry_cache_read = self.entry_cache.read();
+        let db_read = self.db.read()?;
         let idl_cache_read = self.idl_cache.read();
         let name_cache_read = self.name_cache.read();
         let allids_read = self.allids.read();
-        let db_read = self.db.read();
 
-        IdlArcSqliteReadTransaction {
+        Ok(IdlArcSqliteReadTransaction {
             db: db_read,
             entry_cache: entry_cache_read,
             idl_cache: idl_cache_read,
             name_cache: name_cache_read,
             allids: allids_read,
-        }
+        })
     }
 
-    pub fn write(&self) -> IdlArcSqliteWriteTransaction {
+    pub fn write(&self) -> Result<IdlArcSqliteWriteTransaction, OperationError> {
         // IMPORTANT! Always take entrycache FIRST
         let entry_cache_write = self.entry_cache.write();
+        let db_write = self.db.write()?;
         let idl_cache_write = self.idl_cache.write();
         let name_cache_write = self.name_cache.write();
         let op_ts_max_write = self.op_ts_max.write();
        let allids_write = self.allids.write();
         let maxid_write = self.maxid.write();
-        let db_write = self.db.write();
         let keyhandles_write = self.keyhandles.write();
-        IdlArcSqliteWriteTransaction {
+
+        Ok(IdlArcSqliteWriteTransaction {
             db: db_write,
             entry_cache: entry_cache_write,
             idl_cache: idl_cache_write,
@@ -1328,7 +1329,7 @@ impl IdlArcSqlite {
             allids: allids_write,
             maxid: maxid_write,
             keyhandles: keyhandles_write,
-        }
+        })
     }
 
     /*
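The transaction constructors keep their documented lock ordering (entry cache first) but now take the fallible database lock with `?` before assembling the struct inside `Ok(...)`, so a failure propagates before any transaction state exists. A sketch of that construction order with stand-in types (not the real Arc caches or sqlite pool):

```rust
#[derive(Debug)]
struct OperationError;

struct Db;
struct DbRead;

impl Db {
    // Stand-in for the fallible db.read().
    fn read(&self) -> Result<DbRead, OperationError> {
        Ok(DbRead)
    }
}

struct ArcSqlite {
    db: Db,
    cache: Vec<u8>,
}

struct ReadTxn<'a> {
    _db: DbRead,
    _cache: &'a [u8],
}

impl ArcSqlite {
    fn read(&self) -> Result<ReadTxn<'_>, OperationError> {
        // Fallible acquisition happens before the struct is built; a failure
        // propagates to the caller instead of panicking mid-construction.
        let db = self.db.read()?;
        let cache = self.cache.as_slice();
        Ok(ReadTxn { _db: db, _cache: cache })
    }
}

fn main() {
    let arc = ArcSqlite { db: Db, cache: vec![1, 2, 3] };
    assert!(arc.read().is_ok());
}
```
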
@@ -120,7 +120,7 @@ pub struct IdlSqliteWriteTransaction {
     db_name: &'static str,
 }
 
-pub trait IdlSqliteTransaction {
+pub(crate) trait IdlSqliteTransaction {
     fn get_db_name(&self) -> &str;
 
     fn get_conn(&self) -> Result<&Connection, OperationError>;
@@ -692,7 +692,11 @@ impl Drop for IdlSqliteReadTransaction {
 }
 
 impl IdlSqliteReadTransaction {
-    pub fn new(pool: ConnPool, conn: Connection, db_name: &'static str) -> Self {
+    pub fn new(
+        pool: ConnPool,
+        conn: Connection,
+        db_name: &'static str,
+    ) -> Result<Self, OperationError> {
         // Start the transaction
         //
         // I'm happy for this to be an expect, because this is a huge failure
@@ -700,14 +704,14 @@ impl IdlSqliteReadTransaction {
         // this a Result<>
         //
         // There is no way to flag this is an RO operation.
-        #[allow(clippy::expect_used)]
         conn.execute("BEGIN DEFERRED TRANSACTION", [])
-            .expect("Unable to begin transaction!");
-        IdlSqliteReadTransaction {
+            .map_err(sqlite_error)?;
+
+        Ok(IdlSqliteReadTransaction {
             pool,
             conn: Some(conn),
             db_name,
-        }
+        })
     }
 }
 
@@ -744,16 +748,19 @@ impl Drop for IdlSqliteWriteTransaction {
 }
 
 impl IdlSqliteWriteTransaction {
-    pub fn new(pool: ConnPool, conn: Connection, db_name: &'static str) -> Self {
+    pub fn new(
+        pool: ConnPool,
+        conn: Connection,
+        db_name: &'static str,
+    ) -> Result<Self, OperationError> {
         // Start the transaction
-        #[allow(clippy::expect_used)]
         conn.execute("BEGIN EXCLUSIVE TRANSACTION", [])
-            .expect("Unable to begin transaction!");
-        IdlSqliteWriteTransaction {
+            .map_err(sqlite_error)?;
+        Ok(IdlSqliteWriteTransaction {
             pool,
             conn: Some(conn),
             db_name,
-        }
+        })
     }
 
     #[instrument(level = "debug", name = "idl_sqlite::commit", skip_all)]
@@ -764,7 +771,6 @@ impl IdlSqliteWriteTransaction {
         std::mem::swap(&mut dropping, &mut self.conn);
 
         if let Some(conn) = dropping {
-            #[allow(clippy::expect_used)]
             conn.execute("COMMIT TRANSACTION", [])
                 .map(|_| ())
                 .map_err(|e| {
@@ -772,10 +778,12 @@ impl IdlSqliteWriteTransaction {
                     OperationError::BackendEngine
                 })?;
 
-            #[allow(clippy::expect_used)]
             self.pool
                 .lock()
-                .expect("Unable to access db pool")
+                .map_err(|err| {
+                    error!(?err, "Unable to return connection to pool");
+                    OperationError::BackendEngine
+                })?
                 .push_back(conn);
 
             Ok(())
@@ -1513,11 +1521,9 @@ impl IdlSqliteWriteTransaction {
 
     // ===== inner helpers =====
     // Some of these are not self due to use in new()
-    fn get_db_version_key(&self, key: &str) -> i64 {
-        #[allow(clippy::expect_used)]
-        self.get_conn()
-            .expect("Unable to access transaction connection")
-            .query_row(
+    fn get_db_version_key(&self, key: &str) -> Result<i64, OperationError> {
+        self.get_conn().map(|conn| {
+            conn.query_row(
                 &format!(
                     "SELECT version FROM {}.db_version WHERE id = :id",
                     self.get_db_name()
@@ -1529,6 +1535,7 @@ impl IdlSqliteWriteTransaction {
                 // The value is missing, default to 0.
                 0
             })
+        })
     }
 
     fn set_db_version_key(&self, key: &str, v: i64) -> Result<(), OperationError> {
@@ -1555,7 +1562,7 @@ impl IdlSqliteWriteTransaction {
         })
     }
 
-    pub(crate) fn get_db_index_version(&self) -> i64 {
+    pub(crate) fn get_db_index_version(&self) -> Result<i64, OperationError> {
         self.get_db_version_key(DBV_INDEXV)
     }
 
@@ -1602,7 +1609,7 @@ impl IdlSqliteWriteTransaction {
             .map_err(sqlite_error)?;
 
         // If the table is empty, populate the versions as 0.
-        let mut dbv_id2entry = self.get_db_version_key(DBV_ID2ENTRY);
+        let mut dbv_id2entry = self.get_db_version_key(DBV_ID2ENTRY)?;
 
         trace!(%dbv_id2entry);
 
@@ -1890,51 +1897,47 @@ impl IdlSqlite {
     }
 
     pub(crate) fn get_allids_count(&self) -> Result<u64, OperationError> {
-        #[allow(clippy::expect_used)]
-        let guard = self.pool.lock().expect("Unable to lock connection pool.");
+        let guard = self.pool.lock().map_err(|err| {
+            error!(?err, "Unable to access connection to pool");
+            OperationError::BackendEngine
+        })?;
         // Get not pop here
-        #[allow(clippy::expect_used)]
-        let conn = guard
-            .get(0)
-            .expect("Unable to retrieve connection from pool.");
+        let conn = guard.get(0).ok_or_else(|| {
+            error!("Unable to retrieve connection from pool");
+            OperationError::BackendEngine
+        })?;
 
         conn.query_row("select count(id) from id2entry", [], |row| row.get(0))
             .map_err(sqlite_error)
     }
 
-    pub fn read(&self) -> IdlSqliteReadTransaction {
-        // When we make this async, this will allow us to backoff
-        // when we miss-grabbing from the conn-pool.
-        #[allow(clippy::expect_used)]
-        let conn = self
-            .pool
-            .lock()
-            .map_err(|e| {
-                error!(err = ?e, "Unable to lock connection pool.");
-            })
-            .ok()
-            .and_then(|mut q| {
-                trace!(?q);
-                q.pop_front()
-            })
-            .expect("Unable to retrieve connection from pool.");
+    pub fn read(&self) -> Result<IdlSqliteReadTransaction, OperationError> {
+        // This can't fail because we should only get here if a pool conn is available.
+        let mut guard = self.pool.lock().map_err(|e| {
+            error!(err = ?e, "Unable to lock connection pool.");
+            OperationError::BackendEngine
+        })?;
+
+        let conn = guard.pop_front().ok_or_else(|| {
+            error!("Unable to retrieve connection from pool.");
+            OperationError::BackendEngine
+        })?;
+
         IdlSqliteReadTransaction::new(self.pool.clone(), conn, self.db_name)
     }
 
-    pub fn write(&self) -> IdlSqliteWriteTransaction {
-        #[allow(clippy::expect_used)]
-        let conn = self
-            .pool
-            .lock()
-            .map_err(|e| {
-                error!(err = ?e, "Unable to lock connection pool.");
-            })
-            .ok()
-            .and_then(|mut q| {
-                trace!(?q);
-                q.pop_front()
-            })
-            .expect("Unable to retrieve connection from pool.");
+    pub fn write(&self) -> Result<IdlSqliteWriteTransaction, OperationError> {
+        // This can't fail because we should only get here if a pool conn is available.
+        let mut guard = self.pool.lock().map_err(|e| {
+            error!(err = ?e, "Unable to lock connection pool.");
+            OperationError::BackendEngine
+        })?;
+
+        let conn = guard.pop_front().ok_or_else(|| {
+            error!("Unable to retrieve connection from pool.");
+            OperationError::BackendEngine
+        })?;
+
         IdlSqliteWriteTransaction::new(self.pool.clone(), conn, self.db_name)
     }
 }
@@ -1949,7 +1952,7 @@ mod tests {
         sketching::test_init();
         let cfg = BackendConfig::new_test("main");
        let be = IdlSqlite::new(&cfg, false).unwrap();
-        let be_w = be.write();
+        let be_w = be.write().unwrap();
         let r = be_w.verify();
         assert!(r.is_empty());
     }

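The pool checkout above collapses two distinct failures, a poisoned mutex and an empty queue, into one `OperationError::BackendEngine` instead of an `expect`. A runnable sketch of that checkout, using a `Mutex<VecDeque<_>>` as a stand-in for the real connection pool:

```rust
use std::collections::VecDeque;
use std::sync::Mutex;

#[derive(Debug)]
enum OperationError {
    BackendEngine,
}

struct Pool {
    conns: Mutex<VecDeque<u32>>,
}

impl Pool {
    fn checkout(&self) -> Result<u32, OperationError> {
        // A poisoned lock maps onto the backend error rather than panicking.
        let mut guard = self.conns.lock().map_err(|e| {
            eprintln!("error: unable to lock connection pool: {e}");
            OperationError::BackendEngine
        })?;

        // An exhausted pool maps onto the same error.
        guard.pop_front().ok_or_else(|| {
            eprintln!("error: unable to retrieve connection from pool");
            OperationError::BackendEngine
        })
    }
}

fn main() {
    let pool = Pool { conns: Mutex::new(VecDeque::from([7])) };
    assert!(pool.checkout().is_ok());
    assert!(matches!(pool.checkout(), Err(OperationError::BackendEngine)));
}
```
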
@@ -32,9 +32,9 @@ use crate::repl::ruv::{
 };
 use crate::value::{IndexType, Value};
 
-pub mod dbentry;
-pub mod dbrepl;
-pub mod dbvalue;
+pub(crate) mod dbentry;
+pub(crate) mod dbrepl;
+pub(crate) mod dbvalue;
 
 mod idl_arc_sqlite;
 mod idl_sqlite;
@@ -1595,7 +1595,7 @@ impl<'a> BackendWriteTransaction<'a> {
     }
 
     pub fn upgrade_reindex(&mut self, v: i64) -> Result<(), OperationError> {
-        let dbv = self.get_db_index_version();
+        let dbv = self.get_db_index_version()?;
         admin_debug!(?dbv, ?v, "upgrade_reindex");
         if dbv < v {
             limmediate_warning!(
@@ -1896,19 +1896,20 @@ impl<'a> BackendWriteTransaction<'a> {
     fn reset_db_s_uuid(&mut self) -> Result<Uuid, OperationError> {
         // The value is missing. Generate a new one and store it.
         let nsid = Uuid::new_v4();
-        self.get_idlayer().write_db_s_uuid(nsid)?;
+        self.get_idlayer().write_db_s_uuid(nsid).map_err(|err| {
+            error!(?err, "Unable to persist server uuid");
+            err
+        })?;
         Ok(nsid)
     }
 
-    pub fn get_db_s_uuid(&mut self) -> Uuid {
-        #[allow(clippy::expect_used)]
-        match self
-            .get_idlayer()
-            .get_db_s_uuid()
-            .expect("DBLayer Error!!!")
-        {
-            Some(s_uuid) => s_uuid,
-            None => self.reset_db_s_uuid().expect("Failed to regenerate S_UUID"),
+    pub fn get_db_s_uuid(&mut self) -> Result<Uuid, OperationError> {
+        match self.get_idlayer().get_db_s_uuid().map_err(|err| {
+            error!(?err, "Failed to read server uuid");
+            err
+        })? {
+            Some(s_uuid) => Ok(s_uuid),
+            None => self.reset_db_s_uuid(),
         }
     }
 
@@ -1916,7 +1917,10 @@ impl<'a> BackendWriteTransaction<'a> {
     /// returning the new UUID
     fn reset_db_d_uuid(&mut self) -> Result<Uuid, OperationError> {
         let nsid = Uuid::new_v4();
-        self.get_idlayer().write_db_d_uuid(nsid)?;
+        self.get_idlayer().write_db_d_uuid(nsid).map_err(|err| {
+            error!(?err, "Unable to persist domain uuid");
+            err
+        })?;
         Ok(nsid)
     }
 
@@ -1927,15 +1931,13 @@ impl<'a> BackendWriteTransaction<'a> {
     }
 
     /// This pulls the domain UUID from the database
-    pub fn get_db_d_uuid(&mut self) -> Uuid {
-        #[allow(clippy::expect_used)]
-        match self
-            .get_idlayer()
-            .get_db_d_uuid()
-            .expect("DBLayer Error retrieving Domain UUID!!!")
-        {
-            Some(d_uuid) => d_uuid,
-            None => self.reset_db_d_uuid().expect("Failed to regenerate D_UUID"),
+    pub fn get_db_d_uuid(&mut self) -> Result<Uuid, OperationError> {
+        match self.get_idlayer().get_db_d_uuid().map_err(|err| {
+            error!(?err, "Failed to read domain uuid");
+            err
+        })? {
+            Some(d_uuid) => Ok(d_uuid),
+            None => self.reset_db_d_uuid(),
         }
     }
 
@@ -1951,7 +1953,7 @@ impl<'a> BackendWriteTransaction<'a> {
         }
     }
 
-    fn get_db_index_version(&mut self) -> i64 {
+    fn get_db_index_version(&mut self) -> Result<i64, OperationError> {
         self.get_idlayer().get_db_index_version()
     }
 
@@ -2026,7 +2028,7 @@ impl Backend {
         // In this case we can use an empty idx meta because we don't
         // access any parts of
        // the indexing subsystem here.
-        let mut idl_write = be.idlayer.write();
+        let mut idl_write = be.idlayer.write()?;
         idl_write
             .setup()
             .and_then(|_| idl_write.commit())
@@ -2036,7 +2038,7 @@ impl Backend {
         })?;
 
         // Now rebuild the ruv.
-        let mut be_write = be.write();
+        let mut be_write = be.write()?;
         be_write
             .ruv_reload()
             .and_then(|_| be_write.commit())
@@ -2057,41 +2059,21 @@ impl Backend {
         self.idlayer.try_quiesce();
     }
 
-    pub fn read(&self) -> BackendReadTransaction {
-        BackendReadTransaction {
-            idlayer: self.idlayer.read(),
+    pub fn read(&self) -> Result<BackendReadTransaction, OperationError> {
+        Ok(BackendReadTransaction {
+            idlayer: self.idlayer.read()?,
             idxmeta: self.idxmeta.read(),
             ruv: self.ruv.read(),
-        }
+        })
     }
 
-    pub fn write(&self) -> BackendWriteTransaction {
-        BackendWriteTransaction {
-            idlayer: self.idlayer.write(),
+    pub fn write(&self) -> Result<BackendWriteTransaction, OperationError> {
+        Ok(BackendWriteTransaction {
+            idlayer: self.idlayer.write()?,
             idxmeta_wr: self.idxmeta.write(),
             ruv: self.ruv.write(),
-        }
+        })
     }
-
-    // Should this actually call the idlayer directly?
-    pub fn reset_db_s_uuid(&self) -> Uuid {
-        let mut wr = self.write();
-        #[allow(clippy::expect_used)]
-        let sid = wr
-            .reset_db_s_uuid()
-            .expect("unable to reset db server uuid");
-        #[allow(clippy::expect_used)]
-        wr.commit()
-            .expect("Unable to commit to backend, can not proceed");
-        sid
-    }
-
-    /*
-    pub fn get_db_s_uuid(&self) -> Uuid {
-        let wr = self.write(Set::new());
-        wr.reset_db_s_uuid().unwrap()
-    }
-    */
 }
 
 // What are the possible actions we'll receive here?
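`get_db_s_uuid` and `get_db_d_uuid` now share a get-or-initialise shape: a read error propagates with `?`, a missing value triggers regeneration, and both paths surface as `Result`. A minimal sketch of that shape with stand-in types:

```rust
#[derive(Debug)]
struct OperationError;

struct Store {
    value: Option<u64>,
}

impl Store {
    // Stand-in for the fallible database read.
    fn read(&self) -> Result<Option<u64>, OperationError> {
        Ok(self.value)
    }

    // Stand-in for regenerating and persisting the value.
    fn reset(&mut self) -> Result<u64, OperationError> {
        self.value = Some(42);
        Ok(42)
    }

    fn get_or_init(&mut self) -> Result<u64, OperationError> {
        match self.read()? {
            Some(v) => Ok(v),
            // The value is missing: generate and persist a new one.
            None => self.reset(),
        }
    }
}

fn main() {
    let mut s = Store { value: None };
    assert_eq!(s.get_or_init().unwrap(), 42);
}
```
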
@@ -2161,7 +2143,7 @@ mod tests {
             let be = Backend::new(BackendConfig::new_test("main"), idxmeta, false)
                 .expect("Failed to setup backend");
 
-            let mut be_txn = be.write();
+            let mut be_txn = be.write().unwrap();
 
             let r = $test_fn(&mut be_txn);
             // Commit, to guarantee it worked.
@@ -2619,12 +2601,12 @@ mod tests {
     #[test]
     fn test_be_sid_generation_and_reset() {
         run_test!(|be: &mut BackendWriteTransaction| {
-            let sid1 = be.get_db_s_uuid();
-            let sid2 = be.get_db_s_uuid();
+            let sid1 = be.get_db_s_uuid().unwrap();
+            let sid2 = be.get_db_s_uuid().unwrap();
             assert!(sid1 == sid2);
             let sid3 = be.reset_db_s_uuid().unwrap();
             assert!(sid1 != sid3);
-            let sid4 = be.get_db_s_uuid();
+            let sid4 = be.get_db_s_uuid().unwrap();
             assert!(sid3 == sid4);
         });
     }
@@ -3692,8 +3674,8 @@ mod tests {
         let be_b = Backend::new(BackendConfig::new_test("db_2"), idxmeta, false)
             .expect("Failed to setup backend");
 
-        let mut be_a_txn = be_a.write();
-        let mut be_b_txn = be_b.write();
+        let mut be_a_txn = be_a.write().unwrap();
+        let mut be_b_txn = be_b.write().unwrap();
 
         assert!(be_a_txn.get_db_s_uuid() != be_b_txn.get_db_s_uuid());
 

@@ -1256,14 +1256,14 @@ impl FilterResolved {
 
         // If the f_list_and only has one element, pop it and return.
         if f_list_new.len() == 1 {
-            #[allow(clippy::expect_used)]
-            f_list_new.pop().expect("corrupt?")
+            f_list_new.remove(0)
         } else {
             // finally, optimise this list by sorting.
             f_list_new.sort_unstable();
             f_list_new.dedup();
             // Which ever element as the head is first must be the min SF
-            // so we use this in our And to represent us.
+            // so we use this in our And to represent the "best possible" value
+            // of how indexes will perform.
             let sf = f_list_new.first().and_then(|f| f.get_slopeyness_factor());
             //
             // return!
@@ -1287,8 +1287,7 @@ impl FilterResolved {
 
         // If the f_list_or only has one element, pop it and return.
         if f_list_new.len() == 1 {
-            #[allow(clippy::expect_used)]
-            f_list_new.pop().expect("corrupt?")
+            f_list_new.remove(0)
        } else {
             // sort, but reverse so that sub-optimal elements are earlier
             // to promote fast-failure.
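The filter micro-change swaps `pop().expect(...)` for `remove(0)`: when the vector is already known to hold exactly one element, `remove(0)` returns it by value with no `Option` to unwrap, so the `clippy::expect_used` allowance disappears. A tiny sketch:

```rust
fn take_single(mut v: Vec<i32>) -> i32 {
    assert_eq!(v.len(), 1);
    // Equivalent to the old v.pop().expect(...), but there is no None case;
    // remove(0) can only panic on an out-of-bounds index, which the length
    // check above rules out.
    v.remove(0)
}

fn main() {
    assert_eq!(take_single(vec![9]), 9);
}
```
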
@@ -364,16 +364,6 @@ impl Account {
         inputs
     }
 
-    pub fn primary_cred_uuid(&self) -> Option<Uuid> {
-        self.primary.as_ref().map(|cred| cred.uuid).or_else(|| {
-            if self.is_anonymous() {
-                Some(UUID_ANONYMOUS)
-            } else {
-                None
-            }
-        })
-    }
-
     pub fn primary_cred_uuid_and_policy(&self) -> Option<(Uuid, CredSoftLockPolicy)> {
         self.primary
             .as_ref()

@@ -55,7 +55,6 @@ const PW_BADLIST_MSG: &str = "password is in badlist";
 #[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq)]
 pub enum AuthType {
     Anonymous,
-    UnixPassword,
     Password,
     GeneratedPassword,
     PasswordMfa,
@@ -66,7 +65,6 @@ impl fmt::Display for AuthType {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
             AuthType::Anonymous => write!(f, "anonymous"),
-            AuthType::UnixPassword => write!(f, "unixpassword"),
             AuthType::Password => write!(f, "password"),
             AuthType::GeneratedPassword => write!(f, "generatedpassword"),
             AuthType::PasswordMfa => write!(f, "passwordmfa"),
@@ -1036,7 +1034,7 @@ impl AuthSession {
                     self.account.uuid,
                     async_tx,
                     webauthn,
-                    Some(&account_policy.pw_badlist_cache),
+                    Some(account_policy.pw_badlist_cache()),
                 ) {
                     CredState::Success { auth_type, cred_id } => {
                         // Issue the uat based on a set of factors.
@@ -1045,8 +1043,8 @@ impl AuthSession {
                             time,
                             async_tx,
                             cred_id,
-                            account_policy.authsession_expiry,
-                            account_policy.privilege_expiry,
+                            account_policy.authsession_expiry(),
+                            account_policy.privilege_expiry(),
                         )?;
                         let jwt = Jws::new(uat);
 
@@ -1128,7 +1126,7 @@ impl AuthSession {
         // We need to actually work this out better, and then
         // pass it to to_userauthtoken
         let scope = match auth_type {
-            AuthType::UnixPassword | AuthType::Anonymous => SessionScope::ReadOnly,
+            AuthType::Anonymous => SessionScope::ReadOnly,
             AuthType::GeneratedPassword => SessionScope::ReadWrite,
             AuthType::Password | AuthType::PasswordMfa | AuthType::Passkey => {
                 if privileged {
@@ -1162,11 +1160,6 @@ impl AuthSession {
             AuthType::Anonymous => {
                 // Skip - these sessions are not validated by session id.
             }
-            AuthType::UnixPassword => {
-                // Impossibru!
-                admin_error!("Impossible auth type (UnixPassword) found");
-                return Err(OperationError::InvalidState);
-            }
             AuthType::Password
             | AuthType::GeneratedPassword
             | AuthType::PasswordMfa
@@ -1199,7 +1192,7 @@ impl AuthSession {
         // Sanity check - We have already been really strict about what session types
         // can actually trigger a re-auth, but we recheck here for paranoia!
         let scope = match auth_type {
-            AuthType::UnixPassword | AuthType::Anonymous | AuthType::GeneratedPassword => {
+            AuthType::Anonymous | AuthType::GeneratedPassword => {
                 error!("AuthType used in Reauth is not valid for session re-issuance. Rejecting");
                 return Err(OperationError::InvalidState);
             }
@@ -1272,7 +1265,7 @@ mod tests {
     use crate::idm::server::AccountPolicy;
     use crate::idm::AuthState;
     use crate::prelude::*;
-    use crate::utils::{duration_from_epoch_now, readable_password_from_random};
+    use crate::utils::readable_password_from_random;
     use kanidm_lib_crypto::CryptoPolicy;
 
     fn create_pw_badlist_cache() -> HashSet<String> {

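Deleting the unused `AuthType::UnixPassword` variant is safe to do mechanically because these matches have no catch-all arm: the compiler flags every `match` that still names the removed variant, which is how dead arms like the old "Impossibru!" one get found and removed. A sketch of why exhaustive matching gives that guarantee (the variant names mirror the diff; the scopes returned are illustrative):

```rust
enum AuthType {
    Anonymous,
    Password,
    GeneratedPassword,
}

fn scope(auth: &AuthType) -> &'static str {
    // Exhaustive match, no `_` arm: adding or removing a variant in the enum
    // forces a compile error here until this match is updated too.
    match auth {
        AuthType::Anonymous => "read-only",
        AuthType::GeneratedPassword => "read-write",
        AuthType::Password => "privileged",
    }
}

fn main() {
    println!("{}", scope(&AuthType::Anonymous));
    println!("{}", scope(&AuthType::Password));
    println!("{}", scope(&AuthType::GeneratedPassword));
}
```
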
@@ -1270,7 +1270,11 @@ impl<'a> IdmServerCredUpdateTransaction<'a> {
         // check a password badlist to eliminate more content
         // we check the password as "lower case" to help eliminate possibilities
         // also, when pw_badlist_cache is read from DB, it is read as Value (iutf8 lowercase)
-        if (self.account_policy.pw_badlist_cache).contains(&cleartext.to_lowercase()) {
+        if self
+            .account_policy
+            .pw_badlist_cache()
+            .contains(&cleartext.to_lowercase())
+        {
             security_info!("Password found in badlist, rejecting");
             Err(PasswordQuality::BadListed)
         } else {

@@ -4,9 +4,9 @@
 //! is implemented.
 
 pub mod account;
-pub mod applinks;
+pub(crate) mod applinks;
 pub mod audit;
-pub mod authsession;
+pub(crate) mod authsession;
 pub mod credupdatesession;
 pub mod delayed;
 pub mod event;
@@ -14,12 +14,12 @@ pub mod group;
 pub mod identityverification;
 pub mod ldap;
 pub mod oauth2;
-pub mod radius;
-pub mod reauth;
+pub(crate) mod radius;
+pub(crate) mod reauth;
 pub mod scim;
 pub mod server;
 pub mod serviceaccount;
-pub mod unix;
+pub(crate) mod unix;
 
 use std::fmt;
 

@@ -70,14 +70,14 @@ pub struct DomainKeys {
 }
 
 #[derive(Clone)]
-pub struct AccountPolicy {
-    pub(crate) privilege_expiry: u32,
-    pub(crate) authsession_expiry: u32,
-    pub(crate) pw_badlist_cache: HashSet<String>,
+pub(crate) struct AccountPolicy {
+    privilege_expiry: u32,
+    authsession_expiry: u32,
+    pw_badlist_cache: HashSet<String>,
 }
 
 impl AccountPolicy {
-    pub fn new(
+    pub(crate) fn new(
         privilege_expiry: u32,
         authsession_expiry: u32,
         pw_badlist_cache: HashSet<String>,
@@ -89,7 +89,20 @@ impl AccountPolicy {
         }
     }
 
-    pub fn from_pw_badlist_cache(pw_badlist_cache: HashSet<String>) -> Self {
+    pub(crate) fn privilege_expiry(&self) -> u32 {
+        self.privilege_expiry
+    }
+
+    pub(crate) fn authsession_expiry(&self) -> u32 {
+        self.authsession_expiry
+    }
+
+    pub(crate) fn pw_badlist_cache(&self) -> &HashSet<String> {
+        &self.pw_badlist_cache
+    }
+
+    #[cfg(test)]
+    pub(crate) fn from_pw_badlist_cache(pw_badlist_cache: HashSet<String>) -> Self {
         Self {
             pw_badlist_cache,
             ..Default::default()
@@ -2178,7 +2191,6 @@ mod tests {
     use crate::idm::AuthState;
     use crate::modify::{Modify, ModifyList};
     use crate::prelude::*;
-    use crate::utils::duration_from_epoch_now;
     use crate::value::SessionState;
     use kanidm_lib_crypto::CryptoPolicy;
 

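`AccountPolicy` goes from public fields to private fields behind `pub(crate)` getters, which is what the `authsession_expiry()` and `pw_badlist_cache()` call sites earlier in the diff consume. A sketch of that encapsulation; the field names mirror the diff but the struct here is a stand-alone stand-in:

```rust
use std::collections::HashSet;

pub(crate) struct AccountPolicy {
    privilege_expiry: u32,
    pw_badlist_cache: HashSet<String>,
}

impl AccountPolicy {
    // Copy return for a small scalar.
    pub(crate) fn privilege_expiry(&self) -> u32 {
        self.privilege_expiry
    }

    // Borrowed return avoids cloning the (potentially large) badlist.
    pub(crate) fn pw_badlist_cache(&self) -> &HashSet<String> {
        &self.pw_badlist_cache
    }
}

fn main() {
    let p = AccountPolicy {
        privilege_expiry: 600,
        pw_badlist_cache: HashSet::new(),
    };
    assert_eq!(p.privilege_expiry(), 600);
    assert!(!p.pw_badlist_cache().contains("password"));
}
```
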
@@ -26,6 +26,7 @@ static ALLOC: jemallocator::Jemalloc = jemallocator::Jemalloc;
 
 #[macro_use]
 extern crate rusqlite;
 
 #[macro_use]
 extern crate tracing;
 #[macro_use]
@@ -45,7 +46,8 @@ pub mod entry;
 pub mod event;
 pub mod filter;
 pub mod modify;
-pub mod utils;
+pub mod time;
+pub(crate) mod utils;
 pub mod value;
 pub mod valueset;
 #[macro_use]
@@ -100,7 +102,7 @@ pub mod prelude {
         QueryServer, QueryServerReadTransaction, QueryServerTransaction,
         QueryServerWriteTransaction,
     };
-    pub use crate::utils::duration_from_epoch_now;
+    pub use crate::time::duration_from_epoch_now;
     pub use crate::value::{
         ApiTokenScope, IndexType, PartialValue, SessionScope, SyntaxType, Value,
     };

@@ -12,7 +12,8 @@ macro_rules! setup_test {
         let be = Backend::new(BackendConfig::new_test("main"), idxmeta, false)
             .expect("Failed to init BE");
 
-        let qs = QueryServer::new(be, schema_outer, "example.com".to_string());
+        let qs = QueryServer::new(be, schema_outer, "example.com".to_string())
+            .expect("Failed to setup Query Server");
         tokio::runtime::Builder::new_current_thread()
             .enable_all()
             .build()
@@ -24,7 +25,7 @@ macro_rules! setup_test {
     (
         $preload_entries:expr
     ) => {{
-        use crate::utils::duration_from_epoch_now;
+        use crate::prelude::duration_from_epoch_now;
 
         let _ = sketching::test_init();
 
@@ -37,7 +38,8 @@ macro_rules! setup_test {
         let be = Backend::new(BackendConfig::new_test("main"), idxmeta, false)
             .expect("Failed to init BE");
 
-        let qs = QueryServer::new(be, schema_outer, "example.com".to_string());
+        let qs = QueryServer::new(be, schema_outer, "example.com".to_string())
+            .expect("Failed to setup Query Server");
         tokio::runtime::Builder::new_current_thread()
             .enable_all()
             .build()
@@ -75,7 +77,6 @@ macro_rules! run_create_test {
         use crate::event::CreateEvent;
         use crate::prelude::*;
         use crate::schema::Schema;
-        use crate::utils::duration_from_epoch_now;
 
         let qs = setup_test!($preload_entries);
 
@@ -197,7 +198,6 @@ macro_rules! run_delete_test {
         use crate::event::DeleteEvent;
         use crate::prelude::*;
         use crate::schema::Schema;
-        use crate::utils::duration_from_epoch_now;
 
         let qs = setup_test!($preload_entries);
 

@@ -65,7 +65,6 @@ impl Cid {
         }
     }
 
-    #[allow(clippy::expect_used)]
     pub fn sub_secs(&self, secs: u64) -> Result<Self, OperationError> {
         self.ts
             .checked_sub(Duration::from_secs(secs))

@@ -306,7 +306,8 @@ impl<'a> QueryServerWriteTransaction<'a> {
         };
 
         // Assert that the d_uuid matches the repl domain uuid.
-        let db_uuid = self.be_txn.get_db_d_uuid();
+        let db_uuid = self.be_txn.get_db_d_uuid()?;
+
         if db_uuid != ctx_domain_uuid {
             error!("Unable to proceed with consumer incremental - incoming domain uuid does not match our database uuid. You must investigate this situation. {:?} != {:?}", db_uuid, ctx_domain_uuid);
             return Err(OperationError::ReplDomainUuidMismatch);

@ -1,597 +0,0 @@
|
|||
use std::collections::btree_map::Keys;
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt;
|
||||
use std::ops::Bound;
|
||||
use std::ops::Bound::*;
|
||||
|
||||
use kanidm_proto::v1::ConsistencyError;
|
||||
|
||||
use super::cid::Cid;
|
||||
use crate::entry::{compare_attrs, Eattrs};
|
||||
use crate::prelude::*;
|
||||
use crate::schema::SchemaTransaction;
|
||||
use crate::valueset;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EntryChangelog {
|
||||
/// The set of "entries as they existed at a point in time". This allows us to rewind
|
||||
/// to a point-in-time, and then to start to "replay" applying all changes again.
|
||||
///
|
||||
/// A subtle and important piece of information is that an anchor can be considered
|
||||
/// as the "state as existing between two Cid's". This means for Cid X, this state is
|
||||
/// the "moment before X". This is important, as for a create we define the initial anchor
|
||||
/// as "nothing". It's means for the anchor at time X, that changes that occurred at time
|
||||
/// X have NOT been replayed and applied!
|
||||
anchors: BTreeMap<Cid, State>,
|
||||
changes: BTreeMap<Cid, Change>,
|
||||
}
|
||||
|
||||
/*
|
||||
impl fmt::Display for EntryChangelog {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
/// A change defines the transitions that occurred within this Cid (transaction). A change is applied
|
||||
/// as a whole, or rejected during the replay process.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Change {
|
||||
s: Vec<Transition>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
enum State {
|
||||
NonExistent,
|
||||
Live(Eattrs),
|
||||
Recycled(Eattrs),
|
||||
Tombstone(Eattrs),
|
||||
}
|
||||
|
||||
impl fmt::Display for State {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match &self {
|
||||
State::NonExistent => write!(f, "NonExistent"),
|
||||
State::Live(_) => write!(f, "Live"),
|
||||
State::Recycled(_) => write!(f, "Recycled"),
|
||||
State::Tombstone(_) => write!(f, "Tombstone"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
enum Transition {
|
||||
Create(Eattrs),
|
||||
ModifyPurge(AttrString),
|
||||
ModifyPresent(AttrString, Box<Value>),
|
||||
ModifyRemoved(AttrString, Box<PartialValue>),
|
||||
ModifyAssert(AttrString, Box<PartialValue>),
|
||||
Recycle,
|
||||
Revive,
|
||||
Tombstone(Eattrs),
|
||||
}
|
||||
|
||||
impl fmt::Display for Transition {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match &self {
|
||||
Transition::Create(_) => write!(f, "Create"),
|
||||
Transition::ModifyPurge(a) => write!(f, "ModifyPurge({})", a),
|
||||
Transition::ModifyPresent(a, _) => write!(f, "ModifyPresent({})", a),
|
||||
Transition::ModifyRemoved(a, _) => write!(f, "ModifyRemoved({})", a),
|
||||
Transition::ModifyAssert(a, _) => write!(f, "ModifyAssert({})", a),
|
||||
Transition::Recycle => write!(f, "Recycle"),
|
||||
Transition::Revive => write!(f, "Revive"),
|
||||
Transition::Tombstone(_) => write!(f, "Tombstone"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl State {
|
||||
fn apply_change(self, change: &Change) -> Result<Self, Self> {
|
||||
let mut state = self;
|
||||
for transition in change.s.iter() {
|
||||
match (&mut state, transition) {
|
||||
(State::NonExistent, Transition::Create(attrs)) => {
|
||||
trace!("NonExistent + Create -> Live");
|
||||
state = State::Live(attrs.clone());
|
||||
}
|
||||
(State::Live(ref mut attrs), Transition::ModifyPurge(attr)) => {
|
||||
trace!("Live + ModifyPurge({}) -> Live", attr);
|
||||
attrs.remove(attr);
|
||||
}
|
||||
(State::Live(ref mut attrs), Transition::ModifyPresent(attr, value)) => {
|
||||
trace!("Live + ModifyPresent({}) -> Live", attr);
|
||||
if let Some(vs) = attrs.get_mut(attr) {
|
||||
let r = vs.insert_checked(value.as_ref().clone());
|
||||
assert!(r.is_ok());
|
||||
// Reject if it fails?
|
||||
} else {
|
||||
#[allow(clippy::expect_used)]
|
||||
let vs = valueset::from_value_iter(std::iter::once(value.as_ref().clone()))
|
||||
.expect("Unable to fail - always single value, and only one type!");
|
||||
attrs.insert(attr.clone(), vs);
|
||||
}
|
||||
}
|
||||
(State::Live(ref mut attrs), Transition::ModifyRemoved(attr, value)) => {
|
||||
trace!("Live + ModifyRemoved({}) -> Live", attr);
|
||||
let rm = if let Some(vs) = attrs.get_mut(attr) {
|
||||
vs.remove(value);
|
||||
vs.is_empty()
|
||||
} else {
|
||||
false
|
||||
};
|
||||
if rm {
|
||||
attrs.remove(attr);
|
||||
};
|
||||
}
|
||||
(State::Live(ref mut attrs), Transition::ModifyAssert(attr, value)) => {
|
||||
trace!("Live + ModifyAssert({}) -> Live", attr);
|
||||
|
||||
if attrs
|
||||
.get(attr)
|
||||
.map(|vs| vs.contains(value))
|
||||
.unwrap_or(false)
|
||||
{
|
||||
// Valid
|
||||
} else {
|
||||
warn!("{} + {:?} -> Assertion not met - REJECTING", attr, value);
|
||||
return Err(state);
|
||||
}
|
||||
}
|
||||
(State::Live(attrs), Transition::Recycle) => {
|
||||
trace!("Live + Recycle -> Recycled");
|
||||
state = State::Recycled(attrs.clone());
|
||||
}
|
||||
(State::Live(_), Transition::Tombstone(attrs)) => {
|
||||
trace!("Live + Tombstone -> Tombstone");
|
||||
state = State::Tombstone(attrs.clone());
|
||||
}
|
||||
(State::Recycled(attrs), Transition::Revive) => {
|
||||
trace!("Recycled + Revive -> Live");
|
||||
state = State::Live(attrs.clone());
|
||||
}
|
||||
(State::Recycled(ref mut attrs), Transition::ModifyPurge(attr)) => {
|
||||
trace!("Recycled + ModifyPurge({}) -> Recycled", attr);
|
||||
attrs.remove(attr);
|
||||
}
|
||||
(State::Recycled(attrs), Transition::ModifyRemoved(attr, value)) => {
|
||||
trace!("Recycled + ModifyRemoved({}) -> Recycled", attr);
|
||||
let rm = if let Some(vs) = attrs.get_mut(attr) {
|
||||
vs.remove(value);
|
||||
vs.is_empty()
|
||||
} else {
|
||||
false
|
||||
};
|
||||
if rm {
|
||||
attrs.remove(attr);
|
||||
};
|
||||
}
|
||||
(State::Recycled(_), Transition::Tombstone(attrs)) => {
|
||||
trace!("Recycled + Tombstone -> Tombstone");
|
||||
state = State::Tombstone(attrs.clone());
|
||||
}
|
||||
|
||||
// ==============================
|
||||
// Invalid States
|
||||
/*
|
||||
(State::NonExistent, Transition::ModifyPurge(_))
|
||||
| (State::NonExistent, Transition::ModifyPresent(_, _))
|
||||
| (State::NonExistent, Transition::ModifyRemoved(_, _))
|
||||
| (State::NonExistent, Transition::Recycle)
|
||||
| (State::NonExistent, Transition::Revive)
|
||||
| (State::NonExistent, Transition::Tombstone(_))
|
||||
| (State::Live(_), Transition::Create(_))
|
||||
| (State::Live(_), Transition::Revive)
|
||||
| (State::Recycled(_), Transition::Create(_))
|
||||
| (State::Recycled(_), Transition::Recycle)
|
||||
| (State::Recycled(_), Transition::ModifyPresent(_, _))
|
||||
| (State::Tombstone(_), _)
|
||||
*/
|
||||
(s, t) => {
|
||||
warn!("{} + {} -> REJECTING", s, t);
|
||||
return Err(state);
|
||||
}
|
||||
};
|
||||
}
|
||||
// Everything must have applied, all good then.
|
||||
trace!(?state, "applied changes");
|
||||
Ok(state)
|
||||
}
|
||||
}
|
||||
|
||||
impl EntryChangelog {
|
||||
pub fn new(cid: Cid, attrs: Eattrs, _schema: &dyn SchemaTransaction) -> Self {
|
||||
// I think we need to reduce the attrs based on what is / is not replicated.?
|
||||
|
||||
let anchors = btreemap![(cid.clone(), State::NonExistent)];
|
||||
let changes = btreemap![(
|
||||
cid,
|
||||
Change {
|
||||
s: vec![Transition::Create(attrs)]
|
||||
}
|
||||
)];
|
||||
|
||||
EntryChangelog { anchors, changes }
|
||||
}
|
||||
|
||||
// TODO: work out if the below comment about uncommenting is still valid
|
||||
// Uncomment this once we have a real on-disk storage of the changelog
|
||||
pub fn new_without_schema(cid: Cid, attrs: Eattrs) -> Self {
|
||||
// I think we need to reduce the attrs based on what is / is not replicated.?
|
||||
|
||||
// We need to pick a state that reflects the current state WRT to tombstone
|
||||
// or recycled!
|
||||
let class = attrs.get(Attribute::Class.as_ref());
|
||||
|
||||
let (anchors, changes) = if class
|
||||
.as_ref()
|
||||
.map(|c| c.contains(&EntryClass::Tombstone.to_partialvalue()))
|
||||
.unwrap_or(false)
|
||||
{
|
||||
(btreemap![(cid, State::Tombstone(attrs))], BTreeMap::new())
|
||||
} else if class
|
||||
.as_ref()
|
||||
.map(|c| c.contains(&EntryClass::Recycled.to_partialvalue()))
|
||||
.unwrap_or(false)
|
||||
{
|
||||
(btreemap![(cid, State::Recycled(attrs))], BTreeMap::new())
|
||||
} else {
|
||||
(
|
||||
btreemap![(cid.clone(), State::NonExistent)],
|
||||
btreemap![(
|
||||
cid,
|
||||
Change {
|
||||
s: vec![Transition::Create(attrs)]
|
||||
}
|
||||
)],
|
||||
)
|
||||
};
|
||||
|
||||
EntryChangelog { anchors, changes }
|
||||
}
|
||||
|
||||
// fn add_ava_iter<T>(&mut self, cid: &Cid, attr: Attribute, viter: T)
|
||||
// where
|
||||
// T: IntoIterator<Item = Value>,
|
||||
// {
|
||||
// if !self.changes.contains_key(cid) {
|
||||
// self.changes.insert(cid.clone(), Change { s: Vec::new() });
|
||||
// }
|
||||
|
||||
// #[allow(clippy::expect_used)]
|
||||
// let change = self
|
||||
// .changes
|
||||
// .get_mut(cid)
|
||||
// .expect("Memory corruption, change must exist");
|
||||
|
||||
// viter
|
||||
// .into_iter()
|
||||
// .map(|v| Transition::ModifyPresent(attr.into(), Box::new(v)))
|
||||
// .for_each(|t| change.s.push(t));
|
||||
// }
|
||||
|
||||
pub fn remove_ava_iter<T>(&mut self, cid: &Cid, attr: &str, viter: T)
|
||||
where
|
||||
T: IntoIterator<Item = PartialValue>,
|
||||
{
|
||||
if !self.changes.contains_key(cid) {
|
||||
self.changes.insert(cid.clone(), Change { s: Vec::new() });
|
||||
}
|
||||
|
||||
#[allow(clippy::expect_used)]
|
||||
let change = self
|
||||
.changes
|
||||
.get_mut(cid)
|
||||
.expect("Memory corruption, change must exist");
|
||||
|
||||
viter
|
||||
.into_iter()
|
||||
.map(|v| Transition::ModifyRemoved(AttrString::from(attr), Box::new(v)))
|
||||
.for_each(|t| change.s.push(t));
|
||||
}
|
||||
|
||||
pub fn assert_ava(&mut self, cid: &Cid, attr: Attribute, value: PartialValue) {
|
||||
if !self.changes.contains_key(cid) {
|
||||
self.changes.insert(cid.clone(), Change { s: Vec::new() });
|
||||
}
|
||||
|
||||
#[allow(clippy::expect_used)]
|
||||
let change = self
|
||||
.changes
|
||||
.get_mut(cid)
|
||||
.expect("Memory corruption, change must exist");
|
||||
|
||||
change
|
||||
.s
|
||||
.push(Transition::ModifyAssert(attr.into(), Box::new(value)))
|
||||
}
|
||||
|
||||
pub fn purge_ava(&mut self, cid: &Cid, attr: Attribute) {
|
||||
if !self.changes.contains_key(cid) {
|
||||
self.changes.insert(cid.clone(), Change { s: Vec::new() });
|
||||
}
|
||||
|
||||
#[allow(clippy::expect_used)]
|
||||
let change = self
|
||||
.changes
|
||||
.get_mut(cid)
|
||||
.expect("Memory corruption, change must exist");
|
||||
change.s.push(Transition::ModifyPurge(attr.info()));
|
||||
}
|
||||
|
||||
pub fn recycled(&mut self, cid: &Cid) {
|
||||
if !self.changes.contains_key(cid) {
|
||||
self.changes.insert(cid.clone(), Change { s: Vec::new() });
|
||||
}
|
||||
|
||||
#[allow(clippy::expect_used)]
|
||||
let change = self
|
||||
.changes
|
||||
.get_mut(cid)
|
||||
.expect("Memory corruption, change must exist");
|
||||
change.s.push(Transition::Recycle);
|
||||
}
|
||||
|
||||
pub fn revive(&mut self, cid: &Cid) {
|
||||
if !self.changes.contains_key(cid) {
|
||||
self.changes.insert(cid.clone(), Change { s: Vec::new() });
|
||||
}
|
||||
|
||||
#[allow(clippy::expect_used)]
|
||||
let change = self
|
||||
.changes
|
||||
.get_mut(cid)
|
||||
.expect("Memory corruption, change must exist");
|
||||
change.s.push(Transition::Revive);
|
||||
}
|
||||
|
||||
pub fn tombstone(&mut self, cid: &Cid, attrs: Eattrs) {
|
||||
if !self.changes.contains_key(cid) {
|
||||
self.changes.insert(cid.clone(), Change { s: Vec::new() });
|
||||
}
|
||||
|
||||
#[allow(clippy::expect_used)]
|
||||
let change = self
|
||||
.changes
|
||||
.get_mut(cid)
|
||||
.expect("Memory corruption, change must exist");
|
||||
change.s.push(Transition::Tombstone(attrs));
|
||||
}

    /// Replay our changes from and including the replay Cid, up to the latest point
    /// in time. We also return a vector of *rejected* Cids showing what is in the
    /// change log that is considered invalid.
    fn replay(
        &self,
        from_cid: Bound<&Cid>,
        to_cid: Bound<&Cid>,
    ) -> Result<(State, Vec<Cid>), OperationError> {
        // Select the anchor_cid that is *earlier than* or *equal to* the replay_cid.
        // If not found, we are *unable* to perform this replay, which indicates a problem!
        let (anchor_cid, anchor) = if matches!(from_cid, Unbounded) {
            // If from is unbounded, and to is unbounded, we want
            // the earliest anchor possible.

            // If from is unbounded and to is bounded, we want the earliest
            // possible.
            self.anchors.iter().next()
        } else {
            // If from has a bound, we want an anchor "earlier than" from, regardless
            // of the to bound state.
            self.anchors.range((Unbounded, from_cid)).next_back()
        }
        .ok_or_else(|| {
            admin_error!(
                ?from_cid,
                ?to_cid,
                "Failed to locate anchor in replay range"
            );
            OperationError::ReplReplayFailure
        })?;

        trace!(?anchor_cid, ?anchor);

        // Load the entry attribute state at that time.
        let mut replay_state = anchor.clone();
        let mut rejected_cid = Vec::new();

        // For each change ...
        for (change_cid, change) in self.changes.range((Included(anchor_cid), to_cid)) {
            // Apply the change.
            trace!(?change_cid, ?change);

            replay_state = match replay_state.apply_change(change) {
                Ok(mut new_state) => {
                    // Indicate that this was the highest CID so far.
                    match &mut new_state {
                        State::NonExistent => {
                            trace!("pass");
                        }
                        State::Live(ref mut attrs)
                        | State::Recycled(ref mut attrs)
                        | State::Tombstone(ref mut attrs) => {
                            let cv = vs_cid![change_cid.clone()];
                            let _ = attrs.insert(Attribute::LastModifiedCid.into(), cv);
                        }
                    };
                    new_state
                }
                Err(previous_state) => {
                    warn!("rejecting invalid change {:?}", change_cid);
                    rejected_cid.push(change_cid.clone());
                    previous_state
                }
            };
        }

        // Return the eattrs state.
        Ok((replay_state, rejected_cid))
    }
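
The anchor selection in replay is driven entirely by BTreeMap range bounds. As a standalone sketch of that idiom (not kanidm code; plain integers stand in for the real Cid type): pick the latest anchor at or before the replay point with a reverse range, then walk every change from that anchor forward.

use std::collections::BTreeMap;
use std::ops::Bound::{Included, Unbounded};

fn main() {
    // Anchors at "cids" 1 and 5; changes recorded at 1..=7.
    let anchors = BTreeMap::from([(1, "anchor@1"), (5, "anchor@5")]);
    let changes: BTreeMap<i32, &str> = (1..=7).map(|c| (c, "change")).collect();

    // Replaying from cid 6: take the latest anchor at-or-before 6 ...
    let from = 6;
    let (anchor_cid, _anchor) = anchors
        .range((Unbounded, Included(&from)))
        .next_back()
        .expect("no anchor precedes the replay point");
    assert_eq!(*anchor_cid, 5);

    // ... then apply every change from that anchor onward.
    let applied: Vec<i32> = changes
        .range((Included(anchor_cid), Unbounded))
        .map(|(cid, _)| *cid)
        .collect();
    assert_eq!(applied, vec![5, 6, 7]);
}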

    #[instrument(
        level = "trace",
        name = "verify",
        skip(self, _schema, expected_attrs, results)
    )]
    pub fn verify(
        &self,
        _schema: &dyn SchemaTransaction,
        expected_attrs: &Eattrs,
        entry_id: u64,
        results: &mut Vec<Result<(), ConsistencyError>>,
    ) {
        // We need to be able to take any anchor entry and replay it, so that when all
        // changes are applied we get the *same entry* as the current state.
        debug_assert!(results.is_empty());

        // For each anchor (we only need its change id).
        for cid in self.anchors.keys() {
            match self.replay(Included(cid), Unbounded) {
                Ok((entry_state, rejected)) => {
                    trace!(?rejected);

                    match entry_state {
                        State::Live(attrs) | State::Recycled(attrs) | State::Tombstone(attrs) => {
                            if compare_attrs(&attrs, expected_attrs) {
                                // valid
                                trace!("changelog is synchronised");
                            } else {
                                // ruh-roh.
                                warn!("changelog has desynchronised!");
                                debug!(?attrs);
                                debug!(?expected_attrs);
                                debug_assert!(false);
                                results
                                    .push(Err(ConsistencyError::ChangelogDesynchronised(entry_id)));
                            }
                        }
                        State::NonExistent => {
                            warn!("entry does not exist - changelog is corrupted?!");
                            results.push(Err(ConsistencyError::ChangelogDesynchronised(entry_id)))
                        }
                    }
                }
                Err(e) => {
                    error!(?e);
                }
            }
        }

        debug_assert!(results.is_empty());
    }

    pub fn contains_tail_cid(&self, cid: &Cid) -> bool {
        if let Some(tail_cid) = self.changes.keys().next_back() {
            if tail_cid == cid {
                return true;
            }
        };
        false
    }

    pub fn can_delete(&self) -> bool {
        // Changelog should be empty.
        // Should have a current anchor state of tombstone.
        self.changes.is_empty()
            && matches!(self.anchors.values().next_back(), Some(State::Tombstone(_)))
    }

    pub fn is_live(&self) -> bool {
        !matches!(self.anchors.values().next_back(), Some(State::Tombstone(_)))
    }

    pub fn cid_iter(&self) -> Keys<Cid, Change> {
        self.changes.keys()
    }

    /*
    fn insert_anchor(&mut self, cid: Cid, entry_state: State) {
        // When we insert an anchor, we have to remove all subsequent anchors (but not
        // the preceding ones.)
        let _ = self.anchors.split_off(&cid);
        self.anchors.insert(cid.clone(), entry_state);
    }
    */

    pub fn trim_up_to(&mut self, cid: &Cid) -> Result<(), OperationError> {
        // Build a new anchor that is equal to or less than this cid.
        // In other words, the cid we are trimming to should remain
        // in the CL, and we should have an anchor that precedes it.
        let (entry_state, rejected) = self.replay(Unbounded, Excluded(cid)).map_err(|e| {
            error!(?e);
            e
        })?;
        trace!(?rejected);
        // Add the entry_state as an anchor, using the CID we just
        // trimmed to.

        // insert_anchor would remove anything to the right; we also need to
        // remove everything to the left, so just clear.
        self.anchors.clear();
        self.anchors.insert(cid.clone(), entry_state);

        // And now split the CL.
        let mut right = self.changes.split_off(cid);
        std::mem::swap(&mut right, &mut self.changes);
        // We can trace what we drop later?
        Ok(())
    }
}
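
One detail in trim_up_to that is easy to misread: split_off(cid) returns the entries at and *after* cid, so the following mem::swap keeps the newer changes and discards the older ones. A standalone sketch of just that idiom, with integer keys standing in for Cids:

use std::collections::BTreeMap;

fn main() {
    let mut changes: BTreeMap<i32, &str> = (1..=7).map(|c| (c, "change")).collect();

    // Trim up to cid 4: split_off(&4) moves keys >= 4 into `right`,
    // leaving the keys < 4 that we want to drop behind in `changes`.
    let mut right = changes.split_off(&4);
    std::mem::swap(&mut right, &mut changes);

    // `changes` now holds 4..=7, and `right` holds the trimmed 1..=3.
    assert_eq!(changes.keys().copied().collect::<Vec<_>>(), vec![4, 5, 6, 7]);
    assert_eq!(right.keys().copied().collect::<Vec<_>>(), vec![1, 2, 3]);
}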

#[cfg(test)]
mod tests {
    use std::time::Duration;

    use crate::entry::Eattrs;
    // use crate::prelude::*;
    use crate::repl::cid::Cid;
    use crate::repl::entry::{Change, EntryChangelog, State, Transition};
    use crate::schema::{Schema, SchemaTransaction};

    #[cfg(test)]
    macro_rules! run_entrychangelog_test {
        ($test_fn:expr) => {{
            let _ = sketching::test_init();
            let schema_outer = Schema::new().expect("Failed to init schema");

            let schema_txn = schema_outer.read();

            $test_fn(&schema_txn)
        }};
    }

    #[test]
    fn test_entrychangelog_basic() {
        run_entrychangelog_test!(|schema: &dyn SchemaTransaction| {
            let cid = Cid::new_random_s_d(Duration::from_secs(1));
            let eattrs = Eattrs::new();
            let eclog = EntryChangelog::new(cid, eattrs, schema);
            trace!(?eclog);
        })
    }

    #[test]
    fn test_entrychangelog_state_transitions() {
        // Test that all our transitions are defined and work as
        // expected.
        assert!(State::NonExistent
            .apply_change(&Change { s: vec![] })
            .is_ok());
        assert!(State::NonExistent
            .apply_change(&Change {
                s: vec![Transition::Create(Eattrs::new())]
            })
            .is_ok());

        assert!(State::Live(Eattrs::new())
            .apply_change(&Change { s: vec![] })
            .is_ok());
        assert!(State::Live(Eattrs::new())
            .apply_change(&Change {
                s: vec![Transition::Create(Eattrs::new())]
            })
            .is_err());
    }
}

@ -323,7 +323,6 @@ impl From<SchemaAttribute> for EntryInitNew {
    fn from(value: SchemaAttribute) -> Self {
        let mut entry = EntryInitNew::new();

        #[allow(clippy::expect_used)]
        entry.set_ava(
            Attribute::AttributeName,
            vec![Value::new_iutf8(&value.name)],

@ -506,7 +505,6 @@ impl From<SchemaClass> for EntryInitNew {
    fn from(value: SchemaClass) -> Self {
        let mut entry = EntryInitNew::new();

        #[allow(clippy::expect_used)]
        entry.set_ava(Attribute::ClassName, vec![Value::new_iutf8(&value.name)]);

        // class

@ -37,14 +37,14 @@ use self::access::{
    AccessControlsWriteTransaction,
};

pub mod access;
pub(crate) mod access;
pub mod batch_modify;
pub mod create;
pub mod delete;
pub mod identity;
pub mod migrations;
pub(crate) mod migrations;
pub mod modify;
pub mod recycle;
pub(crate) mod recycle;

const RESOLVE_FILTER_CACHE_MAX: usize = 4096;
const RESOLVE_FILTER_CACHE_LOCAL: usize = 0;

@ -1085,14 +1085,15 @@ impl<'a> QueryServerTransaction<'a> for QueryServerWriteTransaction<'a> {
}

impl QueryServer {
    pub fn new(be: Backend, schema: Schema, domain_name: String) -> Self {
    pub fn new(be: Backend, schema: Schema, domain_name: String) -> Result<Self, OperationError> {
        let (s_uuid, d_uuid) = {
            let mut wr = be.write();
            let res = (wr.get_db_s_uuid(), wr.get_db_d_uuid());
            let mut wr = be.write().unwrap();
            let s_uuid = wr.get_db_s_uuid().unwrap();
            let d_uuid = wr.get_db_d_uuid().unwrap();
            #[allow(clippy::expect_used)]
            wr.commit()
                .expect("Critical - unable to commit db_s_uuid or db_d_uuid");
            res
            (s_uuid, d_uuid)
        };

        let pool_size = be.get_pool_size();

@ -1117,7 +1118,15 @@ impl QueryServer {
        let phase = Arc::new(CowCell::new(ServerPhase::Bootstrap));

        #[allow(clippy::expect_used)]
        QueryServer {
        let resolve_filter_cache = Arc::new(
            ARCacheBuilder::new()
                .set_size(RESOLVE_FILTER_CACHE_MAX, RESOLVE_FILTER_CACHE_LOCAL)
                .set_reader_quiesce(true)
                .build()
                .expect("Failed to build resolve_filter_cache"),
        );

        Ok(QueryServer {
            phase,
            s_uuid,
            d_info,

@ -1126,15 +1135,9 @@ impl QueryServer {
            accesscontrols: Arc::new(AccessControls::default()),
            db_tickets: Arc::new(Semaphore::new(pool_size as usize)),
            write_ticket: Arc::new(Semaphore::new(1)),
            resolve_filter_cache: Arc::new(
                ARCacheBuilder::new()
                    .set_size(RESOLVE_FILTER_CACHE_MAX, RESOLVE_FILTER_CACHE_LOCAL)
                    .set_reader_quiesce(true)
                    .build()
                    .expect("Failed to build resolve_filter_cache"),
            ),
            resolve_filter_cache,
            dyngroup_cache,
        }
        })
    }
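
Because QueryServer::new now returns Result<Self, OperationError> rather than panicking internally, callers must handle the error themselves. A rough sketch of a call site under that assumption (the surrounding setup and error handling here are illustrative, not the actual daemon code):

// `be` and `schema_outer` are assumed to come from earlier setup.
let query_server = match QueryServer::new(be, schema_outer, "example.com".to_string()) {
    Ok(qs) => qs,
    Err(err) => {
        error!(?err, "Unable to construct query server");
        return Err(err);
    }
};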

    pub fn try_quiesce(&self) {

@ -1159,7 +1162,7 @@ impl QueryServer {
        };

        QueryServerReadTransaction {
            be_txn: self.be.read(),
            be_txn: self.be.read().unwrap(),
            schema: self.schema.read(),
            d_info: self.d_info.read(),
            accesscontrols: self.accesscontrols.read(),

@ -1197,7 +1200,7 @@ impl QueryServer {
        };

        let schema_write = self.schema.write();
        let mut be_txn = self.be.write();
        let mut be_txn = self.be.write().unwrap();
        let d_info = self.d_info.write();
        let phase = self.phase.write();

@ -1513,7 +1516,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
    pub(crate) fn reload_domain_info(&mut self) -> Result<(), OperationError> {
        let domain_name = self.get_db_domain_name()?;
        let display_name = self.get_db_domain_display_name()?;
        let domain_uuid = self.be_txn.get_db_d_uuid();
        let domain_uuid = self.be_txn.get_db_d_uuid()?;
        let mut_d_info = self.d_info.get_mut();
        if mut_d_info.d_uuid != domain_uuid {
            admin_warn!(

@ -1,8 +1,6 @@
use crate::be::{Backend, BackendConfig};
use crate::prelude::*;
use crate::schema::Schema;
#[allow(unused_imports)]
use crate::utils::duration_from_epoch_now;

#[allow(clippy::expect_used)]
pub async fn setup_test() -> QueryServer {

@ -19,6 +17,7 @@ pub async fn setup_test() -> QueryServer {

    // Init is called via the proc macro
    QueryServer::new(be, schema_outer, "example.com".to_string())
        .expect("Failed to setup Query Server")
}

#[allow(clippy::expect_used)]

@ -37,6 +36,7 @@ pub async fn setup_pair_test() -> (QueryServer, QueryServer) {

        // Init is called via the proc macro
        QueryServer::new(be, schema_outer, "example.com".to_string())
            .expect("Failed to setup Query Server")
    };

    let qs_b = {

@ -51,6 +51,7 @@ pub async fn setup_pair_test() -> (QueryServer, QueryServer) {

        // Init is called via the proc macro
        QueryServer::new(be, schema_outer, "example.com".to_string())
            .expect("Failed to setup Query Server")
    };

    (qs_a, qs_b)

8
server/lib/src/time.rs
Normal file

@ -0,0 +1,8 @@
use std::time::{Duration, SystemTime};

pub fn duration_from_epoch_now() -> Duration {
    #[allow(clippy::expect_used)]
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .expect("invalid duration from epoch now")
}
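
The new time.rs module simply rehomes duration_from_epoch_now out of utils.rs (the removal appears below). A standalone equivalent, showing what the helper returns:

use std::time::{Duration, SystemTime};

fn duration_from_epoch_now() -> Duration {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .expect("invalid duration from epoch now")
}

fn main() {
    // Whole seconds since 1970-01-01T00:00:00Z, as used for timestamping.
    println!("epoch seconds: {}", duration_from_epoch_now().as_secs());
}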

@ -1,22 +1,7 @@
use std::fs::Metadata;
use std::io::ErrorKind;
#[cfg(target_os = "linux")]
use std::os::linux::fs::MetadataExt;
#[cfg(target_os = "macos")]
use std::os::macos::fs::MetadataExt;
use std::path::PathBuf;
use std::time::{Duration, SystemTime};

use filetime::FileTime;
use crate::prelude::*;
use hashbrown::HashSet;
use rand::distributions::Distribution;
use rand::{thread_rng, Rng};
use touch::file as touch_file;
// #[cfg(target_os = "windows")]
// use std::os::windows::fs::MetadataExt;
use crate::prelude::*;
#[cfg(target_family = "unix")]
use kanidm_utils_users::{get_current_gid, get_current_uid};

#[derive(Debug)]
pub struct DistinctAlpha;

@ -80,66 +65,6 @@ pub fn readable_password_from_random() -> String {
    )
}

pub fn duration_from_epoch_now() -> Duration {
    #[allow(clippy::expect_used)]
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .expect("invalid duration from epoch now")
}

pub fn touch_file_or_quit(file_path: &str) {
    /*
    Attempt to touch the file file_path, will quit the application if it fails for any reason.

    Will also create a new file if it doesn't already exist.
    */
    if PathBuf::from(file_path).exists() {
        let t = FileTime::from_system_time(SystemTime::now());
        match filetime::set_file_times(file_path, t, t) {
            Ok(_) => debug!(
                "Successfully touched existing file {}, can continue",
                file_path
            ),
            Err(e) => {
                match e.kind() {
                    ErrorKind::PermissionDenied => {
                        // we bail here because you won't be able to write them back...
                        error!("Permission denied writing to {}, quitting.", file_path)
                    }
                    _ => {
                        error!(
                            "Failed to write to {} due to error: {:?} ... quitting.",
                            file_path, e
                        )
                    }
                }
                std::process::exit(1);
            }
        }
    } else {
        match touch_file::write(file_path, "", false) {
            Ok(_) => debug!("Successfully touched new file {}", file_path),
            Err(e) => {
                error!(
                    "Failed to write to {} due to error: {:?} ... quitting.",
                    file_path, e
                );
                std::process::exit(1);
            }
        };
    }
}
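
This deletion pairs with dropping the touch crate from the workspace (see the Cargo.lock and Cargo.toml hunks earlier): the create-if-missing branch only ever wrote an empty file, which std can do alone. A sketch of an equivalent without touch, assuming the same quit-on-failure behaviour (create_file_or_quit is a hypothetical name, not part of this commit):

use std::fs::File;
use std::path::Path;

// Hypothetical std-only stand-in for touch_file::write(file_path, "", false).
fn create_file_or_quit(file_path: &str) {
    if !Path::new(file_path).exists() {
        if let Err(e) = File::create(file_path) {
            eprintln!(
                "Failed to write to {} due to error: {:?} ... quitting.",
                file_path, e
            );
            std::process::exit(1);
        }
    }
}

fn main() {
    create_file_or_quit("/tmp/kanidm-touch-example");
}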

/*
#[allow(dead_code)]
pub fn uuid_from_now(sid: Sid) -> Uuid {
    let d = SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH)
        .unwrap();
    uuid_from_duration(d, sid)
}
*/

impl Distribution<char> for DistinctAlpha {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> char {
        const RANGE: u32 = 55;

@ -156,40 +81,6 @@ impl Distribution<char> for DistinctAlpha {
    }
}

#[cfg(target_family = "unix")]
/// Check a given file's metadata is read-only for the current user (true = read-only)
pub fn file_permissions_readonly(meta: &Metadata) -> bool {
    // Who are we running as?
    let cuid = get_current_uid();
    let cgid = get_current_gid();

    // Who owns the file?
    // Who is the group owner of the file?
    let f_gid = meta.st_gid();
    let f_uid = meta.st_uid();

    let f_mode = meta.st_mode();

    !(
        // If we are the owner, we have write perms as we can alter the DAC rights
        cuid == f_uid ||
        // If we are the group owner, check the mode bits do not have write.
        (cgid == f_gid && (f_mode & 0o0020) != 0) ||
        // Finally, check that everyone bits don't have write.
        ((f_mode & 0o0002) != 0)
    )
}
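
The write check above is plain octal bit-masking against the group (0o0020) and world (0o0002) write bits. A standalone sanity check of those two masks:

fn main() {
    // rw-r--r--: neither group nor world write bits are set.
    let f_mode: u32 = 0o100644;
    assert_eq!(f_mode & 0o0020, 0); // group cannot write
    assert_eq!(f_mode & 0o0002, 0); // world cannot write

    // rw-rw-r--: the group write bit is set.
    let f_mode: u32 = 0o100664;
    assert_ne!(f_mode & 0o0020, 0); // group can write
}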

#[cfg(not(target_family = "unix"))]
/// Check a given file's metadata is read-only for the current user (true = read-only) Stub function if you're building for windows!
pub fn file_permissions_readonly(meta: &Metadata) -> bool {
    debug!(
        "Windows target asked to check metadata on {:?} returning false",
        meta
    );
    false
}

#[cfg(test)]
mod tests {
    use crate::prelude::*;

@ -18,6 +18,7 @@ chrono = { workspace = true }
cron = { workspace = true }
kanidm_client = { workspace = true }
kanidm_proto = { workspace = true }
kanidm_lib_file_permissions = { workspace = true }
tokio = { workspace = true, features = ["rt", "macros", "net"] }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }

@ -30,7 +31,7 @@ toml = { workspace = true }
url = { workspace = true, features = ["serde"] }
uuid = { workspace = true, features = ["serde"] }

# For file metadata, should this me moved out?
# Currently only for attribute - should attribute be broken out?
kanidmd_lib = { workspace = true }

[target.'cfg(target_family = "unix")'.dependencies]

@ -54,7 +54,8 @@ use kanidm_proto::scim_v1::{
    MultiValueAttr, ScimEntry, ScimExternalMember, ScimSshPubKey, ScimSyncGroup, ScimSyncPerson,
    ScimSyncRequest, ScimSyncRetentionMode, ScimSyncState, ScimTotp,
};
use kanidmd_lib::utils::file_permissions_readonly;

use kanidm_lib_file_permissions::readonly as file_permissions_readonly;

#[cfg(target_family = "unix")]
use kanidm_utils_users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid};

@ -18,6 +18,7 @@ chrono = { workspace = true }
cron = { workspace = true }
kanidm_client = { workspace = true }
kanidm_proto = { workspace = true }
kanidm_lib_file_permissions = { workspace = true }
tokio = { workspace = true, features = ["rt", "macros", "net"] }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] }

@ -30,7 +31,7 @@ toml = { workspace = true }
url = { workspace = true, features = ["serde"] }
uuid = { workspace = true, features = ["serde"] }

# For file metadata, should this me moved out?
# Currently only for attribute - should attribute be broken out?
kanidmd_lib = { workspace = true }

[target.'cfg(target_family = "unix")'.dependencies]

@ -43,11 +43,11 @@ use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};

use kanidm_client::KanidmClientBuilder;
use kanidm_lib_file_permissions::readonly as file_permissions_readonly;
use kanidm_proto::scim_v1::{
    MultiValueAttr, ScimEntry, ScimExternalMember, ScimSshPubKey, ScimSyncGroup, ScimSyncPerson,
    ScimSyncRequest, ScimSyncRetentionMode, ScimSyncState,
};
use kanidmd_lib::utils::file_permissions_readonly;

#[cfg(target_family = "unix")]
use kanidm_utils_users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid};