Compare commits

..

1 commit

Author SHA1 Message Date
phoenixbackups faadee0d7d
Merge 8ad70c6111 into a2eae53328 2025-04-01 21:28:23 +02:00
19 changed files with 701 additions and 942 deletions

View file

@ -53,6 +53,7 @@
- [Service Integration Examples](examples/readme.md)
- [Kubernetes Ingress](examples/kubernetes_ingress.md)
- [OAuth2 Examples](integrations/oauth2/examples.md)
- [Traefik](examples/traefik.md)
- [Replication](repl/readme.md)

View file

@ -1,4 +1,3 @@
version = "2"
bindaddress = "[::]:8443"
ldapbindaddress = "127.0.0.1:3636"

View file

@ -1,6 +1,3 @@
# The server configuration file version.
version = "2"
# The webserver bind address. Requires TLS certificates.
# If the port is set to 443 you may require the
# NET_BIND_SERVICE capability.

View file

@ -1,6 +1,3 @@
# The server configuration file version.
version = "2"
# The webserver bind address. Requires TLS certificates.
# If the port is set to 443 you may require the
# NET_BIND_SERVICE capability.

View file

@ -25,7 +25,7 @@ def recover_account(username: str) -> str:
"recover-account",
username,
"--config",
"./insecure_server.toml",
"../../examples/insecure_server.toml",
"--output",
"json",
]

View file

@ -44,7 +44,7 @@ fi
# defaults
KANIDM_CONFIG_FILE="./insecure_server.toml"
KANIDM_CONFIG_FILE="../../examples/insecure_server.toml"
KANIDM_URL="$(rg origin "${KANIDM_CONFIG_FILE}" | awk '{print $NF}' | tr -d '"')"
KANIDM_CA_PATH="/tmp/kanidm/ca.pem"
@ -83,7 +83,7 @@ if [ "${REMOVE_TEST_DB}" -eq 1 ]; then
rm /tmp/kanidm/kanidm.db || true
fi
export KANIDM_CONFIG="./insecure_server.toml"
export KANIDM_CONFIG="../../examples/insecure_server.toml"
IDM_ADMIN_USER="idm_admin@localhost"
echo "Resetting the idm_admin user..."

View file

@ -25,7 +25,7 @@ if [ ! -f "run_insecure_dev_server.sh" ]; then
exit 1
fi
export KANIDM_CONFIG="./insecure_server.toml"
export KANIDM_CONFIG="../../examples/insecure_server.toml"
mkdir -p /tmp/kanidm/client_ca
@ -48,7 +48,7 @@ fi
ATTEMPT=0
KANIDM_CONFIG_FILE="./insecure_server.toml"
KANIDM_CONFIG_FILE="../../examples/insecure_server.toml"
KANIDM_URL="$(rg origin "${KANIDM_CONFIG_FILE}" | awk '{print $NF}' | tr -d '"')"
KANIDM_CA_PATH="/tmp/kanidm/ca.pem"

View file

@ -191,7 +191,7 @@ impl QueryServerReadV1 {
pub async fn handle_online_backup(
&self,
msg: OnlineBackupEvent,
outpath: &Path,
outpath: &str,
versions: usize,
) -> Result<(), OperationError> {
trace!(eventid = ?msg.eventid, "Begin online backup event");
@ -200,12 +200,12 @@ impl QueryServerReadV1 {
#[allow(clippy::unwrap_used)]
let timestamp = now.format(&Rfc3339).unwrap();
let dest_file = outpath.join(format!("backup-{}.json", timestamp));
let dest_file = format!("{}/backup-{}.json", outpath, timestamp);
if dest_file.exists() {
if Path::new(&dest_file).exists() {
error!(
"Online backup file {} already exists, will not overwrite it.",
dest_file.display()
dest_file
);
return Err(OperationError::InvalidState);
}
@ -218,14 +218,10 @@ impl QueryServerReadV1 {
.get_be_txn()
.backup(&dest_file)
.map(|()| {
info!("Online backup created {} successfully", dest_file.display());
info!("Online backup created {} successfully", dest_file);
})
.map_err(|e| {
error!(
"Online backup failed to create {}: {:?}",
dest_file.display(),
e
);
error!("Online backup failed to create {}: {:?}", dest_file, e);
OperationError::InvalidState
})?;
}
@ -271,11 +267,7 @@ impl QueryServerReadV1 {
}
}
Err(e) => {
error!(
"Online backup cleanup error read dir {}: {}",
outpath.display(),
e
);
error!("Online backup cleanup error read dir {}: {}", outpath, e);
return Err(OperationError::InvalidState);
}
}

File diff suppressed because it is too large Load diff

View file

@ -112,19 +112,19 @@ impl IntervalActor {
if !op.exists() {
info!(
"Online backup output folder '{}' does not exist, trying to create it.",
outpath.display()
outpath
);
fs::create_dir_all(&outpath).map_err(|e| {
error!(
"Online backup failed to create output directory '{}': {}",
outpath.display(),
outpath.clone(),
e
)
})?;
}
if !op.is_dir() {
error!("Online backup output '{}' is not a directory or we are missing permissions to access it.", outpath.display());
error!("Online backup output '{}' is not a directory or we are missing permissions to access it.", outpath);
return Err(());
}
@ -148,7 +148,7 @@ impl IntervalActor {
if let Err(e) = server
.handle_online_backup(
OnlineBackupEvent::new(),
&outpath,
outpath.clone().as_str(),
versions,
)
.await

View file

@ -36,10 +36,9 @@ mod ldaps;
mod repl;
mod utils;
use crate::actors::{QueryServerReadV1, QueryServerWriteV1};
use crate::admin::AdminActor;
use crate::config::{Configuration, ServerRole};
use crate::interval::IntervalActor;
use std::fmt::{Display, Formatter};
use std::sync::Arc;
use crate::utils::touch_file_or_quit;
use compact_jwt::{JwsHs256Signer, JwsSigner};
use kanidm_proto::internal::OperationError;
@ -51,14 +50,17 @@ use kanidmd_lib::status::StatusActor;
use kanidmd_lib::value::CredentialType;
#[cfg(not(target_family = "windows"))]
use libc::umask;
use std::fmt::{Display, Formatter};
use std::path::Path;
use std::sync::Arc;
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::sync::Notify;
use tokio::task;
use crate::actors::{QueryServerReadV1, QueryServerWriteV1};
use crate::admin::AdminActor;
use crate::config::{Configuration, ServerRole};
use crate::interval::IntervalActor;
use tokio::sync::mpsc;
// === internal setup helpers
fn setup_backend(config: &Configuration, schema: &Schema) -> Result<Backend, OperationError> {
@ -78,7 +80,7 @@ fn setup_backend_vacuum(
let pool_size: u32 = config.threads as u32;
let cfg = BackendConfig::new(
config.db_path.as_deref(),
config.db_path.as_str(),
pool_size,
config.db_fs_type.unwrap_or_default(),
config.db_arc_size,
@ -333,7 +335,7 @@ pub fn dbscan_restore_quarantined_core(config: &Configuration, id: u64) {
};
}
pub fn backup_server_core(config: &Configuration, dst_path: &Path) {
pub fn backup_server_core(config: &Configuration, dst_path: &str) {
let schema = match Schema::new() {
Ok(s) => s,
Err(e) => {
@ -369,11 +371,8 @@ pub fn backup_server_core(config: &Configuration, dst_path: &Path) {
// Let the txn abort, even on success.
}
pub async fn restore_server_core(config: &Configuration, dst_path: &Path) {
// If it's an in memory database, we don't need to touch anything
if let Some(db_path) = config.db_path.as_ref() {
touch_file_or_quit(db_path);
}
pub async fn restore_server_core(config: &Configuration, dst_path: &str) {
touch_file_or_quit(config.db_path.as_str());
// First, we provide the in-memory schema so that core attrs are indexed correctly.
let schema = match Schema::new() {
@ -1012,7 +1011,7 @@ pub async fn create_server_core(
let tls_accepter_reload_task_notify = tls_acceptor_reload_notify.clone();
let tls_config = config.tls_config.clone();
let ldap_configured = config.ldapbindaddress.is_some();
let ldap_configured = config.ldapaddress.is_some();
let (ldap_tls_acceptor_reload_tx, ldap_tls_acceptor_reload_rx) = mpsc::channel(1);
let (http_tls_acceptor_reload_tx, http_tls_acceptor_reload_rx) = mpsc::channel(1);
@ -1077,7 +1076,7 @@ pub async fn create_server_core(
};
// If we have been requested to init LDAP, configure it now.
let maybe_ldap_acceptor_handle = match &config.ldapbindaddress {
let maybe_ldap_acceptor_handle = match &config.ldapaddress {
Some(la) => {
let opt_ldap_ssl_acceptor = maybe_tls_acceptor.clone();

View file

@ -1,39 +1,32 @@
use filetime::FileTime;
use std::fs::File;
use std::io::ErrorKind;
use std::path::Path;
use std::path::PathBuf;
use std::time::SystemTime;
pub fn touch_file_or_quit<P: AsRef<Path>>(file_path: P) {
pub fn touch_file_or_quit(file_path: &str) {
/*
Attempt to touch the file file_path, will quit the application if it fails for any reason.
Will also create a new file if it doesn't already exist.
*/
let file_path: &Path = file_path.as_ref();
if file_path.exists() {
if PathBuf::from(file_path).exists() {
let t = FileTime::from_system_time(SystemTime::now());
match filetime::set_file_times(file_path, t, t) {
Ok(_) => debug!(
"Successfully touched existing file {}, can continue",
file_path.display()
file_path
),
Err(e) => {
match e.kind() {
ErrorKind::PermissionDenied => {
// we bail here because you won't be able to write them back...
error!(
"Permission denied writing to {}, quitting.",
file_path.display()
)
error!("Permission denied writing to {}, quitting.", file_path)
}
_ => {
error!(
"Failed to write to {} due to error: {:?} ... quitting.",
file_path.display(),
e
file_path, e
)
}
}
@ -42,12 +35,11 @@ pub fn touch_file_or_quit<P: AsRef<Path>>(file_path: P) {
}
} else {
match File::create(file_path) {
Ok(_) => debug!("Successfully touched new file {}", file_path.display()),
Ok(_) => debug!("Successfully touched new file {}", file_path),
Err(e) => {
error!(
"Failed to write to {} due to error: {:?} ... quitting.",
file_path.display(),
e
file_path, e
);
std::process::exit(1);
}

View file

@ -22,7 +22,7 @@ fi
mkdir -p "${KANI_TMP}"/client_ca
CONFIG_FILE=${CONFIG_FILE:="${SCRIPT_DIR}/insecure_server.toml"}
CONFIG_FILE=${CONFIG_FILE:="${SCRIPT_DIR}/../../examples/insecure_server.toml"}
if [ ! -f "${CONFIG_FILE}" ]; then
echo "Couldn't find configuration file at ${CONFIG_FILE}, please ensure you're running this script from its base directory (${SCRIPT_DIR})."

View file

@ -37,7 +37,7 @@ use kanidmd_core::admin::{
AdminTaskRequest, AdminTaskResponse, ClientCodec, ProtoDomainInfo,
ProtoDomainUpgradeCheckReport, ProtoDomainUpgradeCheckStatus,
};
use kanidmd_core::config::{CliConfig, Configuration, EnvironmentConfig, ServerConfigUntagged};
use kanidmd_core::config::{Configuration, ServerConfig};
use kanidmd_core::{
backup_server_core, cert_generate_core, create_server_core, dbscan_get_id2entry_core,
dbscan_list_id2entry_core, dbscan_list_index_analysis_core, dbscan_list_index_core,
@ -379,13 +379,17 @@ fn check_file_ownership(opt: &KanidmdParser) -> Result<(), ExitCode> {
}
// We have to do this because we can't use tracing until we've started the logging pipeline, and we can't start the logging pipeline until the tokio runtime's doing its thing.
async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
async fn start_daemon(
opt: KanidmdParser,
mut config: Configuration,
sconfig: ServerConfig,
) -> ExitCode {
// if we have a server config and it has an OTEL URL, then we'll start the logging pipeline now.
// TODO: only send to stderr when we're not in a TTY
let sub = match sketching::otel::start_logging_pipeline(
&config.otel_grpc_url,
config.log_level,
&sconfig.otel_grpc_url,
sconfig.log_level.unwrap_or_default(),
"kanidmd",
) {
Err(err) => {
@ -419,8 +423,8 @@ async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
return err;
};
if let Some(db_path) = config.db_path.as_ref() {
let db_pathbuf = db_path.to_path_buf();
if let Some(db_path) = sconfig.db_path.as_ref() {
let db_pathbuf = PathBuf::from(db_path.as_str());
// We can't check the db_path permissions because it may not exist yet!
if let Some(db_parent_path) = db_pathbuf.parent() {
if !db_parent_path.exists() {
@ -460,11 +464,33 @@ async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str().unwrap_or("invalid file path"));
}
}
config.update_db_path(db_path);
} else {
error!("No db_path set in configuration, server startup will FAIL!");
return ExitCode::FAILURE;
}
if let Some(origin) = sconfig.origin.clone() {
config.update_origin(&origin);
} else {
error!("No origin set in configuration, server startup will FAIL!");
return ExitCode::FAILURE;
}
if let Some(domain) = sconfig.domain.clone() {
config.update_domain(&domain);
} else {
error!("No domain set in configuration, server startup will FAIL!");
return ExitCode::FAILURE;
}
config.update_db_arc_size(sconfig.get_db_arc_size());
config.update_role(sconfig.role);
config.update_output_mode(opt.commands.commonopt().output_mode.to_owned().into());
config.update_trust_x_forward_for(sconfig.trust_x_forward_for);
config.update_admin_bind_path(&sconfig.adminbindpath);
config.update_replication_config(sconfig.repl_config.clone());
match &opt.commands {
// we aren't going to touch the DB so we can carry on
KanidmdOpt::ShowReplicationCertificate { .. }
@ -475,15 +501,19 @@ async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
_ => {
// Okay - Lets now create our lock and go.
#[allow(clippy::expect_used)]
let klock_path = match config.db_path.clone() {
Some(val) => val.with_extension("klock"),
None => std::env::temp_dir().join("kanidmd.klock"),
let klock_path = match sconfig.db_path.clone() {
Some(val) => format!("{}.klock", val),
None => std::env::temp_dir()
.join("kanidmd.klock")
.to_str()
.expect("Unable to create klock path, this is a critical error!")
.to_string(),
};
let flock = match File::create(&klock_path) {
Ok(flock) => flock,
Err(e) => {
error!("ERROR: Refusing to start - unable to create kanidmd exclusive lock at {} - {:?}", klock_path.display(), e);
error!("ERROR: Refusing to start - unable to create kanidmd exclusive lock at {} - {:?}", klock_path, e);
return ExitCode::FAILURE;
}
};
@ -491,7 +521,7 @@ async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
match flock.try_lock_exclusive() {
Ok(()) => debug!("Acquired kanidm exclusive lock"),
Err(e) => {
error!("ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {} - {:?}", klock_path.display(), e);
error!("ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {} - {:?}", klock_path, e);
error!("Is another kanidmd process running?");
return ExitCode::FAILURE;
}
@ -499,7 +529,7 @@ async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
}
}
kanidm_main(config, opt).await
kanidm_main(sconfig, config, opt).await
}
fn main() -> ExitCode {
@ -526,6 +556,10 @@ fn main() -> ExitCode {
return ExitCode::SUCCESS;
};
//we set up a list of these so we can set the log config THEN log out the errors.
let mut config_error: Vec<String> = Vec::new();
let mut config = Configuration::new();
if env!("KANIDM_SERVER_CONFIG_PATH").is_empty() {
println!("CRITICAL: Kanidmd was not built correctly and is missing a valid KANIDM_SERVER_CONFIG_PATH value");
return ExitCode::FAILURE;
@ -547,56 +581,49 @@ fn main() -> ExitCode {
}
};
let maybe_sconfig = if let Some(config_path) = maybe_config_path {
match ServerConfigUntagged::new(config_path) {
Ok(c) => Some(c),
Err(err) => {
eprintln!("ERROR: Configuration Parse Failure: {:?}", err);
return ExitCode::FAILURE;
}
}
} else {
eprintln!("WARNING: No configuration path was provided, relying on environment variables.");
None
};
let envconfig = match EnvironmentConfig::new() {
Ok(ec) => ec,
Err(err) => {
eprintln!("ERROR: Environment Configuration Parse Failure: {:?}", err);
let sconfig = match ServerConfig::new(maybe_config_path) {
Ok(c) => Some(c),
Err(e) => {
config_error.push(format!("Config Parse failure {:?}", e));
return ExitCode::FAILURE;
}
};
let cli_config = CliConfig {
output_mode: Some(opt.commands.commonopt().output_mode.to_owned().into()),
};
let is_server = matches!(&opt.commands, KanidmdOpt::Server(_));
let config = Configuration::build()
.add_env_config(envconfig)
.add_opt_toml_config(maybe_sconfig)
// We always set threads to 1 unless it's the main server.
.add_cli_config(cli_config)
.is_server_mode(is_server)
.finish();
let Some(config) = config else {
eprintln!(
"ERROR: Unable to build server configuration from provided configuration inputs."
);
return ExitCode::FAILURE;
};
// ===========================================================================
// Config ready
// Get information on the windows username
#[cfg(target_family = "windows")]
get_user_details_windows();
if !config_error.is_empty() {
println!("There were errors on startup, which prevent the server from starting:");
for e in config_error {
println!(" - {}", e);
}
return ExitCode::FAILURE;
}
let sconfig = match sconfig {
Some(val) => val,
None => {
println!("Somehow you got an empty ServerConfig after error checking? Cannot start!");
return ExitCode::FAILURE;
}
};
// ===========================================================================
// Config ready
// We always set threads to 1 unless it's the main server.
if matches!(&opt.commands, KanidmdOpt::Server(_)) {
// If not updated, will default to maximum
if let Some(threads) = sconfig.thread_count {
config.update_threads_count(threads);
}
} else {
config.update_threads_count(1);
};
// Start the runtime
let maybe_rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(config.threads)
.enable_all()
@ -616,12 +643,16 @@ fn main() -> ExitCode {
}
};
rt.block_on(start_daemon(opt, config))
rt.block_on(start_daemon(opt, config, sconfig))
}
/// Build and execute the main server. The ServerConfig are the configuration options
/// that we are processing into the config for the main server.
async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
async fn kanidm_main(
sconfig: ServerConfig,
mut config: Configuration,
opt: KanidmdParser,
) -> ExitCode {
match &opt.commands {
KanidmdOpt::Server(_sopt) | KanidmdOpt::ConfigTest(_sopt) => {
let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest(_));
@ -631,90 +662,88 @@ async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
info!("Running in server mode ...");
};
// Verify the TLs configs.
if let Some(tls_config) = config.tls_config.as_ref() {
{
let i_meta = match metadata(&tls_config.chain) {
Ok(m) => m,
Err(e) => {
error!(
"Unable to read metadata for TLS chain file '{}' - {:?}",
tls_config.chain.display(),
e
);
let diag =
kanidm_lib_file_permissions::diagnose_path(&tls_config.chain);
info!(%diag);
return ExitCode::FAILURE;
}
};
if !kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.chain.display());
}
}
// configuration options that only relate to server mode
config.update_config_for_server_mode(&sconfig);
{
let i_meta = match metadata(&tls_config.key) {
Ok(m) => m,
Err(e) => {
error!(
"Unable to read metadata for TLS key file '{}' - {:?}",
tls_config.key.display(),
e
);
let diag = kanidm_lib_file_permissions::diagnose_path(&tls_config.key);
info!(%diag);
return ExitCode::FAILURE;
}
};
if !kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.key.display());
}
#[cfg(not(target_os = "windows"))]
if i_meta.mode() & 0o007 != 0 {
warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", tls_config.key.display());
}
}
if let Some(ca_dir) = tls_config.client_ca.as_ref() {
// check that the TLS client CA config option is what we expect
let ca_dir_path = PathBuf::from(&ca_dir);
if !ca_dir_path.exists() {
if let Some(i_str) = &(sconfig.tls_chain) {
let i_path = PathBuf::from(i_str.as_str());
let i_meta = match metadata(&i_path) {
Ok(m) => m,
Err(e) => {
error!(
"TLS CA folder {} does not exist, server startup will FAIL!",
ca_dir.display()
"Unable to read metadata for TLS chain file '{}' - {:?}",
&i_path.to_str().unwrap_or("invalid file path"),
e
);
let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
let diag = kanidm_lib_file_permissions::diagnose_path(&i_path);
info!(%diag);
}
let i_meta = match metadata(&ca_dir_path) {
Ok(m) => m,
Err(e) => {
error!(
"Unable to read metadata for '{}' - {:?}",
ca_dir.display(),
e
);
let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
info!(%diag);
return ExitCode::FAILURE;
}
};
if !i_meta.is_dir() {
error!(
"ERROR: Refusing to run - TLS Client CA folder {} may not be a directory",
ca_dir.display()
);
return ExitCode::FAILURE;
}
if kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("WARNING: TLS Client CA folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", ca_dir.display());
};
if !kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", i_str);
}
}
if let Some(i_str) = &(sconfig.tls_key) {
let i_path = PathBuf::from(i_str.as_str());
let i_meta = match metadata(&i_path) {
Ok(m) => m,
Err(e) => {
error!(
"Unable to read metadata for TLS key file '{}' - {:?}",
&i_path.to_str().unwrap_or("invalid file path"),
e
);
let diag = kanidm_lib_file_permissions::diagnose_path(&i_path);
info!(%diag);
return ExitCode::FAILURE;
}
#[cfg(not(target_os = "windows"))]
if i_meta.mode() & 0o007 != 0 {
warn!("WARNING: TLS Client CA folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", ca_dir.display());
};
if !kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", i_str);
}
#[cfg(not(target_os = "windows"))]
if i_meta.mode() & 0o007 != 0 {
warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", i_str);
}
}
if let Some(ca_dir) = &(sconfig.tls_client_ca) {
// check that the TLS client CA config option is what we expect
let ca_dir_path = PathBuf::from(&ca_dir);
if !ca_dir_path.exists() {
error!(
"TLS CA folder {} does not exist, server startup will FAIL!",
ca_dir
);
let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
info!(%diag);
}
let i_meta = match metadata(&ca_dir_path) {
Ok(m) => m,
Err(e) => {
error!("Unable to read metadata for '{}' - {:?}", ca_dir, e);
let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
info!(%diag);
return ExitCode::FAILURE;
}
};
if !i_meta.is_dir() {
error!(
"ERROR: Refusing to run - TLS Client CA folder {} may not be a directory",
ca_dir
);
return ExitCode::FAILURE;
}
if kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("WARNING: TLS Client CA folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", ca_dir);
}
#[cfg(not(target_os = "windows"))]
if i_meta.mode() & 0o007 != 0 {
warn!("WARNING: TLS Client CA folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", ca_dir);
}
}
@ -724,6 +753,14 @@ async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
#[cfg(target_os = "linux")]
{
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);
// Undocumented systemd feature - all messages should have a monotonic usec sent
// with them. In some cases like "reloading" messages, it is undocumented but
// failure to send this message causes the reload to fail.
if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
let _ = sd_notify::notify(true, &[monotonic_usec]);
} else {
error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
};
let _ = sd_notify::notify(
true,
&[sd_notify::NotifyState::Status("Started Kanidm 🦀")],
@ -737,80 +774,86 @@ async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
{
let mut listener = sctx.subscribe();
tokio::select! {
Ok(()) = tokio::signal::ctrl_c() => {
break
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::terminate();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
break
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::alarm();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::hangup();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Reload TLS certificates
// systemd has a special reload handler for this.
#[cfg(target_os = "linux")]
{
if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Reloading, monotonic_usec]);
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reloading ...")]);
} else {
error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
};
}
Ok(()) = tokio::signal::ctrl_c() => {
break
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::terminate();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
break
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::alarm();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::hangup();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Reload TLS certificates
// systemd has a special reload handler for this.
#[cfg(target_os = "linux")]
{
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]);
// CRITICAL - if you do not send a monotonic usec message after a reloading
// message, your service WILL BE KILLED.
if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
let _ =
sd_notify::notify(true, &[monotonic_usec]);
} else {
error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
};
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reloading ...")]);
}
sctx.tls_acceptor_reload().await;
sctx.tls_acceptor_reload().await;
// Systemd freaks out if you send the ready state too fast after the
// reload state and can kill Kanidmd as a result.
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
// Systemd freaks out if you send the ready state too fast after the
// reload state and can kill Kanidmd as a result.
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
#[cfg(target_os = "linux")]
{
if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready, monotonic_usec]);
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reload Success")]);
} else {
error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
};
}
#[cfg(target_os = "linux")]
{
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);
if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
let _ =
sd_notify::notify(true, &[monotonic_usec]);
} else {
error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
};
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reload Success")]);
}
info!("Reload complete");
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::user_defined1();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::user_defined2();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
// we got a message on the broadcast from somewhere else
Ok(msg) = async move {
listener.recv().await
} => {
debug!("Main loop received message: {:?}", msg);
break
}
}
info!("Reload complete");
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::user_defined1();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::user_defined2();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
// we got a message on the broadcast from somewhere else
Ok(msg) = async move {
listener.recv().await
} => {
debug!("Main loop received message: {:?}", msg);
break
}
}
}
#[cfg(target_family = "windows")]
{
@ -837,19 +880,34 @@ async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
}
KanidmdOpt::CertGenerate(_sopt) => {
info!("Running in certificate generate mode ...");
config.update_config_for_server_mode(&sconfig);
cert_generate_core(&config);
}
KanidmdOpt::Database {
commands: DbCommands::Backup(bopt),
} => {
info!("Running in backup mode ...");
backup_server_core(&config, &bopt.path);
let p = match bopt.path.to_str() {
Some(p) => p,
None => {
error!("Invalid backup path");
return ExitCode::FAILURE;
}
};
backup_server_core(&config, p);
}
KanidmdOpt::Database {
commands: DbCommands::Restore(ropt),
} => {
info!("Running in restore mode ...");
restore_server_core(&config, &ropt.path).await;
let p = match ropt.path.to_str() {
Some(p) => p,
None => {
error!("Invalid restore path");
return ExitCode::FAILURE;
}
};
restore_server_core(&config, p).await;
}
KanidmdOpt::Database {
commands: DbCommands::Verify(_vopt),
@ -1030,6 +1088,8 @@ async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
vacuum_server_core(&config);
}
KanidmdOpt::HealthCheck(sopt) => {
config.update_config_for_server_mode(&sconfig);
debug!("{sopt:?}");
let healthcheck_url = match &sopt.check_origin {
@ -1050,15 +1110,12 @@ async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
.danger_accept_invalid_hostnames(!sopt.verify_tls)
.https_only(true);
client = match &config.tls_config {
client = match &sconfig.tls_chain {
None => client,
Some(tls_config) => {
debug!(
"Trying to load {} to build a CA cert path",
tls_config.chain.display()
);
Some(ca_cert) => {
debug!("Trying to load {} to build a CA cert path", ca_cert);
// if the ca_cert file exists, then we'll use it
let ca_cert_path = tls_config.chain.clone();
let ca_cert_path = PathBuf::from(ca_cert);
match ca_cert_path.exists() {
true => {
let mut cert_buf = Vec::new();
@ -1091,10 +1148,7 @@ async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
client
}
false => {
warn!(
"Couldn't find ca cert {} but carrying on...",
tls_config.chain.display()
);
warn!("Couldn't find ca cert {} but carrying on...", ca_cert);
client
}
}

View file

@ -1,21 +1,27 @@
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::convert::{TryFrom, TryInto};
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use super::keystorage::{KeyHandle, KeyHandleId};
// use crate::valueset;
use hashbrown::HashMap;
use idlset::v2::IDLBitRange;
use kanidm_proto::internal::{ConsistencyError, OperationError};
use rusqlite::vtab::array::Array;
use rusqlite::{Connection, OpenFlags, OptionalExtension};
use uuid::Uuid;
use crate::be::dbentry::DbIdentSpn;
use crate::be::dbvalue::DbCidV1;
use crate::be::{BackendConfig, IdList, IdRawEntry, IdxKey, IdxSlope};
use crate::entry::{Entry, EntryCommitted, EntrySealed};
use crate::prelude::*;
use crate::value::{IndexType, Value};
use hashbrown::HashMap;
use idlset::v2::IDLBitRange;
use kanidm_proto::internal::{ConsistencyError, OperationError};
use rusqlite::vtab::array::Array;
use rusqlite::{Connection, OpenFlags, OptionalExtension};
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::convert::{TryFrom, TryInto};
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use uuid::Uuid;
// use uuid::Uuid;
const DBV_ID2ENTRY: &str = "id2entry";
const DBV_INDEXV: &str = "indexv";
@ -1706,7 +1712,7 @@ impl IdlSqliteWriteTransaction {
impl IdlSqlite {
pub fn new(cfg: &BackendConfig, vacuum: bool) -> Result<Self, OperationError> {
if cfg.path.as_os_str().is_empty() {
if cfg.path.is_empty() {
debug_assert_eq!(cfg.pool_size, 1);
}
// If provided, set the page size to match the tuning we want. By default we use 4096. The VACUUM
@ -1728,7 +1734,8 @@ impl IdlSqlite {
// Initial setup routines.
{
let vconn = Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error)?;
let vconn =
Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error)?;
vconn
.execute_batch(
@ -1757,7 +1764,8 @@ impl IdlSqlite {
);
*/
let vconn = Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error)?;
let vconn =
Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error)?;
vconn
.execute_batch("PRAGMA wal_checkpoint(TRUNCATE);")
@ -1778,7 +1786,8 @@ impl IdlSqlite {
OperationError::SqliteError
})?;
let vconn = Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error)?;
let vconn =
Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error)?;
vconn
.pragma_update(None, "page_size", cfg.fstype as u32)
@ -1812,7 +1821,7 @@ impl IdlSqlite {
.map(|i| {
trace!("Opening Connection {}", i);
let conn =
Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error);
Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error);
match conn {
Ok(conn) => {
// We need to set the cachesize at this point as well.

View file

@ -4,6 +4,20 @@
//! is to persist content safely to disk, load that content, and execute queries
//! utilising indexes in the most effective way possible.
use std::collections::BTreeMap;
use std::fs;
use std::ops::DerefMut;
use std::sync::Arc;
use std::time::Duration;
use concread::cowcell::*;
use hashbrown::{HashMap as Map, HashSet};
use idlset::v2::IDLBitRange;
use idlset::AndNot;
use kanidm_proto::internal::{ConsistencyError, OperationError};
use tracing::{trace, trace_span};
use uuid::Uuid;
use crate::be::dbentry::{DbBackup, DbEntry};
use crate::be::dbrepl::DbReplMeta;
use crate::entry::Entry;
@ -17,19 +31,6 @@ use crate::repl::ruv::{
};
use crate::utils::trigraph_iter;
use crate::value::{IndexType, Value};
use concread::cowcell::*;
use hashbrown::{HashMap as Map, HashSet};
use idlset::v2::IDLBitRange;
use idlset::AndNot;
use kanidm_proto::internal::{ConsistencyError, OperationError};
use std::collections::BTreeMap;
use std::fs;
use std::ops::DerefMut;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use tracing::{trace, trace_span};
use uuid::Uuid;
pub(crate) mod dbentry;
pub(crate) mod dbrepl;
@ -131,7 +132,7 @@ impl IdxMeta {
#[derive(Clone)]
pub struct BackendConfig {
path: PathBuf,
path: String,
pool_size: u32,
db_name: &'static str,
fstype: FsType,
@ -140,16 +141,10 @@ pub struct BackendConfig {
}
impl BackendConfig {
pub fn new(
path: Option<&Path>,
pool_size: u32,
fstype: FsType,
arcsize: Option<usize>,
) -> Self {
pub fn new(path: &str, pool_size: u32, fstype: FsType, arcsize: Option<usize>) -> Self {
BackendConfig {
pool_size,
// This means if path is None, that "" implies an sqlite in memory/ram only database.
path: path.unwrap_or_else(|| Path::new("")).to_path_buf(),
path: path.to_string(),
db_name: "main",
fstype,
arcsize,
@ -159,7 +154,7 @@ impl BackendConfig {
pub(crate) fn new_test(db_name: &'static str) -> Self {
BackendConfig {
pool_size: 1,
path: PathBuf::from(""),
path: "".to_string(),
db_name,
fstype: FsType::Generic,
arcsize: Some(2048),
@ -941,7 +936,7 @@ pub trait BackendTransaction {
self.get_ruv().verify(&entries, results);
}
fn backup(&mut self, dst_path: &Path) -> Result<(), OperationError> {
fn backup(&mut self, dst_path: &str) -> Result<(), OperationError> {
let repl_meta = self.get_ruv().to_db_backup_ruv();
// load all entries into RAM, may need to change this later
@ -1813,7 +1808,7 @@ impl<'a> BackendWriteTransaction<'a> {
Ok(slope)
}
pub fn restore(&mut self, src_path: &Path) -> Result<(), OperationError> {
pub fn restore(&mut self, src_path: &str) -> Result<(), OperationError> {
let serialized_string = fs::read_to_string(src_path).map_err(|e| {
admin_error!("fs::read_to_string {:?}", e);
OperationError::FsError
@ -2126,7 +2121,7 @@ impl Backend {
debug!(db_tickets = ?cfg.pool_size, profile = %env!("KANIDM_PROFILE_NAME"), cpu_flags = %env!("KANIDM_CPU_FLAGS"));
// If in memory, reduce pool to 1
if cfg.path.as_os_str().is_empty() {
if cfg.path.is_empty() {
cfg.pool_size = 1;
}
@ -2212,6 +2207,13 @@ impl Backend {
#[cfg(test)]
mod tests {
use std::fs;
use std::iter::FromIterator;
use std::sync::Arc;
use std::time::Duration;
use idlset::v2::IDLBitRange;
use super::super::entry::{Entry, EntryInit, EntryNew};
use super::Limits;
use super::{
@ -2221,12 +2223,6 @@ mod tests {
use crate::prelude::*;
use crate::repl::cid::Cid;
use crate::value::{IndexType, PartialValue, Value};
use idlset::v2::IDLBitRange;
use std::fs;
use std::iter::FromIterator;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
lazy_static! {
static ref CID_ZERO: Cid = Cid::new_zero();
@ -2601,9 +2597,11 @@ mod tests {
#[test]
fn test_be_backup_restore() {
let db_backup_file_name =
Path::new(option_env!("OUT_DIR").unwrap_or("/tmp")).join(".backup_test.json");
eprintln!(" ⚠️ {}", db_backup_file_name.display());
let db_backup_file_name = format!(
"{}/.backup_test.json",
option_env!("OUT_DIR").unwrap_or("/tmp")
);
eprintln!(" ⚠️ {db_backup_file_name}");
run_test!(|be: &mut BackendWriteTransaction| {
// Important! Need db metadata setup!
be.reset_db_s_uuid().unwrap();
@ -2658,9 +2656,11 @@ mod tests {
#[test]
fn test_be_backup_restore_tampered() {
let db_backup_file_name =
Path::new(option_env!("OUT_DIR").unwrap_or("/tmp")).join(".backup2_test.json");
eprintln!(" ⚠️ {}", db_backup_file_name.display());
let db_backup_file_name = format!(
"{}/.backup2_test.json",
option_env!("OUT_DIR").unwrap_or("/tmp")
);
eprintln!(" ⚠️ {db_backup_file_name}");
run_test!(|be: &mut BackendWriteTransaction| {
// Important! Need db metadata setup!
be.reset_db_s_uuid().unwrap();

View file

@ -63,7 +63,7 @@ fn parse_attributes(
"ldap" => {
flags.ldap = true;
field_modifications.extend(quote! {
ldapbindaddress: Some("on".to_string()),})
ldapaddress: Some("on".to_string()),})
}
_ => {
let field_name = p.value().left.to_token_stream(); // here we can use to_token_stream as we know we're iterating over ExprAssigns

View file

@ -84,9 +84,9 @@ pub async fn setup_async_test(mut config: Configuration) -> AsyncTestEnvironment
let addr = format!("http://localhost:{}", port);
let ldap_url = if config.ldapbindaddress.is_some() {
let ldap_url = if config.ldapaddress.is_some() {
let ldapport = port_loop();
config.ldapbindaddress = Some(format!("127.0.0.1:{}", ldapport));
config.ldapaddress = Some(format!("127.0.0.1:{}", ldapport));
Url::parse(&format!("ldap://127.0.0.1:{}", ldapport))
.inspect_err(|err| error!(?err, "ldap address setup"))
.ok()

View file

@ -70,7 +70,7 @@ async fn setup_test(fix_fn: Fixture) -> (Resolver, KanidmClient) {
});
// Setup the config ...
let mut config = Configuration::new_for_test();
let mut config = Configuration::new();
config.address = format!("127.0.0.1:{}", port);
config.integration_test_config = Some(int_config);
config.role = ServerRole::WriteReplicaNoUI;