Compare commits

...

5 commits

Author SHA1 Message Date
phoenixbackups a807ce83c0
Merge 8ad70c6111 into ad012cd6fd 2025-04-05 03:31:43 +02:00
Arian van Putten ad012cd6fd
implement notify-reload protocol () 2025-04-04 09:24:14 +10:00
Firstyear 82a883089f
Allow versioning of server configs ()
This allows our server configuration to be versioned, in preparation
for a change related to the proxy protocol additions.
2025-04-02 02:44:19 +00:00
phoenixbackups 8ad70c6111
Merge pull request from phoenixbackups/phoenixbackups-patch-1
Create bookstack.md
2024-03-18 18:08:41 -04:00
phoenixbackups ed342e562d
Create bookstack.md
Add bookstack config example
2024-03-18 18:07:33 -04:00
20 changed files with 974 additions and 673 deletions

View file

@ -53,7 +53,6 @@
- [Service Integration Examples](examples/readme.md) - [Service Integration Examples](examples/readme.md)
- [Kubernetes Ingress](examples/kubernetes_ingress.md) - [Kubernetes Ingress](examples/kubernetes_ingress.md)
- [OAuth2 Examples](integrations/oauth2/examples.md)
- [Traefik](examples/traefik.md) - [Traefik](examples/traefik.md)
- [Replication](repl/readme.md) - [Replication](repl/readme.md)

60
examples/bookstack.md Normal file
View file

@ -0,0 +1,60 @@
# Bookstack (non-docker version)
## On Kanidm
### 1. Create the bookstack resource server
```
kanidm system oauth2 create bookstack "Bookstack" https://yourbookstack.example.com
```
### 2. Create the appropriate group(s)
```
kanidm group create bookstack-users --name idm_admin
```
### 3. Add the appropriate users to the group
```
kanidm group add-members bookstack-users user.name
```
### 4. Add the scopes:
```
kanidm system oauth2 update-scope-map bookstack openid profile email keys
```
### 5. Get the client secret:
```
kanidm system oauth2 show-basic-secret bookstack
```
Copy the value that is returned.
### 6. Disable PKCE / Enable Legacy crypto
```
kanidm system oauth2 warning-insecure-client-disable-pkce bookstack
kanidm system oauth2 warning-enable-legacy-crypto bookstack
```
## On Bookstack server
### 1. Add the following to the .env file at the bottom
```
#OIDC
AUTH_AUTO_INITIATE=false
OIDC_NAME=Kanidm
OIDC_DISPLAY_NAME_CLAIMS=openid
OIDC_CLIENT_ID=bookstack
OIDC_CLIENT_SECRET=<secret from step 5>
OIDC_ISSUER=https://idm.example.com:8443/oauth2/openid/bookstack
OIDC_END_SESSION_ENDPOINT=false
OIDC_ISSUER_DISCOVER=true
OIDC_DUMP_USER_DETAILS=false
OIDC_EXTERNAL_ID_CLAIM=openid
```
### 2. Change the AUTH_METHOD to oidc in the .env file
```
AUTH_METHOD=oidc
```
### 3. Open the `app/Access/Oidc/OidcService.php` file with your favorite editor.
### 4. Go to line 214 and make the following changes:
```
return [
'external_id' => $token->getClaim('sub'),
'email' => $token->getClaim('email'),
'name' => $token->getClaim('name'),
'groups' => $this->getUserGroups($token),
];
```
Open your Bookstack URL and click the "Sign in with Kanidm" button.

View file

@ -1,3 +1,6 @@
# The server configuration file version.
version = "2"
# The webserver bind address. Requires TLS certificates. # The webserver bind address. Requires TLS certificates.
# If the port is set to 443 you may require the # If the port is set to 443 you may require the
# NET_BIND_SERVICE capability. # NET_BIND_SERVICE capability.

View file

@ -1,3 +1,6 @@
# The server configuration file version.
version = "2"
# The webserver bind address. Requires TLS certificates. # The webserver bind address. Requires TLS certificates.
# If the port is set to 443 you may require the # If the port is set to 443 you may require the
# NET_BIND_SERVICE capability. # NET_BIND_SERVICE capability.

View file

@ -25,7 +25,7 @@ def recover_account(username: str) -> str:
"recover-account", "recover-account",
username, username,
"--config", "--config",
"../../examples/insecure_server.toml", "./insecure_server.toml",
"--output", "--output",
"json", "json",
] ]

View file

@ -44,7 +44,7 @@ fi
# defaults # defaults
KANIDM_CONFIG_FILE="../../examples/insecure_server.toml" KANIDM_CONFIG_FILE="./insecure_server.toml"
KANIDM_URL="$(rg origin "${KANIDM_CONFIG_FILE}" | awk '{print $NF}' | tr -d '"')" KANIDM_URL="$(rg origin "${KANIDM_CONFIG_FILE}" | awk '{print $NF}' | tr -d '"')"
KANIDM_CA_PATH="/tmp/kanidm/ca.pem" KANIDM_CA_PATH="/tmp/kanidm/ca.pem"
@ -83,7 +83,7 @@ if [ "${REMOVE_TEST_DB}" -eq 1 ]; then
rm /tmp/kanidm/kanidm.db || true rm /tmp/kanidm/kanidm.db || true
fi fi
export KANIDM_CONFIG="../../examples/insecure_server.toml" export KANIDM_CONFIG="./insecure_server.toml"
IDM_ADMIN_USER="idm_admin@localhost" IDM_ADMIN_USER="idm_admin@localhost"
echo "Resetting the idm_admin user..." echo "Resetting the idm_admin user..."

View file

@ -25,7 +25,7 @@ if [ ! -f "run_insecure_dev_server.sh" ]; then
exit 1 exit 1
fi fi
export KANIDM_CONFIG="../../examples/insecure_server.toml" export KANIDM_CONFIG="./insecure_server.toml"
mkdir -p /tmp/kanidm/client_ca mkdir -p /tmp/kanidm/client_ca
@ -48,7 +48,7 @@ fi
ATTEMPT=0 ATTEMPT=0
KANIDM_CONFIG_FILE="../../examples/insecure_server.toml" KANIDM_CONFIG_FILE="./insecure_server.toml"
KANIDM_URL="$(rg origin "${KANIDM_CONFIG_FILE}" | awk '{print $NF}' | tr -d '"')" KANIDM_URL="$(rg origin "${KANIDM_CONFIG_FILE}" | awk '{print $NF}' | tr -d '"')"
KANIDM_CA_PATH="/tmp/kanidm/ca.pem" KANIDM_CA_PATH="/tmp/kanidm/ca.pem"

View file

@ -191,7 +191,7 @@ impl QueryServerReadV1 {
pub async fn handle_online_backup( pub async fn handle_online_backup(
&self, &self,
msg: OnlineBackupEvent, msg: OnlineBackupEvent,
outpath: &str, outpath: &Path,
versions: usize, versions: usize,
) -> Result<(), OperationError> { ) -> Result<(), OperationError> {
trace!(eventid = ?msg.eventid, "Begin online backup event"); trace!(eventid = ?msg.eventid, "Begin online backup event");
@ -200,12 +200,12 @@ impl QueryServerReadV1 {
#[allow(clippy::unwrap_used)] #[allow(clippy::unwrap_used)]
let timestamp = now.format(&Rfc3339).unwrap(); let timestamp = now.format(&Rfc3339).unwrap();
let dest_file = format!("{}/backup-{}.json", outpath, timestamp); let dest_file = outpath.join(format!("backup-{}.json", timestamp));
if Path::new(&dest_file).exists() { if dest_file.exists() {
error!( error!(
"Online backup file {} already exists, will not overwrite it.", "Online backup file {} already exists, will not overwrite it.",
dest_file dest_file.display()
); );
return Err(OperationError::InvalidState); return Err(OperationError::InvalidState);
} }
@ -218,10 +218,14 @@ impl QueryServerReadV1 {
.get_be_txn() .get_be_txn()
.backup(&dest_file) .backup(&dest_file)
.map(|()| { .map(|()| {
info!("Online backup created {} successfully", dest_file); info!("Online backup created {} successfully", dest_file.display());
}) })
.map_err(|e| { .map_err(|e| {
error!("Online backup failed to create {}: {:?}", dest_file, e); error!(
"Online backup failed to create {}: {:?}",
dest_file.display(),
e
);
OperationError::InvalidState OperationError::InvalidState
})?; })?;
} }
@ -267,7 +271,11 @@ impl QueryServerReadV1 {
} }
} }
Err(e) => { Err(e) => {
error!("Online backup cleanup error read dir {}: {}", outpath, e); error!(
"Online backup cleanup error read dir {}: {}",
outpath.display(),
e
);
return Err(OperationError::InvalidState); return Err(OperationError::InvalidState);
} }
} }

File diff suppressed because it is too large Load diff

View file

@ -112,19 +112,19 @@ impl IntervalActor {
if !op.exists() { if !op.exists() {
info!( info!(
"Online backup output folder '{}' does not exist, trying to create it.", "Online backup output folder '{}' does not exist, trying to create it.",
outpath outpath.display()
); );
fs::create_dir_all(&outpath).map_err(|e| { fs::create_dir_all(&outpath).map_err(|e| {
error!( error!(
"Online backup failed to create output directory '{}': {}", "Online backup failed to create output directory '{}': {}",
outpath.clone(), outpath.display(),
e e
) )
})?; })?;
} }
if !op.is_dir() { if !op.is_dir() {
error!("Online backup output '{}' is not a directory or we are missing permissions to access it.", outpath); error!("Online backup output '{}' is not a directory or we are missing permissions to access it.", outpath.display());
return Err(()); return Err(());
} }
@ -148,7 +148,7 @@ impl IntervalActor {
if let Err(e) = server if let Err(e) = server
.handle_online_backup( .handle_online_backup(
OnlineBackupEvent::new(), OnlineBackupEvent::new(),
outpath.clone().as_str(), &outpath,
versions, versions,
) )
.await .await

View file

@ -36,9 +36,10 @@ mod ldaps;
mod repl; mod repl;
mod utils; mod utils;
use std::fmt::{Display, Formatter}; use crate::actors::{QueryServerReadV1, QueryServerWriteV1};
use std::sync::Arc; use crate::admin::AdminActor;
use crate::config::{Configuration, ServerRole};
use crate::interval::IntervalActor;
use crate::utils::touch_file_or_quit; use crate::utils::touch_file_or_quit;
use compact_jwt::{JwsHs256Signer, JwsSigner}; use compact_jwt::{JwsHs256Signer, JwsSigner};
use kanidm_proto::internal::OperationError; use kanidm_proto::internal::OperationError;
@ -50,17 +51,14 @@ use kanidmd_lib::status::StatusActor;
use kanidmd_lib::value::CredentialType; use kanidmd_lib::value::CredentialType;
#[cfg(not(target_family = "windows"))] #[cfg(not(target_family = "windows"))]
use libc::umask; use libc::umask;
use std::fmt::{Display, Formatter};
use std::path::Path;
use std::sync::Arc;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::sync::Notify; use tokio::sync::Notify;
use tokio::task; use tokio::task;
use crate::actors::{QueryServerReadV1, QueryServerWriteV1};
use crate::admin::AdminActor;
use crate::config::{Configuration, ServerRole};
use crate::interval::IntervalActor;
use tokio::sync::mpsc;
// === internal setup helpers // === internal setup helpers
fn setup_backend(config: &Configuration, schema: &Schema) -> Result<Backend, OperationError> { fn setup_backend(config: &Configuration, schema: &Schema) -> Result<Backend, OperationError> {
@ -80,7 +78,7 @@ fn setup_backend_vacuum(
let pool_size: u32 = config.threads as u32; let pool_size: u32 = config.threads as u32;
let cfg = BackendConfig::new( let cfg = BackendConfig::new(
config.db_path.as_str(), config.db_path.as_deref(),
pool_size, pool_size,
config.db_fs_type.unwrap_or_default(), config.db_fs_type.unwrap_or_default(),
config.db_arc_size, config.db_arc_size,
@ -335,7 +333,7 @@ pub fn dbscan_restore_quarantined_core(config: &Configuration, id: u64) {
}; };
} }
pub fn backup_server_core(config: &Configuration, dst_path: &str) { pub fn backup_server_core(config: &Configuration, dst_path: &Path) {
let schema = match Schema::new() { let schema = match Schema::new() {
Ok(s) => s, Ok(s) => s,
Err(e) => { Err(e) => {
@ -371,8 +369,11 @@ pub fn backup_server_core(config: &Configuration, dst_path: &str) {
// Let the txn abort, even on success. // Let the txn abort, even on success.
} }
pub async fn restore_server_core(config: &Configuration, dst_path: &str) { pub async fn restore_server_core(config: &Configuration, dst_path: &Path) {
touch_file_or_quit(config.db_path.as_str()); // If it's an in memory database, we don't need to touch anything
if let Some(db_path) = config.db_path.as_ref() {
touch_file_or_quit(db_path);
}
// First, we provide the in-memory schema so that core attrs are indexed correctly. // First, we provide the in-memory schema so that core attrs are indexed correctly.
let schema = match Schema::new() { let schema = match Schema::new() {
@ -1011,7 +1012,7 @@ pub async fn create_server_core(
let tls_accepter_reload_task_notify = tls_acceptor_reload_notify.clone(); let tls_accepter_reload_task_notify = tls_acceptor_reload_notify.clone();
let tls_config = config.tls_config.clone(); let tls_config = config.tls_config.clone();
let ldap_configured = config.ldapaddress.is_some(); let ldap_configured = config.ldapbindaddress.is_some();
let (ldap_tls_acceptor_reload_tx, ldap_tls_acceptor_reload_rx) = mpsc::channel(1); let (ldap_tls_acceptor_reload_tx, ldap_tls_acceptor_reload_rx) = mpsc::channel(1);
let (http_tls_acceptor_reload_tx, http_tls_acceptor_reload_rx) = mpsc::channel(1); let (http_tls_acceptor_reload_tx, http_tls_acceptor_reload_rx) = mpsc::channel(1);
@ -1076,7 +1077,7 @@ pub async fn create_server_core(
}; };
// If we have been requested to init LDAP, configure it now. // If we have been requested to init LDAP, configure it now.
let maybe_ldap_acceptor_handle = match &config.ldapaddress { let maybe_ldap_acceptor_handle = match &config.ldapbindaddress {
Some(la) => { Some(la) => {
let opt_ldap_ssl_acceptor = maybe_tls_acceptor.clone(); let opt_ldap_ssl_acceptor = maybe_tls_acceptor.clone();

View file

@ -1,32 +1,39 @@
use filetime::FileTime; use filetime::FileTime;
use std::fs::File; use std::fs::File;
use std::io::ErrorKind; use std::io::ErrorKind;
use std::path::PathBuf; use std::path::Path;
use std::time::SystemTime; use std::time::SystemTime;
pub fn touch_file_or_quit(file_path: &str) { pub fn touch_file_or_quit<P: AsRef<Path>>(file_path: P) {
/* /*
Attempt to touch the file file_path, will quit the application if it fails for any reason. Attempt to touch the file file_path, will quit the application if it fails for any reason.
Will also create a new file if it doesn't already exist. Will also create a new file if it doesn't already exist.
*/ */
if PathBuf::from(file_path).exists() {
let file_path: &Path = file_path.as_ref();
if file_path.exists() {
let t = FileTime::from_system_time(SystemTime::now()); let t = FileTime::from_system_time(SystemTime::now());
match filetime::set_file_times(file_path, t, t) { match filetime::set_file_times(file_path, t, t) {
Ok(_) => debug!( Ok(_) => debug!(
"Successfully touched existing file {}, can continue", "Successfully touched existing file {}, can continue",
file_path file_path.display()
), ),
Err(e) => { Err(e) => {
match e.kind() { match e.kind() {
ErrorKind::PermissionDenied => { ErrorKind::PermissionDenied => {
// we bail here because you won't be able to write them back... // we bail here because you won't be able to write them back...
error!("Permission denied writing to {}, quitting.", file_path) error!(
"Permission denied writing to {}, quitting.",
file_path.display()
)
} }
_ => { _ => {
error!( error!(
"Failed to write to {} due to error: {:?} ... quitting.", "Failed to write to {} due to error: {:?} ... quitting.",
file_path, e file_path.display(),
e
) )
} }
} }
@ -35,11 +42,12 @@ pub fn touch_file_or_quit(file_path: &str) {
} }
} else { } else {
match File::create(file_path) { match File::create(file_path) {
Ok(_) => debug!("Successfully touched new file {}", file_path), Ok(_) => debug!("Successfully touched new file {}", file_path.display()),
Err(e) => { Err(e) => {
error!( error!(
"Failed to write to {} due to error: {:?} ... quitting.", "Failed to write to {} due to error: {:?} ... quitting.",
file_path, e file_path.display(),
e
); );
std::process::exit(1); std::process::exit(1);
} }

View file

@ -1,3 +1,4 @@
version = "2"
bindaddress = "[::]:8443" bindaddress = "[::]:8443"
ldapbindaddress = "127.0.0.1:3636" ldapbindaddress = "127.0.0.1:3636"

View file

@ -22,7 +22,7 @@ fi
mkdir -p "${KANI_TMP}"/client_ca mkdir -p "${KANI_TMP}"/client_ca
CONFIG_FILE=${CONFIG_FILE:="${SCRIPT_DIR}/../../examples/insecure_server.toml"} CONFIG_FILE=${CONFIG_FILE:="${SCRIPT_DIR}/insecure_server.toml"}
if [ ! -f "${CONFIG_FILE}" ]; then if [ ! -f "${CONFIG_FILE}" ]; then
echo "Couldn't find configuration file at ${CONFIG_FILE}, please ensure you're running this script from its base directory (${SCRIPT_DIR})." echo "Couldn't find configuration file at ${CONFIG_FILE}, please ensure you're running this script from its base directory (${SCRIPT_DIR})."

View file

@ -37,7 +37,7 @@ use kanidmd_core::admin::{
AdminTaskRequest, AdminTaskResponse, ClientCodec, ProtoDomainInfo, AdminTaskRequest, AdminTaskResponse, ClientCodec, ProtoDomainInfo,
ProtoDomainUpgradeCheckReport, ProtoDomainUpgradeCheckStatus, ProtoDomainUpgradeCheckReport, ProtoDomainUpgradeCheckStatus,
}; };
use kanidmd_core::config::{Configuration, ServerConfig}; use kanidmd_core::config::{CliConfig, Configuration, EnvironmentConfig, ServerConfigUntagged};
use kanidmd_core::{ use kanidmd_core::{
backup_server_core, cert_generate_core, create_server_core, dbscan_get_id2entry_core, backup_server_core, cert_generate_core, create_server_core, dbscan_get_id2entry_core,
dbscan_list_id2entry_core, dbscan_list_index_analysis_core, dbscan_list_index_core, dbscan_list_id2entry_core, dbscan_list_index_analysis_core, dbscan_list_index_core,
@ -379,17 +379,13 @@ fn check_file_ownership(opt: &KanidmdParser) -> Result<(), ExitCode> {
} }
// We have to do this because we can't use tracing until we've started the logging pipeline, and we can't start the logging pipeline until the tokio runtime's doing its thing. // We have to do this because we can't use tracing until we've started the logging pipeline, and we can't start the logging pipeline until the tokio runtime's doing its thing.
async fn start_daemon( async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
opt: KanidmdParser,
mut config: Configuration,
sconfig: ServerConfig,
) -> ExitCode {
// if we have a server config and it has an OTEL URL, then we'll start the logging pipeline now. // if we have a server config and it has an OTEL URL, then we'll start the logging pipeline now.
// TODO: only send to stderr when we're not in a TTY // TODO: only send to stderr when we're not in a TTY
let sub = match sketching::otel::start_logging_pipeline( let sub = match sketching::otel::start_logging_pipeline(
&sconfig.otel_grpc_url, &config.otel_grpc_url,
sconfig.log_level.unwrap_or_default(), config.log_level,
"kanidmd", "kanidmd",
) { ) {
Err(err) => { Err(err) => {
@ -423,8 +419,8 @@ async fn start_daemon(
return err; return err;
}; };
if let Some(db_path) = sconfig.db_path.as_ref() { if let Some(db_path) = config.db_path.as_ref() {
let db_pathbuf = PathBuf::from(db_path.as_str()); let db_pathbuf = db_path.to_path_buf();
// We can't check the db_path permissions because it may not exist yet! // We can't check the db_path permissions because it may not exist yet!
if let Some(db_parent_path) = db_pathbuf.parent() { if let Some(db_parent_path) = db_pathbuf.parent() {
if !db_parent_path.exists() { if !db_parent_path.exists() {
@ -464,33 +460,11 @@ async fn start_daemon(
warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str().unwrap_or("invalid file path")); warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str().unwrap_or("invalid file path"));
} }
} }
config.update_db_path(db_path);
} else { } else {
error!("No db_path set in configuration, server startup will FAIL!"); error!("No db_path set in configuration, server startup will FAIL!");
return ExitCode::FAILURE; return ExitCode::FAILURE;
} }
if let Some(origin) = sconfig.origin.clone() {
config.update_origin(&origin);
} else {
error!("No origin set in configuration, server startup will FAIL!");
return ExitCode::FAILURE;
}
if let Some(domain) = sconfig.domain.clone() {
config.update_domain(&domain);
} else {
error!("No domain set in configuration, server startup will FAIL!");
return ExitCode::FAILURE;
}
config.update_db_arc_size(sconfig.get_db_arc_size());
config.update_role(sconfig.role);
config.update_output_mode(opt.commands.commonopt().output_mode.to_owned().into());
config.update_trust_x_forward_for(sconfig.trust_x_forward_for);
config.update_admin_bind_path(&sconfig.adminbindpath);
config.update_replication_config(sconfig.repl_config.clone());
match &opt.commands { match &opt.commands {
// we aren't going to touch the DB so we can carry on // we aren't going to touch the DB so we can carry on
KanidmdOpt::ShowReplicationCertificate { .. } KanidmdOpt::ShowReplicationCertificate { .. }
@ -501,19 +475,15 @@ async fn start_daemon(
_ => { _ => {
// Okay - Lets now create our lock and go. // Okay - Lets now create our lock and go.
#[allow(clippy::expect_used)] #[allow(clippy::expect_used)]
let klock_path = match sconfig.db_path.clone() { let klock_path = match config.db_path.clone() {
Some(val) => format!("{}.klock", val), Some(val) => val.with_extension("klock"),
None => std::env::temp_dir() None => std::env::temp_dir().join("kanidmd.klock"),
.join("kanidmd.klock")
.to_str()
.expect("Unable to create klock path, this is a critical error!")
.to_string(),
}; };
let flock = match File::create(&klock_path) { let flock = match File::create(&klock_path) {
Ok(flock) => flock, Ok(flock) => flock,
Err(e) => { Err(e) => {
error!("ERROR: Refusing to start - unable to create kanidmd exclusive lock at {} - {:?}", klock_path, e); error!("ERROR: Refusing to start - unable to create kanidmd exclusive lock at {} - {:?}", klock_path.display(), e);
return ExitCode::FAILURE; return ExitCode::FAILURE;
} }
}; };
@ -521,7 +491,7 @@ async fn start_daemon(
match flock.try_lock_exclusive() { match flock.try_lock_exclusive() {
Ok(()) => debug!("Acquired kanidm exclusive lock"), Ok(()) => debug!("Acquired kanidm exclusive lock"),
Err(e) => { Err(e) => {
error!("ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {} - {:?}", klock_path, e); error!("ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {} - {:?}", klock_path.display(), e);
error!("Is another kanidmd process running?"); error!("Is another kanidmd process running?");
return ExitCode::FAILURE; return ExitCode::FAILURE;
} }
@ -529,7 +499,7 @@ async fn start_daemon(
} }
} }
kanidm_main(sconfig, config, opt).await kanidm_main(config, opt).await
} }
fn main() -> ExitCode { fn main() -> ExitCode {
@ -556,10 +526,6 @@ fn main() -> ExitCode {
return ExitCode::SUCCESS; return ExitCode::SUCCESS;
}; };
//we set up a list of these so we can set the log config THEN log out the errors.
let mut config_error: Vec<String> = Vec::new();
let mut config = Configuration::new();
if env!("KANIDM_SERVER_CONFIG_PATH").is_empty() { if env!("KANIDM_SERVER_CONFIG_PATH").is_empty() {
println!("CRITICAL: Kanidmd was not built correctly and is missing a valid KANIDM_SERVER_CONFIG_PATH value"); println!("CRITICAL: Kanidmd was not built correctly and is missing a valid KANIDM_SERVER_CONFIG_PATH value");
return ExitCode::FAILURE; return ExitCode::FAILURE;
@ -581,49 +547,56 @@ fn main() -> ExitCode {
} }
}; };
let sconfig = match ServerConfig::new(maybe_config_path) { let maybe_sconfig = if let Some(config_path) = maybe_config_path {
Ok(c) => Some(c), match ServerConfigUntagged::new(config_path) {
Err(e) => { Ok(c) => Some(c),
config_error.push(format!("Config Parse failure {:?}", e)); Err(err) => {
eprintln!("ERROR: Configuration Parse Failure: {:?}", err);
return ExitCode::FAILURE;
}
}
} else {
eprintln!("WARNING: No configuration path was provided, relying on environment variables.");
None
};
let envconfig = match EnvironmentConfig::new() {
Ok(ec) => ec,
Err(err) => {
eprintln!("ERROR: Environment Configuration Parse Failure: {:?}", err);
return ExitCode::FAILURE; return ExitCode::FAILURE;
} }
}; };
// Get information on the windows username let cli_config = CliConfig {
#[cfg(target_family = "windows")] output_mode: Some(opt.commands.commonopt().output_mode.to_owned().into()),
get_user_details_windows(); };
if !config_error.is_empty() { let is_server = matches!(&opt.commands, KanidmdOpt::Server(_));
println!("There were errors on startup, which prevent the server from starting:");
for e in config_error { let config = Configuration::build()
println!(" - {}", e); .add_env_config(envconfig)
} .add_opt_toml_config(maybe_sconfig)
// We always set threads to 1 unless it's the main server.
.add_cli_config(cli_config)
.is_server_mode(is_server)
.finish();
let Some(config) = config else {
eprintln!(
"ERROR: Unable to build server configuration from provided configuration inputs."
);
return ExitCode::FAILURE; return ExitCode::FAILURE;
}
let sconfig = match sconfig {
Some(val) => val,
None => {
println!("Somehow you got an empty ServerConfig after error checking? Cannot start!");
return ExitCode::FAILURE;
}
}; };
// =========================================================================== // ===========================================================================
// Config ready // Config ready
// We always set threads to 1 unless it's the main server. // Get information on the windows username
if matches!(&opt.commands, KanidmdOpt::Server(_)) { #[cfg(target_family = "windows")]
// If not updated, will default to maximum get_user_details_windows();
if let Some(threads) = sconfig.thread_count {
config.update_threads_count(threads);
}
} else {
config.update_threads_count(1);
};
// Start the runtime // Start the runtime
let maybe_rt = tokio::runtime::Builder::new_multi_thread() let maybe_rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(config.threads) .worker_threads(config.threads)
.enable_all() .enable_all()
@ -643,16 +616,12 @@ fn main() -> ExitCode {
} }
}; };
rt.block_on(start_daemon(opt, config, sconfig)) rt.block_on(start_daemon(opt, config))
} }
/// Build and execute the main server. The ServerConfig are the configuration options /// Build and execute the main server. The ServerConfig are the configuration options
/// that we are processing into the config for the main server. /// that we are processing into the config for the main server.
async fn kanidm_main( async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
sconfig: ServerConfig,
mut config: Configuration,
opt: KanidmdParser,
) -> ExitCode {
match &opt.commands { match &opt.commands {
KanidmdOpt::Server(_sopt) | KanidmdOpt::ConfigTest(_sopt) => { KanidmdOpt::Server(_sopt) | KanidmdOpt::ConfigTest(_sopt) => {
let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest(_)); let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest(_));
@ -662,88 +631,90 @@ async fn kanidm_main(
info!("Running in server mode ..."); info!("Running in server mode ...");
}; };
// configuration options that only relate to server mode // Verify the TLs configs.
config.update_config_for_server_mode(&sconfig); if let Some(tls_config) = config.tls_config.as_ref() {
{
if let Some(i_str) = &(sconfig.tls_chain) { let i_meta = match metadata(&tls_config.chain) {
let i_path = PathBuf::from(i_str.as_str()); Ok(m) => m,
let i_meta = match metadata(&i_path) { Err(e) => {
Ok(m) => m, error!(
Err(e) => { "Unable to read metadata for TLS chain file '{}' - {:?}",
error!( tls_config.chain.display(),
"Unable to read metadata for TLS chain file '{}' - {:?}", e
&i_path.to_str().unwrap_or("invalid file path"), );
e let diag =
); kanidm_lib_file_permissions::diagnose_path(&tls_config.chain);
let diag = kanidm_lib_file_permissions::diagnose_path(&i_path); info!(%diag);
info!(%diag); return ExitCode::FAILURE;
return ExitCode::FAILURE; }
};
if !kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.chain.display());
} }
};
if !kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", i_str);
} }
}
if let Some(i_str) = &(sconfig.tls_key) { {
let i_path = PathBuf::from(i_str.as_str()); let i_meta = match metadata(&tls_config.key) {
Ok(m) => m,
let i_meta = match metadata(&i_path) { Err(e) => {
Ok(m) => m, error!(
Err(e) => { "Unable to read metadata for TLS key file '{}' - {:?}",
error!( tls_config.key.display(),
"Unable to read metadata for TLS key file '{}' - {:?}", e
&i_path.to_str().unwrap_or("invalid file path"), );
e let diag = kanidm_lib_file_permissions::diagnose_path(&tls_config.key);
); info!(%diag);
let diag = kanidm_lib_file_permissions::diagnose_path(&i_path); return ExitCode::FAILURE;
info!(%diag); }
return ExitCode::FAILURE; };
if !kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.key.display());
}
#[cfg(not(target_os = "windows"))]
if i_meta.mode() & 0o007 != 0 {
warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", tls_config.key.display());
} }
};
if !kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", i_str);
}
#[cfg(not(target_os = "windows"))]
if i_meta.mode() & 0o007 != 0 {
warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", i_str);
}
}
if let Some(ca_dir) = &(sconfig.tls_client_ca) {
// check that the TLS client CA config option is what we expect
let ca_dir_path = PathBuf::from(&ca_dir);
if !ca_dir_path.exists() {
error!(
"TLS CA folder {} does not exist, server startup will FAIL!",
ca_dir
);
let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
info!(%diag);
} }
let i_meta = match metadata(&ca_dir_path) { if let Some(ca_dir) = tls_config.client_ca.as_ref() {
Ok(m) => m, // check that the TLS client CA config option is what we expect
Err(e) => { let ca_dir_path = PathBuf::from(&ca_dir);
error!("Unable to read metadata for '{}' - {:?}", ca_dir, e); if !ca_dir_path.exists() {
error!(
"TLS CA folder {} does not exist, server startup will FAIL!",
ca_dir.display()
);
let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path); let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
info!(%diag); info!(%diag);
}
let i_meta = match metadata(&ca_dir_path) {
Ok(m) => m,
Err(e) => {
error!(
"Unable to read metadata for '{}' - {:?}",
ca_dir.display(),
e
);
let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
info!(%diag);
return ExitCode::FAILURE;
}
};
if !i_meta.is_dir() {
error!(
"ERROR: Refusing to run - TLS Client CA folder {} may not be a directory",
ca_dir.display()
);
return ExitCode::FAILURE; return ExitCode::FAILURE;
} }
}; if kanidm_lib_file_permissions::readonly(&i_meta) {
if !i_meta.is_dir() { warn!("WARNING: TLS Client CA folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", ca_dir.display());
error!( }
"ERROR: Refusing to run - TLS Client CA folder {} may not be a directory", #[cfg(not(target_os = "windows"))]
ca_dir if i_meta.mode() & 0o007 != 0 {
); warn!("WARNING: TLS Client CA folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", ca_dir.display());
return ExitCode::FAILURE; }
}
if kanidm_lib_file_permissions::readonly(&i_meta) {
warn!("WARNING: TLS Client CA folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", ca_dir);
}
#[cfg(not(target_os = "windows"))]
if i_meta.mode() & 0o007 != 0 {
warn!("WARNING: TLS Client CA folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", ca_dir);
} }
} }
@ -753,14 +724,6 @@ async fn kanidm_main(
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
{ {
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]);
// Undocumented systemd feature - all messages should have a monotonic usec sent
// with them. In some cases like "reloading" messages, it is undocumented but
// failure to send this message causes the reload to fail.
if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
let _ = sd_notify::notify(true, &[monotonic_usec]);
} else {
error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
};
let _ = sd_notify::notify( let _ = sd_notify::notify(
true, true,
&[sd_notify::NotifyState::Status("Started Kanidm 🦀")], &[sd_notify::NotifyState::Status("Started Kanidm 🦀")],
@ -774,86 +737,80 @@ async fn kanidm_main(
{ {
let mut listener = sctx.subscribe(); let mut listener = sctx.subscribe();
tokio::select! { tokio::select! {
Ok(()) = tokio::signal::ctrl_c() => { Ok(()) = tokio::signal::ctrl_c() => {
break break
} }
Some(()) = async move { Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::terminate(); let sigterm = tokio::signal::unix::SignalKind::terminate();
#[allow(clippy::unwrap_used)] #[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => { } => {
break break
} }
Some(()) = async move { Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::alarm(); let sigterm = tokio::signal::unix::SignalKind::alarm();
#[allow(clippy::unwrap_used)] #[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => { } => {
// Ignore // Ignore
} }
Some(()) = async move { Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::hangup(); let sigterm = tokio::signal::unix::SignalKind::hangup();
#[allow(clippy::unwrap_used)] #[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => { } => {
// Reload TLS certificates // Reload TLS certificates
// systemd has a special reload handler for this. // systemd has a special reload handler for this.
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
{ {
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]); if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
// CRITICAL - if you do not send a monotonic usec message after a reloading let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Reloading, monotonic_usec]);
// message, your service WILL BE KILLED. let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reloading ...")]);
if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() { } else {
let _ = error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
sd_notify::notify(true, &[monotonic_usec]); };
} else { }
error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
};
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reloading ...")]);
}
sctx.tls_acceptor_reload().await; sctx.tls_acceptor_reload().await;
// Systemd freaks out if you send the ready state too fast after the // Systemd freaks out if you send the ready state too fast after the
// reload state and can kill Kanidmd as a result. // reload state and can kill Kanidmd as a result.
tokio::time::sleep(std::time::Duration::from_secs(5)).await; tokio::time::sleep(std::time::Duration::from_secs(5)).await;
#[cfg(target_os = "linux")] #[cfg(target_os = "linux")]
{ {
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready]); if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() {
if let Ok(monotonic_usec) = sd_notify::NotifyState::monotonic_usec_now() { let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Ready, monotonic_usec]);
let _ = let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reload Success")]);
sd_notify::notify(true, &[monotonic_usec]); } else {
} else { error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US.");
error!("CRITICAL!!! Unable to access clock monotonic time. SYSTEMD WILL KILL US."); };
}; }
let _ = sd_notify::notify(true, &[sd_notify::NotifyState::Status("Reload Success")]);
}
info!("Reload complete"); info!("Reload complete");
} }
Some(()) = async move { Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::user_defined1(); let sigterm = tokio::signal::unix::SignalKind::user_defined1();
#[allow(clippy::unwrap_used)] #[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => { } => {
// Ignore // Ignore
} }
Some(()) = async move { Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::user_defined2(); let sigterm = tokio::signal::unix::SignalKind::user_defined2();
#[allow(clippy::unwrap_used)] #[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => { } => {
// Ignore // Ignore
} }
// we got a message on thr broadcast from somewhere else // we got a message on thr broadcast from somewhere else
Ok(msg) = async move { Ok(msg) = async move {
listener.recv().await listener.recv().await
} => { } => {
debug!("Main loop received message: {:?}", msg); debug!("Main loop received message: {:?}", msg);
break break
} }
} }
} }
#[cfg(target_family = "windows")] #[cfg(target_family = "windows")]
{ {
@ -880,34 +837,19 @@ async fn kanidm_main(
} }
KanidmdOpt::CertGenerate(_sopt) => { KanidmdOpt::CertGenerate(_sopt) => {
info!("Running in certificate generate mode ..."); info!("Running in certificate generate mode ...");
config.update_config_for_server_mode(&sconfig);
cert_generate_core(&config); cert_generate_core(&config);
} }
KanidmdOpt::Database { KanidmdOpt::Database {
commands: DbCommands::Backup(bopt), commands: DbCommands::Backup(bopt),
} => { } => {
info!("Running in backup mode ..."); info!("Running in backup mode ...");
let p = match bopt.path.to_str() { backup_server_core(&config, &bopt.path);
Some(p) => p,
None => {
error!("Invalid backup path");
return ExitCode::FAILURE;
}
};
backup_server_core(&config, p);
} }
KanidmdOpt::Database { KanidmdOpt::Database {
commands: DbCommands::Restore(ropt), commands: DbCommands::Restore(ropt),
} => { } => {
info!("Running in restore mode ..."); info!("Running in restore mode ...");
let p = match ropt.path.to_str() { restore_server_core(&config, &ropt.path).await;
Some(p) => p,
None => {
error!("Invalid restore path");
return ExitCode::FAILURE;
}
};
restore_server_core(&config, p).await;
} }
KanidmdOpt::Database { KanidmdOpt::Database {
commands: DbCommands::Verify(_vopt), commands: DbCommands::Verify(_vopt),
@ -1088,8 +1030,6 @@ async fn kanidm_main(
vacuum_server_core(&config); vacuum_server_core(&config);
} }
KanidmdOpt::HealthCheck(sopt) => { KanidmdOpt::HealthCheck(sopt) => {
config.update_config_for_server_mode(&sconfig);
debug!("{sopt:?}"); debug!("{sopt:?}");
let healthcheck_url = match &sopt.check_origin { let healthcheck_url = match &sopt.check_origin {
@ -1110,12 +1050,15 @@ async fn kanidm_main(
.danger_accept_invalid_hostnames(!sopt.verify_tls) .danger_accept_invalid_hostnames(!sopt.verify_tls)
.https_only(true); .https_only(true);
client = match &sconfig.tls_chain { client = match &config.tls_config {
None => client, None => client,
Some(ca_cert) => { Some(tls_config) => {
debug!("Trying to load {} to build a CA cert path", ca_cert); debug!(
"Trying to load {} to build a CA cert path",
tls_config.chain.display()
);
// if the ca_cert file exists, then we'll use it // if the ca_cert file exists, then we'll use it
let ca_cert_path = PathBuf::from(ca_cert); let ca_cert_path = tls_config.chain.clone();
match ca_cert_path.exists() { match ca_cert_path.exists() {
true => { true => {
let mut cert_buf = Vec::new(); let mut cert_buf = Vec::new();
@ -1148,7 +1091,10 @@ async fn kanidm_main(
client client
} }
false => { false => {
warn!("Couldn't find ca cert {} but carrying on...", ca_cert); warn!(
"Couldn't find ca cert {} but carrying on...",
tls_config.chain.display()
);
client client
} }
} }

View file

@ -1,27 +1,21 @@
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::convert::{TryFrom, TryInto};
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use super::keystorage::{KeyHandle, KeyHandleId}; use super::keystorage::{KeyHandle, KeyHandleId};
// use crate::valueset;
use hashbrown::HashMap;
use idlset::v2::IDLBitRange;
use kanidm_proto::internal::{ConsistencyError, OperationError};
use rusqlite::vtab::array::Array;
use rusqlite::{Connection, OpenFlags, OptionalExtension};
use uuid::Uuid;
use crate::be::dbentry::DbIdentSpn; use crate::be::dbentry::DbIdentSpn;
use crate::be::dbvalue::DbCidV1; use crate::be::dbvalue::DbCidV1;
use crate::be::{BackendConfig, IdList, IdRawEntry, IdxKey, IdxSlope}; use crate::be::{BackendConfig, IdList, IdRawEntry, IdxKey, IdxSlope};
use crate::entry::{Entry, EntryCommitted, EntrySealed}; use crate::entry::{Entry, EntryCommitted, EntrySealed};
use crate::prelude::*; use crate::prelude::*;
use crate::value::{IndexType, Value}; use crate::value::{IndexType, Value};
use hashbrown::HashMap;
// use uuid::Uuid; use idlset::v2::IDLBitRange;
use kanidm_proto::internal::{ConsistencyError, OperationError};
use rusqlite::vtab::array::Array;
use rusqlite::{Connection, OpenFlags, OptionalExtension};
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::convert::{TryFrom, TryInto};
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use uuid::Uuid;
const DBV_ID2ENTRY: &str = "id2entry"; const DBV_ID2ENTRY: &str = "id2entry";
const DBV_INDEXV: &str = "indexv"; const DBV_INDEXV: &str = "indexv";
@ -1712,7 +1706,7 @@ impl IdlSqliteWriteTransaction {
impl IdlSqlite { impl IdlSqlite {
pub fn new(cfg: &BackendConfig, vacuum: bool) -> Result<Self, OperationError> { pub fn new(cfg: &BackendConfig, vacuum: bool) -> Result<Self, OperationError> {
if cfg.path.is_empty() { if cfg.path.as_os_str().is_empty() {
debug_assert_eq!(cfg.pool_size, 1); debug_assert_eq!(cfg.pool_size, 1);
} }
// If provided, set the page size to match the tuning we want. By default we use 4096. The VACUUM // If provided, set the page size to match the tuning we want. By default we use 4096. The VACUUM
@ -1734,8 +1728,7 @@ impl IdlSqlite {
// Initial setup routines. // Initial setup routines.
{ {
let vconn = let vconn = Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error)?;
Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error)?;
vconn vconn
.execute_batch( .execute_batch(
@ -1764,8 +1757,7 @@ impl IdlSqlite {
); );
*/ */
let vconn = let vconn = Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error)?;
Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error)?;
vconn vconn
.execute_batch("PRAGMA wal_checkpoint(TRUNCATE);") .execute_batch("PRAGMA wal_checkpoint(TRUNCATE);")
@ -1786,8 +1778,7 @@ impl IdlSqlite {
OperationError::SqliteError OperationError::SqliteError
})?; })?;
let vconn = let vconn = Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error)?;
Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error)?;
vconn vconn
.pragma_update(None, "page_size", cfg.fstype as u32) .pragma_update(None, "page_size", cfg.fstype as u32)
@ -1821,7 +1812,7 @@ impl IdlSqlite {
.map(|i| { .map(|i| {
trace!("Opening Connection {}", i); trace!("Opening Connection {}", i);
let conn = let conn =
Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error); Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error);
match conn { match conn {
Ok(conn) => { Ok(conn) => {
// We need to set the cachesize at this point as well. // We need to set the cachesize at this point as well.

View file

@ -4,20 +4,6 @@
//! is to persist content safely to disk, load that content, and execute queries //! is to persist content safely to disk, load that content, and execute queries
//! utilising indexes in the most effective way possible. //! utilising indexes in the most effective way possible.
use std::collections::BTreeMap;
use std::fs;
use std::ops::DerefMut;
use std::sync::Arc;
use std::time::Duration;
use concread::cowcell::*;
use hashbrown::{HashMap as Map, HashSet};
use idlset::v2::IDLBitRange;
use idlset::AndNot;
use kanidm_proto::internal::{ConsistencyError, OperationError};
use tracing::{trace, trace_span};
use uuid::Uuid;
use crate::be::dbentry::{DbBackup, DbEntry}; use crate::be::dbentry::{DbBackup, DbEntry};
use crate::be::dbrepl::DbReplMeta; use crate::be::dbrepl::DbReplMeta;
use crate::entry::Entry; use crate::entry::Entry;
@ -31,6 +17,19 @@ use crate::repl::ruv::{
}; };
use crate::utils::trigraph_iter; use crate::utils::trigraph_iter;
use crate::value::{IndexType, Value}; use crate::value::{IndexType, Value};
use concread::cowcell::*;
use hashbrown::{HashMap as Map, HashSet};
use idlset::v2::IDLBitRange;
use idlset::AndNot;
use kanidm_proto::internal::{ConsistencyError, OperationError};
use std::collections::BTreeMap;
use std::fs;
use std::ops::DerefMut;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use tracing::{trace, trace_span};
use uuid::Uuid;
pub(crate) mod dbentry; pub(crate) mod dbentry;
pub(crate) mod dbrepl; pub(crate) mod dbrepl;
@ -132,7 +131,7 @@ impl IdxMeta {
#[derive(Clone)] #[derive(Clone)]
pub struct BackendConfig { pub struct BackendConfig {
path: String, path: PathBuf,
pool_size: u32, pool_size: u32,
db_name: &'static str, db_name: &'static str,
fstype: FsType, fstype: FsType,
@ -141,10 +140,16 @@ pub struct BackendConfig {
} }
impl BackendConfig { impl BackendConfig {
pub fn new(path: &str, pool_size: u32, fstype: FsType, arcsize: Option<usize>) -> Self { pub fn new(
path: Option<&Path>,
pool_size: u32,
fstype: FsType,
arcsize: Option<usize>,
) -> Self {
BackendConfig { BackendConfig {
pool_size, pool_size,
path: path.to_string(), // This means if path is None, that "" implies an sqlite in memory/ram only database.
path: path.unwrap_or_else(|| Path::new("")).to_path_buf(),
db_name: "main", db_name: "main",
fstype, fstype,
arcsize, arcsize,
@ -154,7 +159,7 @@ impl BackendConfig {
pub(crate) fn new_test(db_name: &'static str) -> Self { pub(crate) fn new_test(db_name: &'static str) -> Self {
BackendConfig { BackendConfig {
pool_size: 1, pool_size: 1,
path: "".to_string(), path: PathBuf::from(""),
db_name, db_name,
fstype: FsType::Generic, fstype: FsType::Generic,
arcsize: Some(2048), arcsize: Some(2048),
@ -936,7 +941,7 @@ pub trait BackendTransaction {
self.get_ruv().verify(&entries, results); self.get_ruv().verify(&entries, results);
} }
fn backup(&mut self, dst_path: &str) -> Result<(), OperationError> { fn backup(&mut self, dst_path: &Path) -> Result<(), OperationError> {
let repl_meta = self.get_ruv().to_db_backup_ruv(); let repl_meta = self.get_ruv().to_db_backup_ruv();
// load all entries into RAM, may need to change this later // load all entries into RAM, may need to change this later
@ -1808,7 +1813,7 @@ impl<'a> BackendWriteTransaction<'a> {
Ok(slope) Ok(slope)
} }
pub fn restore(&mut self, src_path: &str) -> Result<(), OperationError> { pub fn restore(&mut self, src_path: &Path) -> Result<(), OperationError> {
let serialized_string = fs::read_to_string(src_path).map_err(|e| { let serialized_string = fs::read_to_string(src_path).map_err(|e| {
admin_error!("fs::read_to_string {:?}", e); admin_error!("fs::read_to_string {:?}", e);
OperationError::FsError OperationError::FsError
@ -2121,7 +2126,7 @@ impl Backend {
debug!(db_tickets = ?cfg.pool_size, profile = %env!("KANIDM_PROFILE_NAME"), cpu_flags = %env!("KANIDM_CPU_FLAGS")); debug!(db_tickets = ?cfg.pool_size, profile = %env!("KANIDM_PROFILE_NAME"), cpu_flags = %env!("KANIDM_CPU_FLAGS"));
// If in memory, reduce pool to 1 // If in memory, reduce pool to 1
if cfg.path.is_empty() { if cfg.path.as_os_str().is_empty() {
cfg.pool_size = 1; cfg.pool_size = 1;
} }
@ -2207,13 +2212,6 @@ impl Backend {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::fs;
use std::iter::FromIterator;
use std::sync::Arc;
use std::time::Duration;
use idlset::v2::IDLBitRange;
use super::super::entry::{Entry, EntryInit, EntryNew}; use super::super::entry::{Entry, EntryInit, EntryNew};
use super::Limits; use super::Limits;
use super::{ use super::{
@ -2223,6 +2221,12 @@ mod tests {
use crate::prelude::*; use crate::prelude::*;
use crate::repl::cid::Cid; use crate::repl::cid::Cid;
use crate::value::{IndexType, PartialValue, Value}; use crate::value::{IndexType, PartialValue, Value};
use idlset::v2::IDLBitRange;
use std::fs;
use std::iter::FromIterator;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
lazy_static! { lazy_static! {
static ref CID_ZERO: Cid = Cid::new_zero(); static ref CID_ZERO: Cid = Cid::new_zero();
@ -2597,11 +2601,9 @@ mod tests {
#[test] #[test]
fn test_be_backup_restore() { fn test_be_backup_restore() {
let db_backup_file_name = format!( let db_backup_file_name =
"{}/.backup_test.json", Path::new(option_env!("OUT_DIR").unwrap_or("/tmp")).join(".backup_test.json");
option_env!("OUT_DIR").unwrap_or("/tmp") eprintln!(" ⚠️ {}", db_backup_file_name.display());
);
eprintln!(" ⚠️ {db_backup_file_name}");
run_test!(|be: &mut BackendWriteTransaction| { run_test!(|be: &mut BackendWriteTransaction| {
// Important! Need db metadata setup! // Important! Need db metadata setup!
be.reset_db_s_uuid().unwrap(); be.reset_db_s_uuid().unwrap();
@ -2656,11 +2658,9 @@ mod tests {
#[test] #[test]
fn test_be_backup_restore_tampered() { fn test_be_backup_restore_tampered() {
let db_backup_file_name = format!( let db_backup_file_name =
"{}/.backup2_test.json", Path::new(option_env!("OUT_DIR").unwrap_or("/tmp")).join(".backup2_test.json");
option_env!("OUT_DIR").unwrap_or("/tmp") eprintln!(" ⚠️ {}", db_backup_file_name.display());
);
eprintln!(" ⚠️ {db_backup_file_name}");
run_test!(|be: &mut BackendWriteTransaction| { run_test!(|be: &mut BackendWriteTransaction| {
// Important! Need db metadata setup! // Important! Need db metadata setup!
be.reset_db_s_uuid().unwrap(); be.reset_db_s_uuid().unwrap();

View file

@ -63,7 +63,7 @@ fn parse_attributes(
"ldap" => { "ldap" => {
flags.ldap = true; flags.ldap = true;
field_modifications.extend(quote! { field_modifications.extend(quote! {
ldapaddress: Some("on".to_string()),}) ldapbindaddress: Some("on".to_string()),})
} }
_ => { _ => {
let field_name = p.value().left.to_token_stream(); // here we can use to_token_stream as we know we're iterating over ExprAssigns let field_name = p.value().left.to_token_stream(); // here we can use to_token_stream as we know we're iterating over ExprAssigns

View file

@ -84,9 +84,9 @@ pub async fn setup_async_test(mut config: Configuration) -> AsyncTestEnvironment
let addr = format!("http://localhost:{}", port); let addr = format!("http://localhost:{}", port);
let ldap_url = if config.ldapaddress.is_some() { let ldap_url = if config.ldapbindaddress.is_some() {
let ldapport = port_loop(); let ldapport = port_loop();
config.ldapaddress = Some(format!("127.0.0.1:{}", ldapport)); config.ldapbindaddress = Some(format!("127.0.0.1:{}", ldapport));
Url::parse(&format!("ldap://127.0.0.1:{}", ldapport)) Url::parse(&format!("ldap://127.0.0.1:{}", ldapport))
.inspect_err(|err| error!(?err, "ldap address setup")) .inspect_err(|err| error!(?err, "ldap address setup"))
.ok() .ok()

View file

@ -70,7 +70,7 @@ async fn setup_test(fix_fn: Fixture) -> (Resolver, KanidmClient) {
}); });
// Setup the config ... // Setup the config ...
let mut config = Configuration::new(); let mut config = Configuration::new_for_test();
config.address = format!("127.0.0.1:{}", port); config.address = format!("127.0.0.1:{}", port);
config.integration_test_config = Some(int_config); config.integration_test_config = Some(int_config);
config.role = ServerRole::WriteReplicaNoUI; config.role = ServerRole::WriteReplicaNoUI;