Adding env var configs for the server (#2329)

* env var config for server
* I am my own clippy now
* Man, that got complicated quick
This commit is contained in:
James Hodgkinson 2023-11-24 11:27:49 +10:00 committed by GitHub
parent 24c4f15b5e
commit 916bb4ec04
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
11 changed files with 357 additions and 97 deletions

1
.gitignore vendored
View file

@ -37,4 +37,5 @@ pykanidm/site/
# oauth2 integration test things # oauth2 integration test things
scripts/oauth_proxy/client.secret scripts/oauth_proxy/client.secret
scripts/oauth_proxy/envfile scripts/oauth_proxy/envfile
# local config things
.envrc .envrc

View file

@ -15,12 +15,14 @@ tls_key = "/tmp/kanidm/key.pem"
log_level = "debug" log_level = "debug"
# log_level = "trace" # log_level = "trace"
otel_grpc_url = "http://localhost:4317" # otel_grpc_url = "http://localhost:4317"
domain = "localhost" domain = "localhost"
origin = "https://localhost:8443" origin = "https://localhost:8443"
trust_x_forward_for = true trust_x_forward_for = true
[online_backup] [online_backup]
path = "/tmp/kanidm/backups/" # defaults to db_path
# path = "/tmp/kanidm/backups/"
schedule = "@hourly" schedule = "@hourly"
# enabled = true # default enabled

View file

@ -11,11 +11,6 @@ use tracing_subscriber::{prelude::*, EnvFilter};
pub const MAX_EVENTS_PER_SPAN: u32 = 64 * 1024; pub const MAX_EVENTS_PER_SPAN: u32 = 64 * 1024;
pub const MAX_ATTRIBUTES_PER_SPAN: u32 = 128; pub const MAX_ATTRIBUTES_PER_SPAN: u32 = 128;
/// if you set the KANIDM_OTEL_GRPC_ENDPOINT env var you'll start the OpenTelemetry pipeline.
pub fn get_otlp_endpoint() -> Option<String> {
std::env::var("KANIDM_OTEL_GRPC_ENDPOINT").ok()
}
// TODO: this is coming back later // TODO: this is coming back later
// #[allow(dead_code)] // #[allow(dead_code)]
// pub fn init_metrics() -> metrics::Result<MeterProvider> { // pub fn init_metrics() -> metrics::Result<MeterProvider> {

View file

@ -33,6 +33,9 @@ pub const DEFAULT_SERVER_LOCALHOST: &str = "localhost:8443";
/// The default LDAP bind address for the Kanidm server /// The default LDAP bind address for the Kanidm server
pub const DEFAULT_LDAP_ADDRESS: &str = "127.0.0.1:636"; pub const DEFAULT_LDAP_ADDRESS: &str = "127.0.0.1:636";
pub const DEFAULT_LDAP_LOCALHOST: &str = "localhost:636"; pub const DEFAULT_LDAP_LOCALHOST: &str = "localhost:636";
/// Default replication configuration
pub const DEFAULT_REPLICATION_ADDRESS: &str = "127.0.0.1:8444";
pub const DEFAULT_REPLICATION_ORIGIN: &str = "repl://localhost:8444";
// IF YOU CHANGE THESE VALUES YOU BREAK EVERYTHING // IF YOU CHANGE THESE VALUES YOU BREAK EVERYTHING
pub const ATTR_ACCOUNT_EXPIRE: &str = "account_expire"; pub const ATTR_ACCOUNT_EXPIRE: &str = "account_expire";

View file

@ -4,11 +4,9 @@
//! These components should be "per server". Any "per domain" config should be in the system //! These components should be "per server". Any "per domain" config should be in the system
//! or domain entries that are able to be replicated. //! or domain entries that are able to be replicated.
use std::collections::BTreeMap;
use std::fmt; use std::fmt;
use std::fs::File; use std::fs::File;
use std::io::Read; use std::io::Read;
use std::net::SocketAddr;
use std::path::Path; use std::path::Path;
use std::str::FromStr; use std::str::FromStr;
@ -16,19 +14,17 @@ use kanidm_proto::constants::DEFAULT_SERVER_ADDRESS;
use kanidm_proto::internal::FsType; use kanidm_proto::internal::FsType;
use kanidm_proto::messages::ConsoleOutputMode; use kanidm_proto::messages::ConsoleOutputMode;
use kanidm_lib_crypto::prelude::X509;
use kanidm_lib_crypto::serialise::x509b64;
use serde::Deserialize; use serde::Deserialize;
use sketching::LogLevel; use sketching::LogLevel;
use url::Url; use url::Url;
use crate::repl::config::ReplicationConfiguration;
#[derive(Deserialize, Debug, Clone)] #[derive(Deserialize, Debug, Clone)]
pub struct OnlineBackup { pub struct OnlineBackup {
/// The destination folder for your backups /// The destination folder for your backups, defaults to the db_path dir if not set
pub path: String, pub path: Option<String>,
#[serde(default = "default_online_backup_schedule")] /// The schedule to run online backups (see <https://crontab.guru/>), defaults to @daily
/// The schedule to run online backups (see <https://crontab.guru/>)
/// ///
/// Examples: /// Examples:
/// ///
@ -47,8 +43,26 @@ pub struct OnlineBackup {
/// (it's very similar to the standard cron syntax, it just allows to specify the seconds at the beginning and the year at the end) /// (it's very similar to the standard cron syntax, it just allows to specify the seconds at the beginning and the year at the end)
pub schedule: String, pub schedule: String,
#[serde(default = "default_online_backup_versions")] #[serde(default = "default_online_backup_versions")]
/// How many past backup versions to keep /// How many past backup versions to keep, defaults to 7
pub versions: usize, pub versions: usize,
/// Enabled by default
#[serde(default = "default_online_backup_enabled")]
pub enabled: bool,
}
impl Default for OnlineBackup {
    /// Backup settings used when nothing is configured explicitly: enabled,
    /// on the default schedule, keeping the default number of versions,
    /// with no explicit output path.
    fn default() -> Self {
        Self {
            enabled: default_online_backup_enabled(),
            versions: default_online_backup_versions(),
            schedule: default_online_backup_schedule(),
            // Leaving the path unset makes consumers fall back to the
            // kanidm db path.
            path: None,
        }
    }
}
/// Serde default for `OnlineBackup::enabled`: backups are on unless
/// explicitly disabled.
fn default_online_backup_enabled() -> bool {
    true
}
fn default_online_backup_schedule() -> String { fn default_online_backup_schedule() -> String {
@ -65,61 +79,12 @@ pub struct TlsConfiguration {
pub key: String, pub key: String,
} }
#[derive(Deserialize, Debug, Clone)]
#[serde(tag = "type")]
pub enum RepNodeConfig {
#[serde(rename = "allow-pull")]
AllowPull {
#[serde(with = "x509b64")]
consumer_cert: X509,
},
#[serde(rename = "pull")]
Pull {
#[serde(with = "x509b64")]
supplier_cert: X509,
automatic_refresh: bool,
},
#[serde(rename = "mutual-pull")]
MutualPull {
#[serde(with = "x509b64")]
partner_cert: X509,
automatic_refresh: bool,
},
/*
AllowPush {
},
Push {
},
*/
}
#[derive(Deserialize, Debug, Clone)]
pub struct ReplicationConfiguration {
pub origin: Url,
pub bindaddress: SocketAddr,
/// Number of seconds between running a replication event
pub task_poll_interval: Option<u64>,
#[serde(flatten)]
pub manual: BTreeMap<Url, RepNodeConfig>,
}
const DEFAULT_REPL_TASK_POLL_INTERVAL: u64 = 15;
impl ReplicationConfiguration {
/// Get the task poll interval, or the default if not set.
pub(crate) fn get_task_poll_interval(&self) -> core::time::Duration {
core::time::Duration::from_secs(
self.task_poll_interval
.unwrap_or(DEFAULT_REPL_TASK_POLL_INTERVAL),
)
}
}
/// This is the Server Configuration as read from `server.toml`. /// This is the Server Configuration as read from `server.toml`.
/// ///
/// NOTE: not all flags or values from the internal [Configuration] object are exposed via this structure /// NOTE: not all flags or values from the internal [Configuration] object are exposed via this structure
/// to prevent certain settings being set (e.g. integration test modes) /// to prevent certain settings being set (e.g. integration test modes)
///
/// If you want to set these as environment variables, prefix them with `KANIDM_` and they will be picked up. This doesn't include replication peer config.
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)] #[serde(deny_unknown_fields)]
pub struct ServerConfig { pub struct ServerConfig {
@ -176,6 +141,7 @@ pub struct ServerConfig {
} }
impl ServerConfig { impl ServerConfig {
/// loads the configuration file from the path specified, then overlays fields from environment variables starting with `KANIDM_``
pub fn new<P: AsRef<Path>>(config_path: P) -> Result<Self, std::io::Error> { pub fn new<P: AsRef<Path>>(config_path: P) -> Result<Self, std::io::Error> {
let mut f = File::open(config_path.as_ref()).map_err(|e| { let mut f = File::open(config_path.as_ref()).map_err(|e| {
eprintln!("Unable to open config file [{:?}] 🥺", e); eprintln!("Unable to open config file [{:?}] 🥺", e);
@ -192,10 +158,194 @@ impl ServerConfig {
e e
})?; })?;
toml::from_str(contents.as_str()).map_err(|e| { let res: ServerConfig = toml::from_str(contents.as_str()).map_err(|e| {
eprintln!("unable to parse config {:?}", e); eprintln!(
"Unable to parse config from '{:?}': {:?}",
config_path.as_ref(),
e
);
std::io::Error::new(std::io::ErrorKind::Other, e) std::io::Error::new(std::io::ErrorKind::Other, e)
})?;
let res = res.try_from_env().map_err(|e| {
println!("Failed to use environment variable config: {e}");
std::io::Error::new(std::io::ErrorKind::Other, e)
})?;
Ok(res)
}
/// Updates the ServerConfig from environment variables starting with `KANIDM_`
fn try_from_env(mut self) -> Result<Self, String> {
for (key, value) in std::env::vars() {
if !key.starts_with("KANIDM_") {
continue;
}
let ignorable_build_fields = [
"KANIDM_CPU_FLAGS",
"KANIDM_DEFAULT_CONFIG_PATH",
"KANIDM_DEFAULT_UNIX_SHELL_PATH",
"KANIDM_PKG_VERSION",
"KANIDM_PROFILE_NAME",
"KANIDM_WEB_UI_PKG_PATH",
];
if ignorable_build_fields.contains(&key.as_str()) {
#[cfg(any(debug_assertions, test))]
eprintln!("-- Ignoring build-time env var {}", key);
continue;
}
match key.replace("KANIDM_", "").as_str() {
"DOMAIN" => {
self.domain = value.to_string();
}
"ORIGIN" => {
self.origin = value.to_string();
}
"DB_PATH" => {
self.origin = value.to_string();
}
"TLS_CHAIN" => {
self.tls_chain = Some(value.to_string());
}
"TLS_KEY" => {
self.tls_key = Some(value.to_string());
}
"BINDADDRESS" => {
self.bindaddress = Some(value.to_string());
}
"LDAPBINDADDRESS" => {
self.ldapbindaddress = Some(value.to_string());
}
"ROLE" => {
self.role = ServerRole::from_str(&value).map_err(|err| {
format!("Failed to parse KANIDM_ROLE as ServerRole: {}", err)
})?;
}
"LOG_LEVEL" => {
self.log_level = LogLevel::from_str(&value)
.map_err(|err| {
format!("Failed to parse KANIDM_LOG_LEVEL as LogLevel: {}", err)
}) })
.ok();
}
"ONLINE_BACKUP_PATH" => {
if let Some(backup) = &mut self.online_backup {
backup.path = Some(value.to_string());
} else {
self.online_backup = Some(OnlineBackup {
path: Some(value.to_string()),
..Default::default()
});
}
}
"ONLINE_BACKUP_SCHEDULE" => {
if let Some(backup) = &mut self.online_backup {
backup.schedule = value.to_string();
} else {
self.online_backup = Some(OnlineBackup {
schedule: value.to_string(),
..Default::default()
});
}
}
"ONLINE_BACKUP_VERSIONS" => {
let versions = value.parse().map_err(|_| {
"Failed to parse KANIDM_ONLINE_BACKUP_VERSIONS as usize".to_string()
})?;
if let Some(backup) = &mut self.online_backup {
backup.versions = versions;
} else {
self.online_backup = Some(OnlineBackup {
versions,
..Default::default()
})
}
}
"TRUST_X_FORWARD_FOR" => {
self.trust_x_forward_for = value
.parse()
.map_err(|_| {
"Failed to parse KANIDM_TRUST_X_FORWARD_FOR as bool".to_string()
})
.ok();
}
"DB_FS_TYPE" => {
self.db_fs_type = FsType::try_from(value.as_str())
.map_err(|_| {
"Failed to parse KANIDM_DB_FS_TYPE env var to valid value!".to_string()
})
.ok();
}
"DB_ARC_SIZE" => {
self.db_arc_size = value
.parse()
.map_err(|_| "Failed to parse KANIDM_DB_ARC_SIZE as value".to_string())
.ok();
}
"ADMIN_BIND_PATH" => {
self.adminbindpath = Some(value.to_string());
}
"REPLICATION_ORIGIN" => {
let repl_origin = Url::parse(value.as_str()).map_err(|err| {
format!("Failed to parse KANIDM_REPLICATION_ORIGIN as URL: {}", err)
})?;
if let Some(repl) = &mut self.repl_config {
repl.origin = repl_origin
} else {
self.repl_config = Some(ReplicationConfiguration {
origin: repl_origin,
..Default::default()
});
}
}
"I_ACKNOWLEDGE_THAT_REPLICATION_IS_IN_DEVELOPMENT" => {
self.i_acknowledge_that_replication_is_in_development =
value.parse().map_err(|_| {
"Failed to parse terribly long confirmation of replication beta-ness!"
.to_string()
})?;
}
"REPLICATION_BINDADDRESS" => {
let repl_bind_address = value
.parse()
.map_err(|_| "Failed to parse replication bind address".to_string())?;
if let Some(repl) = &mut self.repl_config {
repl.bindaddress = repl_bind_address;
} else {
self.repl_config = Some(ReplicationConfiguration {
bindaddress: repl_bind_address,
..Default::default()
});
}
}
"REPLICATION_TASK_POLL_INTERVAL" => {
let poll_interval = value
.parse()
.map_err(|_| {
"Failed to parse replication task poll interval as u64".to_string()
})
.ok();
if let Some(repl) = &mut self.repl_config {
repl.task_poll_interval = poll_interval;
} else {
self.repl_config = Some(ReplicationConfiguration {
task_poll_interval: poll_interval,
..Default::default()
});
}
}
"OTEL_GRPC_URL" => {
self.otel_grpc_url = Some(value.to_string());
}
_ => eprintln!("Ignoring env var {}", key),
}
}
Ok(self)
} }
/// Return the ARC size for the database, it's something you really shouldn't touch unless you are doing extreme tuning. /// Return the ARC size for the database, it's something you really shouldn't touch unless you are doing extreme tuning.
@ -276,6 +426,8 @@ pub struct Configuration {
pub repl_config: Option<ReplicationConfiguration>, pub repl_config: Option<ReplicationConfiguration>,
/// This allows internally setting some unsafe options for replication. /// This allows internally setting some unsafe options for replication.
pub integration_repl_config: Option<Box<IntegrationReplConfig>>, pub integration_repl_config: Option<Box<IntegrationReplConfig>>,
pub otel_grpc_url: Option<String>,
} }
impl fmt::Display for Configuration { impl fmt::Display for Configuration {
@ -300,8 +452,11 @@ impl fmt::Display for Configuration {
match &self.online_backup { match &self.online_backup {
Some(bck) => write!( Some(bck) => write!(
f, f,
"online_backup: enabled - schedule: {} versions: {}, ", "online_backup: enabled: {} - schedule: {} versions: {} path: {}, ",
bck.schedule, bck.versions bck.enabled,
bck.schedule,
bck.versions,
bck.path.clone().unwrap_or("<unset>".to_string()),
), ),
None => write!(f, "online_backup: disabled, "), None => write!(f, "online_backup: disabled, "),
}?; }?;
@ -328,6 +483,7 @@ impl fmt::Display for Configuration {
write!(f, "replication: disabled, ")?; write!(f, "replication: disabled, ")?;
} }
} }
write!(f, "otel_grpc_url: {:?}", self.otel_grpc_url)?;
Ok(()) Ok(())
} }
} }
@ -365,6 +521,7 @@ impl Configuration {
role: ServerRole::WriteReplica, role: ServerRole::WriteReplica,
repl_config: None, repl_config: None,
integration_repl_config: None, integration_repl_config: None,
otel_grpc_url: None,
} }
} }
@ -379,13 +536,28 @@ impl Configuration {
match cfg { match cfg {
None => {} None => {}
Some(cfg) => { Some(cfg) => {
let path = cfg.path.to_string(); let path = match cfg.path.clone() {
let schedule = cfg.schedule.to_string(); Some(path) => Some(path),
let versions = cfg.versions; // Default to the same path as the data directory
None => {
let db_filepath = Path::new(&self.db_path);
#[allow(clippy::expect_used)]
let db_path = db_filepath
.parent()
.map(|p| {
#[allow(clippy::expect_used)]
p.to_str()
.expect("Couldn't turn db_path to str")
.to_string()
})
.expect("Unable to get parent directory of db_path");
Some(db_path)
}
};
self.online_backup = Some(OnlineBackup { self.online_backup = Some(OnlineBackup {
path, path,
schedule, ..cfg.clone()
versions,
}) })
} }
} }

View file

@ -59,7 +59,13 @@ impl IntervalActor {
online_backup_config: &OnlineBackup, online_backup_config: &OnlineBackup,
mut rx: broadcast::Receiver<CoreAction>, mut rx: broadcast::Receiver<CoreAction>,
) -> Result<tokio::task::JoinHandle<()>, ()> { ) -> Result<tokio::task::JoinHandle<()>, ()> {
let outpath = online_backup_config.path.to_owned(); let outpath = match online_backup_config.path.to_owned() {
Some(val) => val,
None => {
error!("Online backup output path is not set.");
return Err(());
}
};
let versions = online_backup_config.versions; let versions = online_backup_config.versions;
let crono_expr = online_backup_config.schedule.as_str().to_string(); let crono_expr = online_backup_config.schedule.as_str().to_string();
let mut crono_expr_values = crono_expr.split_ascii_whitespace().collect::<Vec<&str>>(); let mut crono_expr_values = crono_expr.split_ascii_whitespace().collect::<Vec<&str>>();

View file

@ -957,12 +957,17 @@ pub async fn create_server_core(
// Setup timed events associated to the read thread // Setup timed events associated to the read thread
let maybe_backup_handle = match &config.online_backup { let maybe_backup_handle = match &config.online_backup {
Some(online_backup_config) => { Some(online_backup_config) => {
if online_backup_config.enabled {
let handle = IntervalActor::start_online_backup( let handle = IntervalActor::start_online_backup(
server_read_ref, server_read_ref,
online_backup_config, online_backup_config,
broadcast_tx.subscribe(), broadcast_tx.subscribe(),
)?; )?;
Some(handle) Some(handle)
} else {
debug!("Backups disabled");
None
}
} }
None => { None => {
debug!("Online backup not requested, skipping"); debug!("Online backup not requested, skipping");

View file

@ -0,0 +1,80 @@
use kanidm_lib_crypto::prelude::X509;
use kanidm_lib_crypto::serialise::x509b64;
use kanidm_proto::constants::{DEFAULT_REPLICATION_ADDRESS, DEFAULT_REPLICATION_ORIGIN};
use serde::Deserialize;
use std::collections::BTreeMap;
use std::net::SocketAddr;
use std::str::FromStr;
use url::Url;
/// How this node relates to a single replication peer. Deserialized with an
/// internal `type` tag selecting `allow-pull`, `pull`, or `mutual-pull`.
#[derive(Deserialize, Debug, Clone)]
#[serde(tag = "type")]
pub enum RepNodeConfig {
    /// Allow the holder of `consumer_cert` to pull updates from this node.
    #[serde(rename = "allow-pull")]
    AllowPull {
        /// The consumer's x509 certificate, (de)serialized via `x509b64`.
        #[serde(with = "x509b64")]
        consumer_cert: X509,
    },
    /// Pull updates from the peer identified by `supplier_cert`.
    #[serde(rename = "pull")]
    Pull {
        /// The supplier's x509 certificate, (de)serialized via `x509b64`.
        #[serde(with = "x509b64")]
        supplier_cert: X509,
        // Presumably enables automatic refresh from this supplier —
        // TODO(review): confirm semantics against the consumer task.
        automatic_refresh: bool,
    },
    /// Both pull from, and allow pulls by, the peer identified by
    /// `partner_cert`.
    #[serde(rename = "mutual-pull")]
    MutualPull {
        /// The partner's x509 certificate, (de)serialized via `x509b64`.
        #[serde(with = "x509b64")]
        partner_cert: X509,
        // Presumably enables automatic refresh from this partner —
        // TODO(review): confirm semantics against the consumer task.
        automatic_refresh: bool,
    },
    // Push-based variants are not implemented yet.
    /*
    AllowPush {
    },
    Push {
    },
    */
}
/// Server-level replication configuration.
#[derive(Deserialize, Debug, Clone)]
pub struct ReplicationConfiguration {
    /// Defaults to [kanidm_proto::constants::DEFAULT_REPLICATION_ORIGIN]
    pub origin: Url,
    /// Defaults to [kanidm_proto::constants::DEFAULT_REPLICATION_ADDRESS]
    pub bindaddress: SocketAddr,
    /// Number of seconds between running a replication event
    pub task_poll_interval: Option<u64>,
    /// Per-peer configuration keyed by the peer's URL. `flatten` means any
    /// extra keys in the config section are parsed as peer entries.
    #[serde(flatten)]
    pub manual: BTreeMap<Url, RepNodeConfig>,
}
impl Default for ReplicationConfiguration {
    /// A configuration listening on the default replication address, with
    /// the default origin, no poll-interval override, and no peers.
    fn default() -> Self {
        // Both inputs are compile-time constants, so a parse failure here is
        // a build bug and panicking via expect() is acceptable.
        #[allow(clippy::expect_used)]
        let origin = Url::from_str(DEFAULT_REPLICATION_ORIGIN)
            .expect("Failed to parse default replication origin URL");
        #[allow(clippy::expect_used)]
        let bindaddress: SocketAddr = DEFAULT_REPLICATION_ADDRESS
            .parse()
            .expect("Failed to parse default replication bind address");
        Self {
            origin,
            bindaddress,
            task_poll_interval: None,
            manual: BTreeMap::new(),
        }
    }
}
/// Fallback poll interval in seconds when `task_poll_interval` is unset.
const DEFAULT_REPL_TASK_POLL_INTERVAL: u64 = 15;

impl ReplicationConfiguration {
    /// The duration between replication runs, falling back to
    /// [DEFAULT_REPL_TASK_POLL_INTERVAL] when no interval was configured.
    pub(crate) fn get_task_poll_interval(&self) -> core::time::Duration {
        let secs = self
            .task_poll_interval
            .unwrap_or(DEFAULT_REPL_TASK_POLL_INTERVAL);
        core::time::Duration::from_secs(secs)
    }
}

View file

@ -31,13 +31,13 @@ use kanidmd_lib::prelude::IdmServer;
use kanidmd_lib::repl::proto::ConsumerState; use kanidmd_lib::repl::proto::ConsumerState;
use kanidmd_lib::server::QueryServerTransaction; use kanidmd_lib::server::QueryServerTransaction;
use crate::config::RepNodeConfig;
use crate::config::ReplicationConfiguration;
use crate::CoreAction; use crate::CoreAction;
use config::{RepNodeConfig, ReplicationConfiguration};
use self::codec::{ConsumerRequest, SupplierResponse}; use self::codec::{ConsumerRequest, SupplierResponse};
mod codec; mod codec;
pub(crate) mod config;
pub(crate) enum ReplCtrl { pub(crate) enum ReplCtrl {
GetCertificate { GetCertificate {

View file

@ -246,14 +246,10 @@ async fn kanidm_main() -> ExitCode {
println!("Log filter: {:?}", log_filter); println!("Log filter: {:?}", log_filter);
let otel_grpc_url = match sconfig.as_ref() { // if we have a server config and it has an otel url, then we'll start the logging pipeline
Some(sconfig) => match sconfig.otel_grpc_url.clone() { let otel_grpc_url = sconfig
Some(otel_grpc_url) => Some(otel_grpc_url), .as_ref()
None => sketching::otel::get_otlp_endpoint(), .and_then(|config| config.otel_grpc_url.clone());
},
// if we don't have a config, fall back to trying the env var
None => sketching::otel::get_otlp_endpoint(),
};
// TODO: only send to stderr when we're not in a TTY // TODO: only send to stderr when we're not in a TTY
let sub = match sketching::otel::start_logging_pipeline( let sub = match sketching::otel::start_logging_pipeline(

View file

@ -1073,8 +1073,8 @@ impl FilterComp {
terms.push(FilterComp::Stw(a.clone(), v)); terms.push(FilterComp::Stw(a.clone(), v));
} }
for term in any.into_iter() { for term in any.iter() {
let v = qs.clone_partialvalue(a.as_str(), &term)?; let v = qs.clone_partialvalue(a.as_str(), term)?;
terms.push(FilterComp::Cnt(a.clone(), v)); terms.push(FilterComp::Cnt(a.clone(), v));
} }