diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md
index c01e5660a..ba8f7f4c2 100644
--- a/book/src/SUMMARY.md
+++ b/book/src/SUMMARY.md
@@ -53,7 +53,6 @@
 
 - [Service Integration Examples](examples/readme.md)
   - [Kubernetes Ingress](examples/kubernetes_ingress.md)
-  - [OAuth2 Examples](integrations/oauth2/examples.md)
   - [Traefik](examples/traefik.md)
 
 - [Replication](repl/readme.md)
diff --git a/examples/server.toml b/examples/server.toml
index e0c2f7e7d..9a41738c5 100644
--- a/examples/server.toml
+++ b/examples/server.toml
@@ -1,3 +1,6 @@
+# The server configuration file version.
+version = "2"
+
 #   The webserver bind address. Requires TLS certificates.
 #   If the port is set to 443 you may require the
 #   NET_BIND_SERVICE capability.
diff --git a/examples/server_container.toml b/examples/server_container.toml
index 1378351e8..f57923a40 100644
--- a/examples/server_container.toml
+++ b/examples/server_container.toml
@@ -1,3 +1,6 @@
+# The server configuration file version.
+version = "2"
+
 #   The webserver bind address. Requires TLS certificates.
 #   If the port is set to 443 you may require the
 #   NET_BIND_SERVICE capability.
diff --git a/scripts/pykanidm/integration_test.py b/scripts/pykanidm/integration_test.py
index 0a39d3a93..5f0c891af 100644
--- a/scripts/pykanidm/integration_test.py
+++ b/scripts/pykanidm/integration_test.py
@@ -25,7 +25,7 @@ def recover_account(username: str) -> str:
         "recover-account",
         username,
         "--config",
-        "../../examples/insecure_server.toml",
+        "./insecure_server.toml",
         "--output",
         "json",
     ]
diff --git a/scripts/setup_dev_environment.sh b/scripts/setup_dev_environment.sh
index 0fdb91334..7eeffdab6 100755
--- a/scripts/setup_dev_environment.sh
+++ b/scripts/setup_dev_environment.sh
@@ -44,7 +44,7 @@ fi
 
 
 # defaults
-KANIDM_CONFIG_FILE="../../examples/insecure_server.toml"
+KANIDM_CONFIG_FILE="./insecure_server.toml"
 KANIDM_URL="$(rg origin "${KANIDM_CONFIG_FILE}" | awk '{print $NF}' | tr -d '"')"
 KANIDM_CA_PATH="/tmp/kanidm/ca.pem"
 
@@ -83,7 +83,7 @@ if [ "${REMOVE_TEST_DB}" -eq 1 ]; then
     rm /tmp/kanidm/kanidm.db || true
 fi
 
-export KANIDM_CONFIG="../../examples/insecure_server.toml"
+export KANIDM_CONFIG="./insecure_server.toml"
 IDM_ADMIN_USER="idm_admin@localhost"
 
 echo "Resetting the idm_admin user..."
diff --git a/scripts/test_run_release_server.sh b/scripts/test_run_release_server.sh
index fdba39b76..6cc4c64f9 100755
--- a/scripts/test_run_release_server.sh
+++ b/scripts/test_run_release_server.sh
@@ -25,7 +25,7 @@ if [ ! -f "run_insecure_dev_server.sh" ]; then
     exit 1
 fi
 
-export KANIDM_CONFIG="../../examples/insecure_server.toml"
+export KANIDM_CONFIG="./insecure_server.toml"
 
 mkdir -p /tmp/kanidm/client_ca
 
@@ -48,7 +48,7 @@ fi
 
 ATTEMPT=0
 
-KANIDM_CONFIG_FILE="../../examples/insecure_server.toml"
+KANIDM_CONFIG_FILE="./insecure_server.toml"
 KANIDM_URL="$(rg origin "${KANIDM_CONFIG_FILE}" | awk '{print $NF}' | tr -d '"')"
 KANIDM_CA_PATH="/tmp/kanidm/ca.pem"
 
diff --git a/server/core/src/actors/v1_read.rs b/server/core/src/actors/v1_read.rs
index a91cbad95..5b44bb31c 100644
--- a/server/core/src/actors/v1_read.rs
+++ b/server/core/src/actors/v1_read.rs
@@ -191,7 +191,7 @@ impl QueryServerReadV1 {
     pub async fn handle_online_backup(
         &self,
         msg: OnlineBackupEvent,
-        outpath: &str,
+        outpath: &Path,
         versions: usize,
     ) -> Result<(), OperationError> {
         trace!(eventid = ?msg.eventid, "Begin online backup event");
@@ -200,12 +200,12 @@ impl QueryServerReadV1 {
 
         #[allow(clippy::unwrap_used)]
         let timestamp = now.format(&Rfc3339).unwrap();
-        let dest_file = format!("{}/backup-{}.json", outpath, timestamp);
+        let dest_file = outpath.join(format!("backup-{}.json", timestamp));
 
-        if Path::new(&dest_file).exists() {
+        if dest_file.exists() {
             error!(
                 "Online backup file {} already exists, will not overwrite it.",
-                dest_file
+                dest_file.display()
             );
             return Err(OperationError::InvalidState);
         }
@@ -218,10 +218,14 @@ impl QueryServerReadV1 {
                 .get_be_txn()
                 .backup(&dest_file)
                 .map(|()| {
-                    info!("Online backup created {} successfully", dest_file);
+                    info!("Online backup created {} successfully", dest_file.display());
                 })
                 .map_err(|e| {
-                    error!("Online backup failed to create {}: {:?}", dest_file, e);
+                    error!(
+                        "Online backup failed to create {}: {:?}",
+                        dest_file.display(),
+                        e
+                    );
                     OperationError::InvalidState
                 })?;
         }
@@ -267,7 +271,11 @@ impl QueryServerReadV1 {
                 }
             }
             Err(e) => {
-                error!("Online backup cleanup error read dir {}: {}", outpath, e);
+                error!(
+                    "Online backup cleanup error read dir {}: {}",
+                    outpath.display(),
+                    e
+                );
                 return Err(OperationError::InvalidState);
             }
         }
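
For context, a minimal standalone sketch (the helper name and paths below are illustrative, not Kanidm's) of the pattern the hunk above moves to: building the backup destination with `PathBuf::join` rather than formatting a string, and using `display()` when logging the path.

```rust
use std::path::{Path, PathBuf};

// Build a timestamped destination file under the backup directory.
fn backup_dest(outpath: &Path, timestamp: &str) -> PathBuf {
    outpath.join(format!("backup-{timestamp}.json"))
}

fn main() {
    let dest = backup_dest(Path::new("/var/lib/kanidm/backups"), "2025-01-01T00-00-00Z");
    if dest.exists() {
        // Refuse to overwrite an existing backup, mirroring the check above.
        eprintln!("{} already exists, refusing to overwrite", dest.display());
    } else {
        println!("would write backup to {}", dest.display());
    }
}
```
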
diff --git a/server/core/src/config.rs b/server/core/src/config.rs
index ce6f7c33d..ad3d3bd9c 100644
--- a/server/core/src/config.rs
+++ b/server/core/src/config.rs
@@ -20,10 +20,30 @@ use url::Url;
 
 use crate::repl::config::ReplicationConfiguration;
 
+// Allowed as the large enum variant is only short-lived at startup before conversion into the true config.
+#[allow(clippy::large_enum_variant)]
+// These structures allow us to move to version tagging of the configuration structure.
+#[derive(Debug, Deserialize)]
+#[serde(untagged)]
+pub enum ServerConfigUntagged {
+    Version(ServerConfigVersion),
+    Legacy(ServerConfig),
+}
+
+#[derive(Debug, Deserialize)]
+#[serde(tag = "version")]
+pub enum ServerConfigVersion {
+    #[serde(rename = "2")]
+    V2 {
+        #[serde(flatten)]
+        values: ServerConfigV2,
+    },
+}
+
 #[derive(Deserialize, Debug, Clone)]
 pub struct OnlineBackup {
     /// The destination folder for your backups, defaults to the db_path dir if not set
-    pub path: Option<String>,
+    pub path: Option<PathBuf>,
     /// The schedule to run online backups (see <https://crontab.guru/>), defaults to @daily
     ///
     /// Examples:
@@ -92,51 +112,53 @@ pub struct TlsConfiguration {
 #[serde(deny_unknown_fields)]
 pub struct ServerConfig {
     /// *REQUIRED* - Kanidm Domain, eg `kanidm.example.com`.
-    pub domain: Option<String>,
+    domain: Option<String>,
     /// *REQUIRED* - The user-facing HTTPS URL for this server, eg <https://idm.example.com>
     // TODO  -this should be URL
-    pub origin: Option<String>,
+    origin: Option<String>,
     /// File path of the database file
-    pub db_path: Option<String>,
+    db_path: Option<PathBuf>,
+    /// The filesystem type, either "zfs" or "generic". Defaults to "generic" if unset. If you change this, run a database vacuum.
+    db_fs_type: Option<kanidm_proto::internal::FsType>,
+
     ///  *REQUIRED* - The file path to the TLS Certificate Chain
-    pub tls_chain: Option<String>,
+    tls_chain: Option<PathBuf>,
     ///  *REQUIRED* - The file path to the TLS Private Key
-    pub tls_key: Option<String>,
+    tls_key: Option<PathBuf>,
 
     /// The directory path of the client ca and crl dir.
-    pub tls_client_ca: Option<String>,
+    tls_client_ca: Option<PathBuf>,
 
     /// The listener address for the HTTPS server.
     ///
     /// eg. `[::]:8443` or `127.0.0.1:8443`. Defaults to [kanidm_proto::constants::DEFAULT_SERVER_ADDRESS]
-    pub bindaddress: Option<String>,
+    bindaddress: Option<String>,
     /// The listener address for the LDAP server.
     ///
     /// eg. `[::]:3636` or `127.0.0.1:3636`.
     ///
     /// If unset, the LDAP server will be disabled.
-    pub ldapbindaddress: Option<String>,
+    ldapbindaddress: Option<String>,
     /// The role of this server, one of write_replica, write_replica_no_ui, read_only_replica, defaults to [ServerRole::WriteReplica]
-    #[serde(default)]
-    pub role: ServerRole,
+    role: Option<ServerRole>,
     /// The log level, one of info, debug, trace. Defaults to "info" if not set.
-    pub log_level: Option<LogLevel>,
+    log_level: Option<LogLevel>,
 
     /// Backup Configuration, see [OnlineBackup] for details on sub-keys.
-    pub online_backup: Option<OnlineBackup>,
+    online_backup: Option<OnlineBackup>,
 
     /// Trust the X-Forwarded-For header for client IP address. Defaults to false if unset.
-    pub trust_x_forward_for: Option<bool>,
-
-    /// The filesystem type, either "zfs" or "generic". Defaults to "generic" if unset. I you change this, run a database vacuum.
-    pub db_fs_type: Option<kanidm_proto::internal::FsType>,
+    trust_x_forward_for: Option<bool>,
 
     /// The path to the "admin" socket, used for local communication when performing certain server control tasks. Default is set on build, based on the system target.
-    pub adminbindpath: Option<String>,
+    adminbindpath: Option<String>,
 
     /// The maximum amount of threads the server will use for the async worker pool. Defaults
     /// to std::threads::available_parallelism.
-    pub thread_count: Option<usize>,
+    thread_count: Option<usize>,
+
+    /// Maximum Request Size in bytes
+    maximum_request_size_bytes: Option<usize>,
 
     /// Don't touch this unless you know what you're doing!
     #[allow(dead_code)]
@@ -144,110 +166,100 @@ pub struct ServerConfig {
     #[serde(default)]
     #[serde(rename = "replication")]
     /// Replication configuration, this is a development feature and not yet ready for production use.
-    pub repl_config: Option<ReplicationConfiguration>,
+    repl_config: Option<ReplicationConfiguration>,
     /// An optional OpenTelemetry collector (GRPC) url to send trace and log data to, eg `http://localhost:4317`. If not set, disables the feature.
-    pub otel_grpc_url: Option<String>,
+    otel_grpc_url: Option<String>,
 }
 
-impl ServerConfig {
+impl ServerConfigUntagged {
     /// loads the configuration file from the path specified, then overlays fields from environment variables starting with `KANIDM_``
-    pub fn new<P: AsRef<Path>>(config_path: Option<P>) -> Result<Self, std::io::Error> {
-        // start with a base config
-        let mut config = ServerConfig::default();
-
-        if let Some(config_path) = config_path {
-            // see if we can load it from the config file you asked for
-            if config_path.as_ref().exists() {
-                eprintln!("📜 Using config file: {:?}", config_path.as_ref());
-                let mut f: File = File::open(config_path.as_ref()).map_err(|e| {
-                    eprintln!("Unable to open config file [{:?}] 🥺", e);
-                    let diag = kanidm_lib_file_permissions::diagnose_path(config_path.as_ref());
-                    eprintln!("{}", diag);
-                    e
-                })?;
-
-                let mut contents = String::new();
-
-                f.read_to_string(&mut contents).map_err(|e| {
-                    eprintln!("unable to read contents {:?}", e);
-                    let diag = kanidm_lib_file_permissions::diagnose_path(config_path.as_ref());
-                    eprintln!("{}", diag);
-                    e
-                })?;
-
-                // if we *can* load the config we'll set config to that.
-                match toml::from_str::<ServerConfig>(contents.as_str()) {
-                    Err(err) => {
-                        eprintln!(
-                            "Unable to parse config from '{:?}': {:?}",
-                            config_path.as_ref(),
-                            err
-                        );
-                    }
-                    Ok(val) => config = val,
-                };
-            } else {
-                eprintln!("📜 No config file found at {:?}", config_path.as_ref());
-            }
-        } else {
-            eprintln!(
-                "WARNING: No configuration path was provided, relying on environment variables."
-            );
-        };
-
-        // build from the environment variables
-        let res = config.try_from_env().map_err(|e| {
-            println!("Failed to use environment variable config: {e}");
-            std::io::Error::new(std::io::ErrorKind::Other, e)
+    pub fn new<P: AsRef<Path>>(config_path: P) -> Result<Self, std::io::Error> {
+        // see if we can load it from the config file you asked for
+        eprintln!("📜 Using config file: {:?}", config_path.as_ref());
+        let mut f: File = File::open(config_path.as_ref()).inspect_err(|e| {
+            eprintln!("Unable to open config file [{:?}] 🥺", e);
+            let diag = kanidm_lib_file_permissions::diagnose_path(config_path.as_ref());
+            eprintln!("{}", diag);
         })?;
 
-        // check if the required fields are there
-        let mut config_failed = false;
-        if res.domain.is_none() {
-            eprintln!("❌ 'domain' field in server configuration is not set, server cannot start!");
-            config_failed = true;
-        }
+        let mut contents = String::new();
 
-        if res.origin.is_none() {
-            eprintln!("❌ 'origin' field in server configuration is not set, server cannot start!");
-            config_failed = true;
-        }
+        f.read_to_string(&mut contents).inspect_err(|e| {
+            eprintln!("unable to read contents {:?}", e);
+            let diag = kanidm_lib_file_permissions::diagnose_path(config_path.as_ref());
+            eprintln!("{}", diag);
+        })?;
 
-        if res.db_path.is_none() {
+        // if we *can* parse the config, return it to the caller.
+        toml::from_str::<ServerConfigUntagged>(contents.as_str()).map_err(|err| {
             eprintln!(
-                "❌ 'db_path' field in server configuration is not set, server cannot start!"
+                "Unable to parse config from '{:?}': {:?}",
+                config_path.as_ref(),
+                err
             );
-            config_failed = true;
-        }
-
-        #[cfg(not(test))]
-        if res.tls_chain.is_none() {
-            eprintln!(
-                "❌ 'tls_chain' field in server configuration is not set, server cannot start!"
-            );
-            config_failed = true;
-        }
-        #[cfg(not(test))]
-        if res.tls_key.is_none() {
-            eprintln!(
-                "❌ 'tls_key' field in server configuration is not set, server cannot start!"
-            );
-            config_failed = true;
-        }
-
-        if config_failed {
-            eprintln!("Failed to parse configuration, server cannot start!");
-            Err(std::io::Error::new(
-                std::io::ErrorKind::Other,
-                "Failed to parse configuration, server cannot start!",
-            ))
-        } else {
-            Ok(res)
-        }
+            std::io::Error::new(std::io::ErrorKind::InvalidData, err)
+        })
     }
+}
 
+#[derive(Debug, Deserialize, Default)]
+#[serde(deny_unknown_fields)]
+pub struct ServerConfigV2 {
+    domain: Option<String>,
+    origin: Option<String>,
+    db_path: Option<PathBuf>,
+    db_fs_type: Option<kanidm_proto::internal::FsType>,
+    tls_chain: Option<PathBuf>,
+    tls_key: Option<PathBuf>,
+    tls_client_ca: Option<PathBuf>,
+    bindaddress: Option<String>,
+    ldapbindaddress: Option<String>,
+    role: Option<ServerRole>,
+    log_level: Option<LogLevel>,
+    online_backup: Option<OnlineBackup>,
+    trust_x_forward_for: Option<bool>,
+    adminbindpath: Option<String>,
+    thread_count: Option<usize>,
+    maximum_request_size_bytes: Option<usize>,
+    #[allow(dead_code)]
+    db_arc_size: Option<usize>,
+    #[serde(default)]
+    #[serde(rename = "replication")]
+    repl_config: Option<ReplicationConfiguration>,
+    otel_grpc_url: Option<String>,
+}
+
+#[derive(Default)]
+pub struct CliConfig {
+    pub output_mode: Option<ConsoleOutputMode>,
+}
+
+#[derive(Default)]
+pub struct EnvironmentConfig {
+    domain: Option<String>,
+    origin: Option<String>,
+    db_path: Option<PathBuf>,
+    tls_chain: Option<PathBuf>,
+    tls_key: Option<PathBuf>,
+    tls_client_ca: Option<PathBuf>,
+    bindaddress: Option<String>,
+    ldapbindaddress: Option<String>,
+    role: Option<ServerRole>,
+    log_level: Option<LogLevel>,
+    online_backup: Option<OnlineBackup>,
+    trust_x_forward_for: Option<bool>,
+    db_fs_type: Option<kanidm_proto::internal::FsType>,
+    adminbindpath: Option<String>,
+    db_arc_size: Option<usize>,
+    repl_config: Option<ReplicationConfiguration>,
+    otel_grpc_url: Option<String>,
+}
+
+impl EnvironmentConfig {
     /// Updates the ServerConfig from environment variables starting with `KANIDM_`
-    fn try_from_env(mut self) -> Result<Self, String> {
+    pub fn new() -> Result<Self, String> {
+        let mut env_config = Self::default();
+
         for (key, value) in std::env::vars() {
             let Some(key) = key.strip_prefix("KANIDM_") else {
                 continue;
@@ -272,56 +284,56 @@ impl ServerConfig {
 
             match key {
                 "DOMAIN" => {
-                    self.domain = Some(value.to_string());
+                    env_config.domain = Some(value.to_string());
                 }
                 "ORIGIN" => {
-                    self.origin = Some(value.to_string());
+                    env_config.origin = Some(value.to_string());
                 }
                 "DB_PATH" => {
-                    self.db_path = Some(value.to_string());
+                    env_config.db_path = Some(PathBuf::from(value.to_string()));
                 }
                 "TLS_CHAIN" => {
-                    self.tls_chain = Some(value.to_string());
+                    env_config.tls_chain = Some(PathBuf::from(value.to_string()));
                 }
                 "TLS_KEY" => {
-                    self.tls_key = Some(value.to_string());
+                    env_config.tls_key = Some(PathBuf::from(value.to_string()));
                 }
                 "TLS_CLIENT_CA" => {
-                    self.tls_client_ca = Some(value.to_string());
+                    env_config.tls_client_ca = Some(PathBuf::from(value.to_string()));
                 }
                 "BINDADDRESS" => {
-                    self.bindaddress = Some(value.to_string());
+                    env_config.bindaddress = Some(value.to_string());
                 }
                 "LDAPBINDADDRESS" => {
-                    self.ldapbindaddress = Some(value.to_string());
+                    env_config.ldapbindaddress = Some(value.to_string());
                 }
                 "ROLE" => {
-                    self.role = ServerRole::from_str(&value).map_err(|err| {
+                    env_config.role = Some(ServerRole::from_str(&value).map_err(|err| {
                         format!("Failed to parse KANIDM_ROLE as ServerRole: {}", err)
-                    })?;
+                    })?);
                 }
                 "LOG_LEVEL" => {
-                    self.log_level = LogLevel::from_str(&value)
+                    env_config.log_level = LogLevel::from_str(&value)
                         .map_err(|err| {
                             format!("Failed to parse KANIDM_LOG_LEVEL as LogLevel: {}", err)
                         })
                         .ok();
                 }
                 "ONLINE_BACKUP_PATH" => {
-                    if let Some(backup) = &mut self.online_backup {
-                        backup.path = Some(value.to_string());
+                    if let Some(backup) = &mut env_config.online_backup {
+                        backup.path = Some(PathBuf::from(value.to_string()));
                     } else {
-                        self.online_backup = Some(OnlineBackup {
-                            path: Some(value.to_string()),
+                        env_config.online_backup = Some(OnlineBackup {
+                            path: Some(PathBuf::from(value.to_string())),
                             ..Default::default()
                         });
                     }
                 }
                 "ONLINE_BACKUP_SCHEDULE" => {
-                    if let Some(backup) = &mut self.online_backup {
+                    if let Some(backup) = &mut env_config.online_backup {
                         backup.schedule = value.to_string();
                     } else {
-                        self.online_backup = Some(OnlineBackup {
+                        env_config.online_backup = Some(OnlineBackup {
                             schedule: value.to_string(),
                             ..Default::default()
                         });
@@ -331,17 +343,17 @@ impl ServerConfig {
                     let versions = value.parse().map_err(|_| {
                         "Failed to parse KANIDM_ONLINE_BACKUP_VERSIONS as usize".to_string()
                     })?;
-                    if let Some(backup) = &mut self.online_backup {
+                    if let Some(backup) = &mut env_config.online_backup {
                         backup.versions = versions;
                     } else {
-                        self.online_backup = Some(OnlineBackup {
+                        env_config.online_backup = Some(OnlineBackup {
                             versions,
                             ..Default::default()
                         })
                     }
                 }
                 "TRUST_X_FORWARD_FOR" => {
-                    self.trust_x_forward_for = value
+                    env_config.trust_x_forward_for = value
                         .parse()
                         .map_err(|_| {
                             "Failed to parse KANIDM_TRUST_X_FORWARD_FOR as bool".to_string()
@@ -349,29 +361,29 @@ impl ServerConfig {
                         .ok();
                 }
                 "DB_FS_TYPE" => {
-                    self.db_fs_type = FsType::try_from(value.as_str())
+                    env_config.db_fs_type = FsType::try_from(value.as_str())
                         .map_err(|_| {
                             "Failed to parse KANIDM_DB_FS_TYPE env var to valid value!".to_string()
                         })
                         .ok();
                 }
                 "DB_ARC_SIZE" => {
-                    self.db_arc_size = value
+                    env_config.db_arc_size = value
                         .parse()
                         .map_err(|_| "Failed to parse KANIDM_DB_ARC_SIZE as value".to_string())
                         .ok();
                 }
                 "ADMIN_BIND_PATH" => {
-                    self.adminbindpath = Some(value.to_string());
+                    env_config.adminbindpath = Some(value.to_string());
                 }
                 "REPLICATION_ORIGIN" => {
                     let repl_origin = Url::parse(value.as_str()).map_err(|err| {
                         format!("Failed to parse KANIDM_REPLICATION_ORIGIN as URL: {}", err)
                     })?;
-                    if let Some(repl) = &mut self.repl_config {
+                    if let Some(repl) = &mut env_config.repl_config {
                         repl.origin = repl_origin
                     } else {
-                        self.repl_config = Some(ReplicationConfiguration {
+                        env_config.repl_config = Some(ReplicationConfiguration {
                             origin: repl_origin,
                             ..Default::default()
                         });
@@ -381,10 +393,10 @@ impl ServerConfig {
                     let repl_bind_address = value
                         .parse()
                         .map_err(|_| "Failed to parse replication bind address".to_string())?;
-                    if let Some(repl) = &mut self.repl_config {
+                    if let Some(repl) = &mut env_config.repl_config {
                         repl.bindaddress = repl_bind_address;
                     } else {
-                        self.repl_config = Some(ReplicationConfiguration {
+                        env_config.repl_config = Some(ReplicationConfiguration {
                             bindaddress: repl_bind_address,
                             ..Default::default()
                         });
@@ -397,29 +409,24 @@ impl ServerConfig {
                             "Failed to parse replication task poll interval as u64".to_string()
                         })
                         .ok();
-                    if let Some(repl) = &mut self.repl_config {
+                    if let Some(repl) = &mut env_config.repl_config {
                         repl.task_poll_interval = poll_interval;
                     } else {
-                        self.repl_config = Some(ReplicationConfiguration {
+                        env_config.repl_config = Some(ReplicationConfiguration {
                             task_poll_interval: poll_interval,
                             ..Default::default()
                         });
                     }
                 }
                 "OTEL_GRPC_URL" => {
-                    self.otel_grpc_url = Some(value.to_string());
+                    env_config.otel_grpc_url = Some(value.to_string());
                 }
 
                 _ => eprintln!("Ignoring env var KANIDM_{key}"),
             }
         }
 
-        Ok(self)
-    }
-
-    /// Return the ARC size for the database, it's something you really shouldn't touch unless you are doing extreme tuning.
-    pub fn get_db_arc_size(&self) -> Option<usize> {
-        self.db_arc_size
+        Ok(env_config)
     }
 }
 
@@ -475,11 +482,11 @@ pub struct IntegrationReplConfig {
 #[derive(Debug, Clone)]
 pub struct Configuration {
     pub address: String,
-    pub ldapaddress: Option<String>,
+    pub ldapbindaddress: Option<String>,
     pub adminbindpath: String,
     pub threads: usize,
     // db type later
-    pub db_path: String,
+    pub db_path: Option<PathBuf>,
     pub db_fs_type: Option<FsType>,
     pub db_arc_size: Option<usize>,
     pub maximum_request: usize,
@@ -492,27 +499,89 @@ pub struct Configuration {
     pub role: ServerRole,
     pub output_mode: ConsoleOutputMode,
     pub log_level: LogLevel,
-
     /// Replication settings.
     pub repl_config: Option<ReplicationConfiguration>,
     /// This allows internally setting some unsafe options for replication.
     pub integration_repl_config: Option<Box<IntegrationReplConfig>>,
-
     pub otel_grpc_url: Option<String>,
 }
 
+impl Configuration {
+    pub fn build() -> ConfigurationBuilder {
+        ConfigurationBuilder {
+            bindaddress: None,
+            ldapbindaddress: None,
+            adminbindpath: None,
+            threads: std::thread::available_parallelism()
+                .map(|t| t.get())
+                .unwrap_or_else(|_e| {
+                    eprintln!("WARNING: Unable to read number of available CPUs, defaulting to 4");
+                    4
+                }),
+            db_path: None,
+            db_fs_type: None,
+            db_arc_size: None,
+            maximum_request: 256 * 1024, // 256k
+            trust_x_forward_for: None,
+            tls_key: None,
+            tls_chain: None,
+            tls_client_ca: None,
+            online_backup: None,
+            domain: None,
+            origin: None,
+            output_mode: None,
+            log_level: None,
+            role: None,
+            repl_config: None,
+            otel_grpc_url: None,
+        }
+    }
+
+    pub fn new_for_test() -> Self {
+        Configuration {
+            address: DEFAULT_SERVER_ADDRESS.to_string(),
+            ldapbindaddress: None,
+            adminbindpath: env!("KANIDM_SERVER_ADMIN_BIND_PATH").to_string(),
+            threads: 1,
+            db_path: None,
+            db_fs_type: None,
+            db_arc_size: None,
+            maximum_request: 256 * 1024, // 256k
+            trust_x_forward_for: false,
+            tls_config: None,
+            integration_test_config: None,
+            online_backup: None,
+            domain: "idm.example.com".to_string(),
+            origin: "https://idm.example.com".to_string(),
+            output_mode: ConsoleOutputMode::default(),
+            log_level: LogLevel::default(),
+            role: ServerRole::WriteReplica,
+            repl_config: None,
+            integration_repl_config: None,
+            otel_grpc_url: None,
+        }
+    }
+}
+
 impl fmt::Display for Configuration {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "address: {}, ", self.address)?;
         write!(f, "domain: {}, ", self.domain)?;
-        match &self.ldapaddress {
+        match &self.ldapbindaddress {
             Some(la) => write!(f, "ldap address: {}, ", la),
             None => write!(f, "ldap address: disabled, "),
         }?;
         write!(f, "origin: {} ", self.origin)?;
         write!(f, "admin bind path: {}, ", self.adminbindpath)?;
         write!(f, "thread count: {}, ", self.threads)?;
-        write!(f, "dbpath: {}, ", self.db_path)?;
+        write!(
+            f,
+            "dbpath: {}, ",
+            self.db_path
+                .as_ref()
+                .map(|p| p.to_string_lossy().to_string())
+                .unwrap_or("MEMORY".to_string())
+        )?;
         match self.db_arc_size {
             Some(v) => write!(f, "arcsize: {}, ", v),
             None => write!(f, "arcsize: AUTO, "),
@@ -527,7 +596,10 @@ impl fmt::Display for Configuration {
                 bck.enabled,
                 bck.schedule,
                 bck.versions,
-                bck.path.clone().unwrap_or("<unset>".to_string()),
+                bck.path
+                    .as_ref()
+                    .map(|p| p.to_string_lossy().to_string())
+                    .unwrap_or("<unset>".to_string())
             ),
             None => write!(f, "online_backup: disabled, "),
         }?;
@@ -559,178 +631,387 @@ impl fmt::Display for Configuration {
     }
 }
 
-impl Default for Configuration {
-    fn default() -> Self {
-        Self::new()
-    }
+/// Builder for the internal configuration of the server. User-facing configuration is in [ServerConfig], which is what the configuration file is parsed into.
+#[derive(Debug, Clone)]
+pub struct ConfigurationBuilder {
+    bindaddress: Option<String>,
+    ldapbindaddress: Option<String>,
+    adminbindpath: Option<String>,
+    threads: usize,
+    db_path: Option<PathBuf>,
+    db_fs_type: Option<FsType>,
+    db_arc_size: Option<usize>,
+    maximum_request: usize,
+    trust_x_forward_for: Option<bool>,
+    tls_key: Option<PathBuf>,
+    tls_chain: Option<PathBuf>,
+    tls_client_ca: Option<PathBuf>,
+    online_backup: Option<OnlineBackup>,
+    domain: Option<String>,
+    origin: Option<String>,
+    role: Option<ServerRole>,
+    output_mode: Option<ConsoleOutputMode>,
+    log_level: Option<LogLevel>,
+    repl_config: Option<ReplicationConfiguration>,
+    otel_grpc_url: Option<String>,
 }
 
-impl Configuration {
-    pub fn new() -> Self {
-        Configuration {
-            address: DEFAULT_SERVER_ADDRESS.to_string(),
-            ldapaddress: None,
-            adminbindpath: env!("KANIDM_SERVER_ADMIN_BIND_PATH").to_string(),
-            threads: std::thread::available_parallelism()
-                .map(|t| t.get())
-                .unwrap_or_else(|_e| {
-                    eprintln!("WARNING: Unable to read number of available CPUs, defaulting to 4");
-                    4
-                }),
-            db_path: String::from(""),
-            db_fs_type: None,
-            db_arc_size: None,
-            maximum_request: 256 * 1024, // 256k
-            trust_x_forward_for: false,
-            tls_config: None,
-            integration_test_config: None,
-            online_backup: None,
-            domain: "idm.example.com".to_string(),
-            origin: "https://idm.example.com".to_string(),
-            output_mode: ConsoleOutputMode::default(),
-            log_level: Default::default(),
-            role: ServerRole::WriteReplica,
-            repl_config: None,
-            integration_repl_config: None,
-            otel_grpc_url: None,
+impl ConfigurationBuilder {
+    #![allow(clippy::needless_pass_by_value)]
+    pub fn add_cli_config(mut self, cli_config: CliConfig) -> Self {
+        if cli_config.output_mode.is_some() {
+            self.output_mode = cli_config.output_mode;
         }
+
+        self
     }
 
-    pub fn new_for_test() -> Self {
-        Configuration {
-            threads: 1,
-            ..Configuration::new()
+    pub fn add_env_config(mut self, env_config: EnvironmentConfig) -> Self {
+        if env_config.bindaddress.is_some() {
+            self.bindaddress = env_config.bindaddress;
         }
+
+        if env_config.ldapbindaddress.is_some() {
+            self.ldapbindaddress = env_config.ldapbindaddress;
+        }
+
+        if env_config.adminbindpath.is_some() {
+            self.adminbindpath = env_config.adminbindpath;
+        }
+
+        if env_config.db_path.is_some() {
+            self.db_path = env_config.db_path;
+        }
+
+        if env_config.db_fs_type.is_some() {
+            self.db_fs_type = env_config.db_fs_type;
+        }
+
+        if env_config.db_arc_size.is_some() {
+            self.db_arc_size = env_config.db_arc_size;
+        }
+
+        if env_config.trust_x_forward_for.is_some() {
+            self.trust_x_forward_for = env_config.trust_x_forward_for;
+        }
+
+        if env_config.tls_key.is_some() {
+            self.tls_key = env_config.tls_key;
+        }
+
+        if env_config.tls_chain.is_some() {
+            self.tls_chain = env_config.tls_chain;
+        }
+
+        if env_config.tls_client_ca.is_some() {
+            self.tls_client_ca = env_config.tls_client_ca;
+        }
+
+        if env_config.online_backup.is_some() {
+            self.online_backup = env_config.online_backup;
+        }
+
+        if env_config.domain.is_some() {
+            self.domain = env_config.domain;
+        }
+
+        if env_config.origin.is_some() {
+            self.origin = env_config.origin;
+        }
+
+        if env_config.role.is_some() {
+            self.role = env_config.role;
+        }
+
+        if env_config.log_level.is_some() {
+            self.log_level = env_config.log_level;
+        }
+
+        if env_config.repl_config.is_some() {
+            self.repl_config = env_config.repl_config;
+        }
+
+        if env_config.otel_grpc_url.is_some() {
+            self.otel_grpc_url = env_config.otel_grpc_url;
+        }
+
+        self
     }
 
-    pub fn update_online_backup(&mut self, cfg: &Option<OnlineBackup>) {
-        match cfg {
-            None => {}
-            Some(cfg) => {
-                let path = match cfg.path.clone() {
-                    Some(path) => Some(path),
-                    // Default to the same path as the data directory
-                    None => {
-                        let db_filepath = Path::new(&self.db_path);
-                        #[allow(clippy::expect_used)]
-                        let db_path = db_filepath
-                            .parent()
-                            .map(|p| {
-                                #[allow(clippy::expect_used)]
-                                p.to_str()
-                                    .expect("Couldn't turn db_path to str")
-                                    .to_string()
-                            })
-                            .expect("Unable to get parent directory of db_path");
+    pub fn add_opt_toml_config(self, toml_config: Option<ServerConfigUntagged>) -> Self {
+        // Can only proceed if the config is real
+        let Some(toml_config) = toml_config else {
+            return self;
+        };
 
-                        Some(db_path)
-                    }
-                };
-                self.online_backup = Some(OnlineBackup {
-                    path,
-                    ..cfg.clone()
-                })
+        match toml_config {
+            ServerConfigUntagged::Version(ServerConfigVersion::V2 { values }) => {
+                self.add_v2_config(values)
             }
+            ServerConfigUntagged::Legacy(config) => self.add_legacy_config(config),
         }
     }
 
-    pub fn update_log_level(&mut self, level: &Option<LogLevel>) {
-        self.log_level = level.unwrap_or_default();
-    }
-
-    // Startup config action, used in kanidmd server etc
-    pub fn update_config_for_server_mode(&mut self, sconfig: &ServerConfig) {
-        #[cfg(any(test, debug_assertions))]
-        debug!("update_config_for_server_mode {:?}", sconfig);
-        self.update_tls(&sconfig.tls_chain, &sconfig.tls_key, &sconfig.tls_client_ca);
-        self.update_bind(&sconfig.bindaddress);
-        self.update_ldapbind(&sconfig.ldapbindaddress);
-        self.update_online_backup(&sconfig.online_backup);
-        self.update_log_level(&sconfig.log_level);
-    }
-
-    pub fn update_trust_x_forward_for(&mut self, t: Option<bool>) {
-        self.trust_x_forward_for = t.unwrap_or(false);
-    }
-
-    pub fn update_db_path(&mut self, p: &str) {
-        self.db_path = p.to_string();
-    }
-
-    pub fn update_db_arc_size(&mut self, v: Option<usize>) {
-        self.db_arc_size = v
-    }
-
-    pub fn update_db_fs_type(&mut self, p: &Option<FsType>) {
-        p.clone_into(&mut self.db_fs_type);
-    }
-
-    pub fn update_bind(&mut self, b: &Option<String>) {
-        self.address = b
-            .as_ref()
-            .cloned()
-            .unwrap_or_else(|| DEFAULT_SERVER_ADDRESS.to_string());
-    }
-
-    pub fn update_ldapbind(&mut self, l: &Option<String>) {
-        self.ldapaddress.clone_from(l);
-    }
-
-    pub fn update_admin_bind_path(&mut self, p: &Option<String>) {
-        if let Some(p) = p {
-            self.adminbindpath.clone_from(p);
+    fn add_legacy_config(mut self, config: ServerConfig) -> Self {
+        if config.domain.is_some() {
+            self.domain = config.domain;
         }
+
+        if config.origin.is_some() {
+            self.origin = config.origin;
+        }
+
+        if config.db_path.is_some() {
+            self.db_path = config.db_path;
+        }
+
+        if config.db_fs_type.is_some() {
+            self.db_fs_type = config.db_fs_type;
+        }
+
+        if config.tls_key.is_some() {
+            self.tls_key = config.tls_key;
+        }
+
+        if config.tls_chain.is_some() {
+            self.tls_chain = config.tls_chain;
+        }
+
+        if config.tls_client_ca.is_some() {
+            self.tls_client_ca = config.tls_client_ca;
+        }
+
+        if config.bindaddress.is_some() {
+            self.bindaddress = config.bindaddress;
+        }
+
+        if config.ldapbindaddress.is_some() {
+            self.ldapbindaddress = config.ldapbindaddress;
+        }
+
+        if config.adminbindpath.is_some() {
+            self.adminbindpath = config.adminbindpath;
+        }
+
+        if config.role.is_some() {
+            self.role = config.role;
+        }
+
+        if config.log_level.is_some() {
+            self.log_level = config.log_level;
+        }
+
+        if let Some(threads) = config.thread_count {
+            self.threads = threads;
+        }
+
+        if let Some(maximum) = config.maximum_request_size_bytes {
+            self.maximum_request = maximum;
+        }
+
+        if config.db_arc_size.is_some() {
+            self.db_arc_size = config.db_arc_size;
+        }
+
+        if config.trust_x_forward_for.is_some() {
+            self.trust_x_forward_for = config.trust_x_forward_for;
+        }
+
+        if config.online_backup.is_some() {
+            self.online_backup = config.online_backup;
+        }
+
+        if config.repl_config.is_some() {
+            self.repl_config = config.repl_config;
+        }
+
+        if config.otel_grpc_url.is_some() {
+            self.otel_grpc_url = config.otel_grpc_url;
+        }
+
+        self
     }
 
-    pub fn update_origin(&mut self, o: &str) {
-        self.origin = o.to_string();
+    fn add_v2_config(mut self, config: ServerConfigV2) -> Self {
+        if config.domain.is_some() {
+            self.domain = config.domain;
+        }
+
+        if config.origin.is_some() {
+            self.origin = config.origin;
+        }
+
+        if config.db_path.is_some() {
+            self.db_path = config.db_path;
+        }
+
+        if config.db_fs_type.is_some() {
+            self.db_fs_type = config.db_fs_type;
+        }
+
+        if config.tls_key.is_some() {
+            self.tls_key = config.tls_key;
+        }
+
+        if config.tls_chain.is_some() {
+            self.tls_chain = config.tls_chain;
+        }
+
+        if config.tls_client_ca.is_some() {
+            self.tls_client_ca = config.tls_client_ca;
+        }
+
+        if config.bindaddress.is_some() {
+            self.bindaddress = config.bindaddress;
+        }
+
+        if config.ldapbindaddress.is_some() {
+            self.ldapbindaddress = config.ldapbindaddress;
+        }
+
+        if config.adminbindpath.is_some() {
+            self.adminbindpath = config.adminbindpath;
+        }
+
+        if config.role.is_some() {
+            self.role = config.role;
+        }
+
+        if config.log_level.is_some() {
+            self.log_level = config.log_level;
+        }
+
+        if let Some(threads) = config.thread_count {
+            self.threads = threads;
+        }
+
+        if let Some(maximum) = config.maximum_request_size_bytes {
+            self.maximum_request = maximum;
+        }
+
+        if config.db_arc_size.is_some() {
+            self.db_arc_size = config.db_arc_size;
+        }
+
+        if config.trust_x_forward_for.is_some() {
+            self.trust_x_forward_for = config.trust_x_forward_for;
+        }
+
+        if config.online_backup.is_some() {
+            self.online_backup = config.online_backup;
+        }
+
+        if config.repl_config.is_some() {
+            self.repl_config = config.repl_config;
+        }
+
+        if config.otel_grpc_url.is_some() {
+            self.otel_grpc_url = config.otel_grpc_url;
+        }
+
+        self
     }
 
-    pub fn update_domain(&mut self, d: &str) {
-        self.domain = d.to_string();
+    // We always set threads to 1 unless it's the main server.
+    pub fn is_server_mode(mut self, is_server: bool) -> Self {
+        if !is_server {
+            self.threads = 1;
+        }
+        self
     }
 
-    pub fn update_role(&mut self, r: ServerRole) {
-        self.role = r;
-    }
+    pub fn finish(self) -> Option<Configuration> {
+        let ConfigurationBuilder {
+            bindaddress,
+            ldapbindaddress,
+            adminbindpath,
+            threads,
+            db_path,
+            db_fs_type,
+            db_arc_size,
+            maximum_request,
+            trust_x_forward_for,
+            tls_key,
+            tls_chain,
+            tls_client_ca,
+            mut online_backup,
+            domain,
+            origin,
+            role,
+            output_mode,
+            log_level,
+            repl_config,
+            otel_grpc_url,
+        } = self;
 
-    /// Sets the output mode for writing to the console
-    pub fn update_output_mode(&mut self, om: ConsoleOutputMode) {
-        self.output_mode = om;
-    }
-
-    pub fn update_replication_config(&mut self, repl_config: Option<ReplicationConfiguration>) {
-        self.repl_config = repl_config;
-    }
-
-    pub fn update_tls(
-        &mut self,
-        chain: &Option<String>,
-        key: &Option<String>,
-        client_ca: &Option<String>,
-    ) {
-        match (chain, key) {
-            (None, None) => {}
-            (Some(chainp), Some(keyp)) => {
-                let chain = PathBuf::from(chainp.clone());
-                let key = PathBuf::from(keyp.clone());
-                let client_ca = client_ca.clone().map(PathBuf::from);
-                self.tls_config = Some(TlsConfiguration {
-                    chain,
-                    key,
-                    client_ca,
-                })
-            }
+        let tls_config = match (tls_key, tls_chain, tls_client_ca) {
+            (Some(key), Some(chain), client_ca) => Some(TlsConfiguration {
+                chain,
+                key,
+                client_ca,
+            }),
             _ => {
-                eprintln!("ERROR: Invalid TLS configuration - must provide chain and key!");
-                std::process::exit(1);
+                eprintln!("ERROR: Tls Private Key and Certificate Chain are required.");
+                return None;
             }
-        }
-    }
+        };
 
-    // Update the thread count of this server, only up to the maximum set by self threads
-    // which is configured with available parallelism.
-    pub fn update_threads_count(&mut self, threads: usize) {
-        self.threads = std::cmp::min(self.threads, threads);
+        let domain = domain.or_else(|| {
+            eprintln!("ERROR: domain was not set.");
+            None
+        })?;
+
+        let origin = origin.or_else(|| {
+            eprintln!("ERROR: origin was not set.");
+            None
+        })?;
+
+        if let Some(online_backup_ref) = online_backup.as_mut() {
+            if online_backup_ref.path.is_none() {
+                if let Some(db_path) = db_path.as_ref() {
+                    if let Some(db_parent_path) = db_path.parent() {
+                        online_backup_ref.path = Some(db_parent_path.to_path_buf());
+                    } else {
+                        eprintln!("ERROR: when db_path has no parent, and can not be used for online backups.");
+                        return None;
+                    }
+                } else {
+                    eprintln!("ERROR: when db_path is unset (in memory) then online backup paths must be declared.");
+                    return None;
+                }
+            }
+        };
+
+        // Apply any defaults if needed
+        let adminbindpath =
+            adminbindpath.unwrap_or(env!("KANIDM_SERVER_ADMIN_BIND_PATH").to_string());
+        let address = bindaddress.unwrap_or(DEFAULT_SERVER_ADDRESS.to_string());
+        let trust_x_forward_for = trust_x_forward_for.unwrap_or_default();
+        let output_mode = output_mode.unwrap_or_default();
+        let role = role.unwrap_or(ServerRole::WriteReplica);
+        let log_level = log_level.unwrap_or_default();
+
+        Some(Configuration {
+            address,
+            ldapbindaddress,
+            adminbindpath,
+            threads,
+            db_path,
+            db_fs_type,
+            db_arc_size,
+            maximum_request,
+            trust_x_forward_for,
+            tls_config,
+            online_backup,
+            domain,
+            origin,
+            role,
+            output_mode,
+            log_level,
+            repl_config,
+            otel_grpc_url,
+            integration_repl_config: None,
+            integration_test_config: None,
+        })
     }
 }
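
As a reference for the version-tagging scheme introduced above, here is a minimal standalone sketch (struct and field names are illustrative, not the Kanidm types) of how an untagged enum accepts both a `version = "2"` file and a legacy, un-versioned layout, which is the behaviour `ServerConfigUntagged` relies on.

```rust
// Requires the serde (with the "derive" feature) and toml crates.
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct SettingsV2 {
    domain: Option<String>,
    origin: Option<String>,
}

#[derive(Debug, Deserialize)]
#[serde(tag = "version")]
enum VersionedSettings {
    #[serde(rename = "2")]
    V2 {
        #[serde(flatten)]
        values: SettingsV2,
    },
}

#[derive(Debug, Deserialize)]
#[serde(untagged)]
enum AnySettings {
    // Tried first: requires the "version" key.
    Versioned(VersionedSettings),
    // Fallback: the legacy, un-versioned layout.
    Legacy(SettingsV2),
}

fn main() {
    let v2 = r#"
        version = "2"
        domain = "idm.example.com"
    "#;
    let legacy = r#"
        domain = "idm.example.com"
    "#;

    // Both layouts parse into the same enum; the caller can then merge the
    // result into one internal configuration, as ConfigurationBuilder does.
    let a: AnySettings = toml::from_str(v2).expect("versioned config should parse");
    let b: AnySettings = toml::from_str(legacy).expect("legacy config should parse");
    println!("{a:?}\n{b:?}");
}
```
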
diff --git a/server/core/src/interval.rs b/server/core/src/interval.rs
index be1e662dd..b71d93c16 100644
--- a/server/core/src/interval.rs
+++ b/server/core/src/interval.rs
@@ -112,19 +112,19 @@ impl IntervalActor {
         if !op.exists() {
             info!(
                 "Online backup output folder '{}' does not exist, trying to create it.",
-                outpath
+                outpath.display()
             );
             fs::create_dir_all(&outpath).map_err(|e| {
                 error!(
                     "Online backup failed to create output directory '{}': {}",
-                    outpath.clone(),
+                    outpath.display(),
                     e
                 )
             })?;
         }
 
         if !op.is_dir() {
-            error!("Online backup output '{}' is not a directory or we are missing permissions to access it.", outpath);
+            error!("Online backup output '{}' is not a directory or we are missing permissions to access it.", outpath.display());
             return Err(());
         }
 
@@ -148,7 +148,7 @@ impl IntervalActor {
                         if let Err(e) = server
                             .handle_online_backup(
                                 OnlineBackupEvent::new(),
-                                outpath.clone().as_str(),
+                                &outpath,
                                 versions,
                             )
                             .await
diff --git a/server/core/src/lib.rs b/server/core/src/lib.rs
index f781998dc..1117f446a 100644
--- a/server/core/src/lib.rs
+++ b/server/core/src/lib.rs
@@ -36,9 +36,10 @@ mod ldaps;
 mod repl;
 mod utils;
 
-use std::fmt::{Display, Formatter};
-use std::sync::Arc;
-
+use crate::actors::{QueryServerReadV1, QueryServerWriteV1};
+use crate::admin::AdminActor;
+use crate::config::{Configuration, ServerRole};
+use crate::interval::IntervalActor;
 use crate::utils::touch_file_or_quit;
 use compact_jwt::{JwsHs256Signer, JwsSigner};
 use kanidm_proto::internal::OperationError;
@@ -50,17 +51,14 @@ use kanidmd_lib::status::StatusActor;
 use kanidmd_lib::value::CredentialType;
 #[cfg(not(target_family = "windows"))]
 use libc::umask;
-
+use std::fmt::{Display, Formatter};
+use std::path::Path;
+use std::sync::Arc;
 use tokio::sync::broadcast;
+use tokio::sync::mpsc;
 use tokio::sync::Notify;
 use tokio::task;
 
-use crate::actors::{QueryServerReadV1, QueryServerWriteV1};
-use crate::admin::AdminActor;
-use crate::config::{Configuration, ServerRole};
-use crate::interval::IntervalActor;
-use tokio::sync::mpsc;
-
 // === internal setup helpers
 
 fn setup_backend(config: &Configuration, schema: &Schema) -> Result<Backend, OperationError> {
@@ -80,7 +78,7 @@ fn setup_backend_vacuum(
     let pool_size: u32 = config.threads as u32;
 
     let cfg = BackendConfig::new(
-        config.db_path.as_str(),
+        config.db_path.as_deref(),
         pool_size,
         config.db_fs_type.unwrap_or_default(),
         config.db_arc_size,
@@ -335,7 +333,7 @@ pub fn dbscan_restore_quarantined_core(config: &Configuration, id: u64) {
     };
 }
 
-pub fn backup_server_core(config: &Configuration, dst_path: &str) {
+pub fn backup_server_core(config: &Configuration, dst_path: &Path) {
     let schema = match Schema::new() {
         Ok(s) => s,
         Err(e) => {
@@ -371,8 +369,11 @@ pub fn backup_server_core(config: &Configuration, dst_path: &str) {
     // Let the txn abort, even on success.
 }
 
-pub async fn restore_server_core(config: &Configuration, dst_path: &str) {
-    touch_file_or_quit(config.db_path.as_str());
+pub async fn restore_server_core(config: &Configuration, dst_path: &Path) {
+    // If it's an in memory database, we don't need to touch anything
+    if let Some(db_path) = config.db_path.as_ref() {
+        touch_file_or_quit(db_path);
+    }
 
     // First, we provide the in-memory schema so that core attrs are indexed correctly.
     let schema = match Schema::new() {
@@ -1011,7 +1012,7 @@ pub async fn create_server_core(
     let tls_accepter_reload_task_notify = tls_acceptor_reload_notify.clone();
     let tls_config = config.tls_config.clone();
 
-    let ldap_configured = config.ldapaddress.is_some();
+    let ldap_configured = config.ldapbindaddress.is_some();
     let (ldap_tls_acceptor_reload_tx, ldap_tls_acceptor_reload_rx) = mpsc::channel(1);
     let (http_tls_acceptor_reload_tx, http_tls_acceptor_reload_rx) = mpsc::channel(1);
 
@@ -1076,7 +1077,7 @@ pub async fn create_server_core(
     };
 
     // If we have been requested to init LDAP, configure it now.
-    let maybe_ldap_acceptor_handle = match &config.ldapaddress {
+    let maybe_ldap_acceptor_handle = match &config.ldapbindaddress {
         Some(la) => {
             let opt_ldap_ssl_acceptor = maybe_tls_acceptor.clone();
 
diff --git a/server/core/src/utils.rs b/server/core/src/utils.rs
index d3017944e..cad8ed1f9 100644
--- a/server/core/src/utils.rs
+++ b/server/core/src/utils.rs
@@ -1,32 +1,39 @@
 use filetime::FileTime;
 use std::fs::File;
 use std::io::ErrorKind;
-use std::path::PathBuf;
+use std::path::Path;
 use std::time::SystemTime;
 
-pub fn touch_file_or_quit(file_path: &str) {
+pub fn touch_file_or_quit<P: AsRef<Path>>(file_path: P) {
     /*
     Attempt to touch the file file_path, will quit the application if it fails for any reason.
 
     Will also create a new file if it doesn't already exist.
     */
-    if PathBuf::from(file_path).exists() {
+
+    let file_path: &Path = file_path.as_ref();
+
+    if file_path.exists() {
         let t = FileTime::from_system_time(SystemTime::now());
         match filetime::set_file_times(file_path, t, t) {
             Ok(_) => debug!(
                 "Successfully touched existing file {}, can continue",
-                file_path
+                file_path.display()
             ),
             Err(e) => {
                 match e.kind() {
                     ErrorKind::PermissionDenied => {
                         // we bail here because you won't be able to write them back...
-                        error!("Permission denied writing to {}, quitting.", file_path)
+                        error!(
+                            "Permission denied writing to {}, quitting.",
+                            file_path.display()
+                        )
                     }
                     _ => {
                         error!(
                             "Failed to write to {} due to error: {:?} ... quitting.",
-                            file_path, e
+                            file_path.display(),
+                            e
                         )
                     }
                 }
@@ -35,11 +42,12 @@ pub fn touch_file_or_quit(file_path: &str) {
         }
     } else {
         match File::create(file_path) {
-            Ok(_) => debug!("Successfully touched new file {}", file_path),
+            Ok(_) => debug!("Successfully touched new file {}", file_path.display()),
             Err(e) => {
                 error!(
                     "Failed to write to {} due to error: {:?} ... quitting.",
-                    file_path, e
+                    file_path.display(),
+                    e
                 );
                 std::process::exit(1);
             }
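
A small sketch of the `AsRef<Path>` pattern that `touch_file_or_quit` adopts above; the `describe` helper here is hypothetical and only demonstrates that one generic signature now serves `&str`, `String`, `&Path`, and `PathBuf` callers alike.

```rust
use std::path::Path;

// Hypothetical helper showing the AsRef<Path> bound in use.
fn describe<P: AsRef<Path>>(file_path: P) {
    let file_path: &Path = file_path.as_ref();
    println!("{} exists: {}", file_path.display(), file_path.exists());
}

fn main() {
    describe("/tmp/kanidm/kanidm.db");                        // &str
    describe(String::from("/tmp/kanidm/kanidm.db"));          // String
    describe(std::path::PathBuf::from("/tmp/kanidm/ca.pem")); // PathBuf
}
```
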
diff --git a/examples/insecure_server.toml b/server/daemon/insecure_server.toml
similarity index 98%
rename from examples/insecure_server.toml
rename to server/daemon/insecure_server.toml
index 2ca1c0392..b0cc23bc8 100644
--- a/examples/insecure_server.toml
+++ b/server/daemon/insecure_server.toml
@@ -1,3 +1,4 @@
+version = "2"
 bindaddress = "[::]:8443"
 ldapbindaddress = "127.0.0.1:3636"
 
diff --git a/server/daemon/run_insecure_dev_server.sh b/server/daemon/run_insecure_dev_server.sh
index 3724cbaf2..568336a9d 100755
--- a/server/daemon/run_insecure_dev_server.sh
+++ b/server/daemon/run_insecure_dev_server.sh
@@ -22,7 +22,7 @@ fi
 
 mkdir -p "${KANI_TMP}"/client_ca
 
-CONFIG_FILE=${CONFIG_FILE:="${SCRIPT_DIR}/../../examples/insecure_server.toml"}
+CONFIG_FILE=${CONFIG_FILE:="${SCRIPT_DIR}/insecure_server.toml"}
 
 if [ ! -f "${CONFIG_FILE}" ]; then
     echo "Couldn't find configuration file at ${CONFIG_FILE}, please ensure you're running this script from its base directory (${SCRIPT_DIR})."
diff --git a/server/daemon/src/main.rs b/server/daemon/src/main.rs
index 92ac418df..4ba1c1679 100644
--- a/server/daemon/src/main.rs
+++ b/server/daemon/src/main.rs
@@ -37,7 +37,7 @@ use kanidmd_core::admin::{
     AdminTaskRequest, AdminTaskResponse, ClientCodec, ProtoDomainInfo,
     ProtoDomainUpgradeCheckReport, ProtoDomainUpgradeCheckStatus,
 };
-use kanidmd_core::config::{Configuration, ServerConfig};
+use kanidmd_core::config::{CliConfig, Configuration, EnvironmentConfig, ServerConfigUntagged};
 use kanidmd_core::{
     backup_server_core, cert_generate_core, create_server_core, dbscan_get_id2entry_core,
     dbscan_list_id2entry_core, dbscan_list_index_analysis_core, dbscan_list_index_core,
@@ -379,17 +379,13 @@ fn check_file_ownership(opt: &KanidmdParser) -> Result<(), ExitCode> {
 }
 
 // We have to do this because we can't use tracing until we've started the logging pipeline, and we can't start the logging pipeline until the tokio runtime's doing its thing.
-async fn start_daemon(
-    opt: KanidmdParser,
-    mut config: Configuration,
-    sconfig: ServerConfig,
-) -> ExitCode {
+async fn start_daemon(opt: KanidmdParser, config: Configuration) -> ExitCode {
     // if we have a server config and it has an OTEL URL, then we'll start the logging pipeline now.
 
     // TODO: only send to stderr when we're not in a TTY
     let sub = match sketching::otel::start_logging_pipeline(
-        &sconfig.otel_grpc_url,
-        sconfig.log_level.unwrap_or_default(),
+        &config.otel_grpc_url,
+        config.log_level,
         "kanidmd",
     ) {
         Err(err) => {
@@ -423,8 +419,8 @@ async fn start_daemon(
         return err;
     };
 
-    if let Some(db_path) = sconfig.db_path.as_ref() {
-        let db_pathbuf = PathBuf::from(db_path.as_str());
+    if let Some(db_path) = config.db_path.as_ref() {
+        let db_pathbuf = db_path.to_path_buf();
         // We can't check the db_path permissions because it may not exist yet!
         if let Some(db_parent_path) = db_pathbuf.parent() {
             if !db_parent_path.exists() {
@@ -464,33 +460,11 @@ async fn start_daemon(
                 warn!("WARNING: DB folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", db_par_path_buf.to_str().unwrap_or("invalid file path"));
             }
         }
-        config.update_db_path(db_path);
     } else {
         error!("No db_path set in configuration, server startup will FAIL!");
         return ExitCode::FAILURE;
     }
 
-    if let Some(origin) = sconfig.origin.clone() {
-        config.update_origin(&origin);
-    } else {
-        error!("No origin set in configuration, server startup will FAIL!");
-        return ExitCode::FAILURE;
-    }
-
-    if let Some(domain) = sconfig.domain.clone() {
-        config.update_domain(&domain);
-    } else {
-        error!("No domain set in configuration, server startup will FAIL!");
-        return ExitCode::FAILURE;
-    }
-
-    config.update_db_arc_size(sconfig.get_db_arc_size());
-    config.update_role(sconfig.role);
-    config.update_output_mode(opt.commands.commonopt().output_mode.to_owned().into());
-    config.update_trust_x_forward_for(sconfig.trust_x_forward_for);
-    config.update_admin_bind_path(&sconfig.adminbindpath);
-    config.update_replication_config(sconfig.repl_config.clone());
-
     match &opt.commands {
         // we aren't going to touch the DB so we can carry on
         KanidmdOpt::ShowReplicationCertificate { .. }
@@ -501,19 +475,15 @@ async fn start_daemon(
         _ => {
             // Okay - Lets now create our lock and go.
             #[allow(clippy::expect_used)]
-            let klock_path = match sconfig.db_path.clone() {
-                Some(val) => format!("{}.klock", val),
-                None => std::env::temp_dir()
-                    .join("kanidmd.klock")
-                    .to_str()
-                    .expect("Unable to create klock path, this is a critical error!")
-                    .to_string(),
+            let klock_path = match config.db_path.clone() {
+                Some(val) => val.with_extension("klock"),
+                None => std::env::temp_dir().join("kanidmd.klock"),
             };
 
             let flock = match File::create(&klock_path) {
                 Ok(flock) => flock,
                 Err(e) => {
-                    error!("ERROR: Refusing to start - unable to create kanidmd exclusive lock at {} - {:?}", klock_path, e);
+                    error!("ERROR: Refusing to start - unable to create kanidmd exclusive lock at {} - {:?}", klock_path.display(), e);
                     return ExitCode::FAILURE;
                 }
             };
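
One subtlety worth noting about the klock change above, assuming standard library semantics: `Path::with_extension` replaces an existing extension rather than appending one, so the lock file for `kanidm.db` becomes `kanidm.klock`, whereas the old `format!("{}.klock", val)` produced `kanidm.db.klock`. A small sketch:

```rust
use std::path::PathBuf;

fn main() {
    // `with_extension` replaces an existing extension rather than appending,
    // so the derived name differs from the old string concatenation.
    let db = PathBuf::from("/tmp/kanidm/kanidm.db");
    assert_eq!(
        db.with_extension("klock"),
        PathBuf::from("/tmp/kanidm/kanidm.klock")
    );

    // A path without an extension simply gains one.
    let bare = PathBuf::from("/tmp/kanidm/kanidm");
    assert_eq!(
        bare.with_extension("klock"),
        PathBuf::from("/tmp/kanidm/kanidm.klock")
    );
}
```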
@@ -521,7 +491,7 @@ async fn start_daemon(
             match flock.try_lock_exclusive() {
                 Ok(()) => debug!("Acquired kanidm exclusive lock"),
                 Err(e) => {
-                    error!("ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {} - {:?}", klock_path, e);
+                    error!("ERROR: Refusing to start - unable to lock kanidmd exclusive lock at {} - {:?}", klock_path.display(), e);
                     error!("Is another kanidmd process running?");
                     return ExitCode::FAILURE;
                 }
@@ -529,7 +499,7 @@ async fn start_daemon(
         }
     }
 
-    kanidm_main(sconfig, config, opt).await
+    kanidm_main(config, opt).await
 }
 
 fn main() -> ExitCode {
@@ -556,10 +526,6 @@ fn main() -> ExitCode {
         return ExitCode::SUCCESS;
     };
 
-    //we set up a list of these so we can set the log config THEN log out the errors.
-    let mut config_error: Vec<String> = Vec::new();
-    let mut config = Configuration::new();
-
     if env!("KANIDM_SERVER_CONFIG_PATH").is_empty() {
         println!("CRITICAL: Kanidmd was not built correctly and is missing a valid KANIDM_SERVER_CONFIG_PATH value");
         return ExitCode::FAILURE;
@@ -581,49 +547,56 @@ fn main() -> ExitCode {
         }
     };
 
-    let sconfig = match ServerConfig::new(maybe_config_path) {
-        Ok(c) => Some(c),
-        Err(e) => {
-            config_error.push(format!("Config Parse failure {:?}", e));
+    let maybe_sconfig = if let Some(config_path) = maybe_config_path {
+        match ServerConfigUntagged::new(config_path) {
+            Ok(c) => Some(c),
+            Err(err) => {
+                eprintln!("ERROR: Configuration Parse Failure: {:?}", err);
+                return ExitCode::FAILURE;
+            }
+        }
+    } else {
+        eprintln!("WARNING: No configuration path was provided, relying on environment variables.");
+        None
+    };
+
+    let envconfig = match EnvironmentConfig::new() {
+        Ok(ec) => ec,
+        Err(err) => {
+            eprintln!("ERROR: Environment Configuration Parse Failure: {:?}", err);
             return ExitCode::FAILURE;
         }
     };
 
-    // Get information on the windows username
-    #[cfg(target_family = "windows")]
-    get_user_details_windows();
+    let cli_config = CliConfig {
+        output_mode: Some(opt.commands.commonopt().output_mode.to_owned().into()),
+    };
 
-    if !config_error.is_empty() {
-        println!("There were errors on startup, which prevent the server from starting:");
-        for e in config_error {
-            println!(" - {}", e);
-        }
+    let is_server = matches!(&opt.commands, KanidmdOpt::Server(_));
+
+    let config = Configuration::build()
+        .add_env_config(envconfig)
+        .add_opt_toml_config(maybe_sconfig)
+        .add_cli_config(cli_config)
+        // We always set threads to 1 unless it's the main server.
+        .is_server_mode(is_server)
+        .finish();
+
+    let Some(config) = config else {
+        eprintln!(
+            "ERROR: Unable to build server configuration from provided configuration inputs."
+        );
         return ExitCode::FAILURE;
-    }
-
-    let sconfig = match sconfig {
-        Some(val) => val,
-        None => {
-            println!("Somehow you got an empty ServerConfig after error checking? Cannot start!");
-            return ExitCode::FAILURE;
-        }
     };
 
     // ===========================================================================
     // Config ready
 
-    // We always set threads to 1 unless it's the main server.
-    if matches!(&opt.commands, KanidmdOpt::Server(_)) {
-        // If not updated, will default to maximum
-        if let Some(threads) = sconfig.thread_count {
-            config.update_threads_count(threads);
-        }
-    } else {
-        config.update_threads_count(1);
-    };
+    // Get information on the windows username
+    #[cfg(target_family = "windows")]
+    get_user_details_windows();
 
     // Start the runtime
-
     let maybe_rt = tokio::runtime::Builder::new_multi_thread()
         .worker_threads(config.threads)
         .enable_all()
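
The hunk above replaces the old mutate-a-`Configuration` flow with a layered builder (environment, optional TOML, CLI). As a rough, hypothetical sketch of the layering idea only, where the `Layer` and `Builder` names are illustrative and the real precedence rules live in `kanidmd_core::config`:

```rust
// Hypothetical, simplified sketch of the layered-configuration idea shown in
// the diff above. `Layer` and `Builder` are illustrative names only.
#[derive(Default, Debug)]
struct Layer {
    origin: Option<String>,
    db_path: Option<String>,
}

#[derive(Default)]
struct Builder {
    merged: Layer,
}

impl Builder {
    // Earlier layers win here; whatever they leave unset can be filled by
    // later layers. (Which of env, TOML or CLI wins in kanidmd is up to its
    // own builder; this only shows the mechanism.)
    fn add(mut self, layer: Layer) -> Self {
        if self.merged.origin.is_none() {
            self.merged.origin = layer.origin;
        }
        if self.merged.db_path.is_none() {
            self.merged.db_path = layer.db_path;
        }
        self
    }

    // Mirrors the `finish() -> Option<_>` shape in the diff: if a mandatory
    // value is still missing, no configuration is produced.
    fn finish(self) -> Option<Layer> {
        if self.merged.origin.is_none() || self.merged.db_path.is_none() {
            return None;
        }
        Some(self.merged)
    }
}

fn main() {
    let env = Layer {
        origin: Some("https://idm.example.com".to_string()),
        ..Default::default()
    };
    let toml = Layer {
        db_path: Some("/var/lib/kanidm/kanidm.db".to_string()),
        ..Default::default()
    };
    println!("{:?}", Builder::default().add(env).add(toml).finish());
}
```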
@@ -643,16 +616,12 @@ fn main() -> ExitCode {
         }
     };
 
-    rt.block_on(start_daemon(opt, config, sconfig))
+    rt.block_on(start_daemon(opt, config))
 }
 
-/// Build and execute the main server. The ServerConfig are the configuration options
-/// that we are processing into the config for the main server.
+/// Build and execute the main server. The `Configuration` holds the merged
+/// options that we process into the config for the main server.
-async fn kanidm_main(
-    sconfig: ServerConfig,
-    mut config: Configuration,
-    opt: KanidmdParser,
-) -> ExitCode {
+async fn kanidm_main(config: Configuration, opt: KanidmdParser) -> ExitCode {
     match &opt.commands {
         KanidmdOpt::Server(_sopt) | KanidmdOpt::ConfigTest(_sopt) => {
             let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest(_));
@@ -662,88 +631,90 @@ async fn kanidm_main(
                 info!("Running in server mode ...");
             };
 
-            // configuration options that only relate to server mode
-            config.update_config_for_server_mode(&sconfig);
-
-            if let Some(i_str) = &(sconfig.tls_chain) {
-                let i_path = PathBuf::from(i_str.as_str());
-                let i_meta = match metadata(&i_path) {
-                    Ok(m) => m,
-                    Err(e) => {
-                        error!(
-                            "Unable to read metadata for TLS chain file '{}' - {:?}",
-                            &i_path.to_str().unwrap_or("invalid file path"),
-                            e
-                        );
-                        let diag = kanidm_lib_file_permissions::diagnose_path(&i_path);
-                        info!(%diag);
-                        return ExitCode::FAILURE;
+            // Verify the TLS configs.
+            if let Some(tls_config) = config.tls_config.as_ref() {
+                {
+                    let i_meta = match metadata(&tls_config.chain) {
+                        Ok(m) => m,
+                        Err(e) => {
+                            error!(
+                                "Unable to read metadata for TLS chain file '{}' - {:?}",
+                                tls_config.chain.display(),
+                                e
+                            );
+                            let diag =
+                                kanidm_lib_file_permissions::diagnose_path(&tls_config.chain);
+                            info!(%diag);
+                            return ExitCode::FAILURE;
+                        }
+                    };
+                    if !kanidm_lib_file_permissions::readonly(&i_meta) {
+                        warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.chain.display());
                     }
-                };
-                if !kanidm_lib_file_permissions::readonly(&i_meta) {
-                    warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", i_str);
                 }
-            }
 
-            if let Some(i_str) = &(sconfig.tls_key) {
-                let i_path = PathBuf::from(i_str.as_str());
-
-                let i_meta = match metadata(&i_path) {
-                    Ok(m) => m,
-                    Err(e) => {
-                        error!(
-                            "Unable to read metadata for TLS key file '{}' - {:?}",
-                            &i_path.to_str().unwrap_or("invalid file path"),
-                            e
-                        );
-                        let diag = kanidm_lib_file_permissions::diagnose_path(&i_path);
-                        info!(%diag);
-                        return ExitCode::FAILURE;
+                {
+                    let i_meta = match metadata(&tls_config.key) {
+                        Ok(m) => m,
+                        Err(e) => {
+                            error!(
+                                "Unable to read metadata for TLS key file '{}' - {:?}",
+                                tls_config.key.display(),
+                                e
+                            );
+                            let diag = kanidm_lib_file_permissions::diagnose_path(&tls_config.key);
+                            info!(%diag);
+                            return ExitCode::FAILURE;
+                        }
+                    };
+                    if !kanidm_lib_file_permissions::readonly(&i_meta) {
+                        warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", tls_config.key.display());
+                    }
+                    #[cfg(not(target_os = "windows"))]
+                    if i_meta.mode() & 0o007 != 0 {
+                        warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", tls_config.key.display());
                     }
-                };
-                if !kanidm_lib_file_permissions::readonly(&i_meta) {
-                    warn!("permissions on {} may not be secure. Should be readonly to running uid. This could be a security risk ...", i_str);
-                }
-                #[cfg(not(target_os = "windows"))]
-                if i_meta.mode() & 0o007 != 0 {
-                    warn!("WARNING: {} has 'everyone' permission bits in the mode. This could be a security risk ...", i_str);
-                }
-            }
-
-            if let Some(ca_dir) = &(sconfig.tls_client_ca) {
-                // check that the TLS client CA config option is what we expect
-                let ca_dir_path = PathBuf::from(&ca_dir);
-                if !ca_dir_path.exists() {
-                    error!(
-                        "TLS CA folder {} does not exist, server startup will FAIL!",
-                        ca_dir
-                    );
-                    let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
-                    info!(%diag);
                 }
 
-                let i_meta = match metadata(&ca_dir_path) {
-                    Ok(m) => m,
-                    Err(e) => {
-                        error!("Unable to read metadata for '{}' - {:?}", ca_dir, e);
+                if let Some(ca_dir) = tls_config.client_ca.as_ref() {
+                    // check that the TLS client CA config option is what we expect
+                    let ca_dir_path = PathBuf::from(&ca_dir);
+                    if !ca_dir_path.exists() {
+                        error!(
+                            "TLS CA folder {} does not exist, server startup will FAIL!",
+                            ca_dir.display()
+                        );
                         let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
                         info!(%diag);
+                    }
+
+                    let i_meta = match metadata(&ca_dir_path) {
+                        Ok(m) => m,
+                        Err(e) => {
+                            error!(
+                                "Unable to read metadata for '{}' - {:?}",
+                                ca_dir.display(),
+                                e
+                            );
+                            let diag = kanidm_lib_file_permissions::diagnose_path(&ca_dir_path);
+                            info!(%diag);
+                            return ExitCode::FAILURE;
+                        }
+                    };
+                    if !i_meta.is_dir() {
+                        error!(
+                            "ERROR: Refusing to run - TLS Client CA folder {} may not be a directory",
+                            ca_dir.display()
+                        );
                         return ExitCode::FAILURE;
                     }
-                };
-                if !i_meta.is_dir() {
-                    error!(
-                        "ERROR: Refusing to run - TLS Client CA folder {} may not be a directory",
-                        ca_dir
-                    );
-                    return ExitCode::FAILURE;
-                }
-                if kanidm_lib_file_permissions::readonly(&i_meta) {
-                    warn!("WARNING: TLS Client CA folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", ca_dir);
-                }
-                #[cfg(not(target_os = "windows"))]
-                if i_meta.mode() & 0o007 != 0 {
-                    warn!("WARNING: TLS Client CA folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", ca_dir);
+                    if kanidm_lib_file_permissions::readonly(&i_meta) {
+                        warn!("WARNING: TLS Client CA folder permissions on {} indicate it may not be RW. This could cause the server start up to fail!", ca_dir.display());
+                    }
+                    #[cfg(not(target_os = "windows"))]
+                    if i_meta.mode() & 0o007 != 0 {
+                        warn!("WARNING: TLS Client CA folder {} has 'everyone' permission bits in the mode. This could be a security risk ...", ca_dir.display());
+                    }
                 }
             }
 
@@ -880,34 +851,19 @@ async fn kanidm_main(
         }
         KanidmdOpt::CertGenerate(_sopt) => {
             info!("Running in certificate generate mode ...");
-            config.update_config_for_server_mode(&sconfig);
             cert_generate_core(&config);
         }
         KanidmdOpt::Database {
             commands: DbCommands::Backup(bopt),
         } => {
             info!("Running in backup mode ...");
-            let p = match bopt.path.to_str() {
-                Some(p) => p,
-                None => {
-                    error!("Invalid backup path");
-                    return ExitCode::FAILURE;
-                }
-            };
-            backup_server_core(&config, p);
+            backup_server_core(&config, &bopt.path);
         }
         KanidmdOpt::Database {
             commands: DbCommands::Restore(ropt),
         } => {
             info!("Running in restore mode ...");
-            let p = match ropt.path.to_str() {
-                Some(p) => p,
-                None => {
-                    error!("Invalid restore path");
-                    return ExitCode::FAILURE;
-                }
-            };
-            restore_server_core(&config, p).await;
+            restore_server_core(&config, &ropt.path).await;
         }
         KanidmdOpt::Database {
             commands: DbCommands::Verify(_vopt),
@@ -1088,8 +1044,6 @@ async fn kanidm_main(
             vacuum_server_core(&config);
         }
         KanidmdOpt::HealthCheck(sopt) => {
-            config.update_config_for_server_mode(&sconfig);
-
             debug!("{sopt:?}");
 
             let healthcheck_url = match &sopt.check_origin {
@@ -1110,12 +1064,15 @@ async fn kanidm_main(
                 .danger_accept_invalid_hostnames(!sopt.verify_tls)
                 .https_only(true);
 
-            client = match &sconfig.tls_chain {
+            client = match &config.tls_config {
                 None => client,
-                Some(ca_cert) => {
-                    debug!("Trying to load {} to build a CA cert path", ca_cert);
+                Some(tls_config) => {
+                    debug!(
+                        "Trying to load {} to build a CA cert path",
+                        tls_config.chain.display()
+                    );
                     // if the ca_cert file exists, then we'll use it
-                    let ca_cert_path = PathBuf::from(ca_cert);
+                    let ca_cert_path = tls_config.chain.clone();
                     match ca_cert_path.exists() {
                         true => {
                             let mut cert_buf = Vec::new();
@@ -1148,7 +1105,10 @@ async fn kanidm_main(
                             client
                         }
                         false => {
-                            warn!("Couldn't find ca cert {} but carrying on...", ca_cert);
+                            warn!(
+                                "Couldn't find ca cert {} but carrying on...",
+                                tls_config.chain.display()
+                            );
                             client
                         }
                     }
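
For context on the health-check branch above, a rough sketch of the CA-loading pattern it follows, assuming the `reqwest` crate with its default TLS backend; the example path is hypothetical and the error handling the real code performs is elided here:

```rust
use std::path::Path;

// Read a PEM certificate from disk and trust it as an additional root.
// The real code handles read errors instead of panicking.
fn client_with_ca(ca_path: &Path) -> reqwest::Result<reqwest::Client> {
    let pem = std::fs::read(ca_path).expect("unable to read CA chain file");
    let cert = reqwest::Certificate::from_pem(&pem)?;
    reqwest::Client::builder()
        .add_root_certificate(cert)
        .https_only(true)
        .build()
}

fn main() {
    // Example path only; adjust to wherever the dev CA chain lives.
    let _ = client_with_ca(Path::new("/tmp/kanidm/chain.pem"));
}
```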
diff --git a/server/lib/src/be/idl_sqlite.rs b/server/lib/src/be/idl_sqlite.rs
index 09531c90a..69c1b082b 100644
--- a/server/lib/src/be/idl_sqlite.rs
+++ b/server/lib/src/be/idl_sqlite.rs
@@ -1,27 +1,21 @@
-use std::collections::{BTreeMap, BTreeSet, VecDeque};
-use std::convert::{TryFrom, TryInto};
-use std::sync::Arc;
-use std::sync::Mutex;
-use std::time::Duration;
-
 use super::keystorage::{KeyHandle, KeyHandleId};
-
-// use crate::valueset;
-use hashbrown::HashMap;
-use idlset::v2::IDLBitRange;
-use kanidm_proto::internal::{ConsistencyError, OperationError};
-use rusqlite::vtab::array::Array;
-use rusqlite::{Connection, OpenFlags, OptionalExtension};
-use uuid::Uuid;
-
 use crate::be::dbentry::DbIdentSpn;
 use crate::be::dbvalue::DbCidV1;
 use crate::be::{BackendConfig, IdList, IdRawEntry, IdxKey, IdxSlope};
 use crate::entry::{Entry, EntryCommitted, EntrySealed};
 use crate::prelude::*;
 use crate::value::{IndexType, Value};
-
-// use uuid::Uuid;
+use hashbrown::HashMap;
+use idlset::v2::IDLBitRange;
+use kanidm_proto::internal::{ConsistencyError, OperationError};
+use rusqlite::vtab::array::Array;
+use rusqlite::{Connection, OpenFlags, OptionalExtension};
+use std::collections::{BTreeMap, BTreeSet, VecDeque};
+use std::convert::{TryFrom, TryInto};
+use std::sync::Arc;
+use std::sync::Mutex;
+use std::time::Duration;
+use uuid::Uuid;
 
 const DBV_ID2ENTRY: &str = "id2entry";
 const DBV_INDEXV: &str = "indexv";
@@ -1712,7 +1706,7 @@ impl IdlSqliteWriteTransaction {
 
 impl IdlSqlite {
     pub fn new(cfg: &BackendConfig, vacuum: bool) -> Result<Self, OperationError> {
-        if cfg.path.is_empty() {
+        if cfg.path.as_os_str().is_empty() {
             debug_assert_eq!(cfg.pool_size, 1);
         }
         // If provided, set the page size to match the tuning we want. By default we use 4096. The VACUUM
@@ -1734,8 +1728,7 @@ impl IdlSqlite {
 
         // Initial setup routines.
         {
-            let vconn =
-                Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error)?;
+            let vconn = Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error)?;
 
             vconn
                 .execute_batch(
@@ -1764,8 +1757,7 @@ impl IdlSqlite {
             );
             */
 
-            let vconn =
-                Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error)?;
+            let vconn = Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error)?;
 
             vconn
                 .execute_batch("PRAGMA wal_checkpoint(TRUNCATE);")
@@ -1786,8 +1778,7 @@ impl IdlSqlite {
                 OperationError::SqliteError
             })?;
 
-            let vconn =
-                Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error)?;
+            let vconn = Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error)?;
 
             vconn
                 .pragma_update(None, "page_size", cfg.fstype as u32)
@@ -1821,7 +1812,7 @@ impl IdlSqlite {
             .map(|i| {
                 trace!("Opening Connection {}", i);
                 let conn =
-                    Connection::open_with_flags(cfg.path.as_str(), flags).map_err(sqlite_error);
+                    Connection::open_with_flags(&cfg.path, flags).map_err(sqlite_error);
                 match conn {
                     Ok(conn) => {
                         // We need to set the cachesize at this point as well.
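
The `cfg.path.as_str()` to `&cfg.path` simplification works because `rusqlite::Connection::open_with_flags` is generic over `AsRef<Path>`. A tiny illustration of that genericity, where the `open` function below is a stand-in rather than the rusqlite API:

```rust
use std::path::{Path, PathBuf};

// `open` stands in for an API such as rusqlite's
// `Connection::open_with_flags`, which accepts `impl AsRef<Path>`.
fn open(path: impl AsRef<Path>) {
    println!("opening {}", path.as_ref().display());
}

fn main() {
    open("kanidm.db");                 // &str still works
    open(Path::new("kanidm.db"));      // so does &Path
    open(&PathBuf::from("kanidm.db")); // and &PathBuf, as in the diff
}
```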
diff --git a/server/lib/src/be/mod.rs b/server/lib/src/be/mod.rs
index 757bfe7ba..e2ce78acc 100644
--- a/server/lib/src/be/mod.rs
+++ b/server/lib/src/be/mod.rs
@@ -4,20 +4,6 @@
 //! is to persist content safely to disk, load that content, and execute queries
 //! utilising indexes in the most effective way possible.
 
-use std::collections::BTreeMap;
-use std::fs;
-use std::ops::DerefMut;
-use std::sync::Arc;
-use std::time::Duration;
-
-use concread::cowcell::*;
-use hashbrown::{HashMap as Map, HashSet};
-use idlset::v2::IDLBitRange;
-use idlset::AndNot;
-use kanidm_proto::internal::{ConsistencyError, OperationError};
-use tracing::{trace, trace_span};
-use uuid::Uuid;
-
 use crate::be::dbentry::{DbBackup, DbEntry};
 use crate::be::dbrepl::DbReplMeta;
 use crate::entry::Entry;
@@ -31,6 +17,19 @@ use crate::repl::ruv::{
 };
 use crate::utils::trigraph_iter;
 use crate::value::{IndexType, Value};
+use concread::cowcell::*;
+use hashbrown::{HashMap as Map, HashSet};
+use idlset::v2::IDLBitRange;
+use idlset::AndNot;
+use kanidm_proto::internal::{ConsistencyError, OperationError};
+use std::collections::BTreeMap;
+use std::fs;
+use std::ops::DerefMut;
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+use std::time::Duration;
+use tracing::{trace, trace_span};
+use uuid::Uuid;
 
 pub(crate) mod dbentry;
 pub(crate) mod dbrepl;
@@ -132,7 +131,7 @@ impl IdxMeta {
 
 #[derive(Clone)]
 pub struct BackendConfig {
-    path: String,
+    path: PathBuf,
     pool_size: u32,
     db_name: &'static str,
     fstype: FsType,
@@ -141,10 +140,16 @@ pub struct BackendConfig {
 }
 
 impl BackendConfig {
-    pub fn new(path: &str, pool_size: u32, fstype: FsType, arcsize: Option<usize>) -> Self {
+    pub fn new(
+        path: Option<&Path>,
+        pool_size: u32,
+        fstype: FsType,
+        arcsize: Option<usize>,
+    ) -> Self {
         BackendConfig {
             pool_size,
-            path: path.to_string(),
+            // If path is None we fall back to "", which the backend treats as an sqlite in-memory (RAM only) database.
+            path: path.unwrap_or_else(|| Path::new("")).to_path_buf(),
             db_name: "main",
             fstype,
             arcsize,
@@ -154,7 +159,7 @@ impl BackendConfig {
     pub(crate) fn new_test(db_name: &'static str) -> Self {
         BackendConfig {
             pool_size: 1,
-            path: "".to_string(),
+            path: PathBuf::from(""),
             db_name,
             fstype: FsType::Generic,
             arcsize: Some(2048),
@@ -936,7 +941,7 @@ pub trait BackendTransaction {
         self.get_ruv().verify(&entries, results);
     }
 
-    fn backup(&mut self, dst_path: &str) -> Result<(), OperationError> {
+    fn backup(&mut self, dst_path: &Path) -> Result<(), OperationError> {
         let repl_meta = self.get_ruv().to_db_backup_ruv();
 
         // load all entries into RAM, may need to change this later
@@ -1808,7 +1813,7 @@ impl<'a> BackendWriteTransaction<'a> {
         Ok(slope)
     }
 
-    pub fn restore(&mut self, src_path: &str) -> Result<(), OperationError> {
+    pub fn restore(&mut self, src_path: &Path) -> Result<(), OperationError> {
         let serialized_string = fs::read_to_string(src_path).map_err(|e| {
             admin_error!("fs::read_to_string {:?}", e);
             OperationError::FsError
@@ -2121,7 +2126,7 @@ impl Backend {
         debug!(db_tickets = ?cfg.pool_size, profile = %env!("KANIDM_PROFILE_NAME"), cpu_flags = %env!("KANIDM_CPU_FLAGS"));
 
         // If in memory, reduce pool to 1
-        if cfg.path.is_empty() {
+        if cfg.path.as_os_str().is_empty() {
             cfg.pool_size = 1;
         }
 
@@ -2207,13 +2212,6 @@ impl Backend {
 
 #[cfg(test)]
 mod tests {
-    use std::fs;
-    use std::iter::FromIterator;
-    use std::sync::Arc;
-    use std::time::Duration;
-
-    use idlset::v2::IDLBitRange;
-
     use super::super::entry::{Entry, EntryInit, EntryNew};
     use super::Limits;
     use super::{
@@ -2223,6 +2221,12 @@ mod tests {
     use crate::prelude::*;
     use crate::repl::cid::Cid;
     use crate::value::{IndexType, PartialValue, Value};
+    use idlset::v2::IDLBitRange;
+    use std::fs;
+    use std::iter::FromIterator;
+    use std::path::Path;
+    use std::sync::Arc;
+    use std::time::Duration;
 
     lazy_static! {
         static ref CID_ZERO: Cid = Cid::new_zero();
@@ -2597,11 +2601,9 @@ mod tests {
 
     #[test]
     fn test_be_backup_restore() {
-        let db_backup_file_name = format!(
-            "{}/.backup_test.json",
-            option_env!("OUT_DIR").unwrap_or("/tmp")
-        );
-        eprintln!(" ⚠️   {db_backup_file_name}");
+        let db_backup_file_name =
+            Path::new(option_env!("OUT_DIR").unwrap_or("/tmp")).join(".backup_test.json");
+        eprintln!(" ⚠️   {}", db_backup_file_name.display());
         run_test!(|be: &mut BackendWriteTransaction| {
             // Important! Need db metadata setup!
             be.reset_db_s_uuid().unwrap();
@@ -2656,11 +2658,9 @@ mod tests {
 
     #[test]
     fn test_be_backup_restore_tampered() {
-        let db_backup_file_name = format!(
-            "{}/.backup2_test.json",
-            option_env!("OUT_DIR").unwrap_or("/tmp")
-        );
-        eprintln!(" ⚠️   {db_backup_file_name}");
+        let db_backup_file_name =
+            Path::new(option_env!("OUT_DIR").unwrap_or("/tmp")).join(".backup2_test.json");
+        eprintln!(" ⚠️   {}", db_backup_file_name.display());
         run_test!(|be: &mut BackendWriteTransaction| {
             // Important! Need db metadata setup!
             be.reset_db_s_uuid().unwrap();
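
The test changes above swap string formatting for the path API. A standalone sketch of the same construction, using the same `OUT_DIR`/`/tmp` fallback (note `OUT_DIR` is only set when a build script is involved, so the fallback is taken otherwise):

```rust
use std::path::Path;

fn main() {
    // `join` adds the platform separator itself, replacing the old
    // `format!("{}/.backup_test.json", dir)` construction.
    let dir = option_env!("OUT_DIR").unwrap_or("/tmp");
    let backup = Path::new(dir).join(".backup_test.json");
    eprintln!(" ⚠️   {}", backup.display());
}
```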
diff --git a/server/testkit-macros/src/entry.rs b/server/testkit-macros/src/entry.rs
index 0566973e4..81e1ef701 100644
--- a/server/testkit-macros/src/entry.rs
+++ b/server/testkit-macros/src/entry.rs
@@ -63,7 +63,7 @@ fn parse_attributes(
             "ldap" => {
                 flags.ldap = true;
                 field_modifications.extend(quote! {
-                ldapaddress: Some("on".to_string()),})
+                ldapbindaddress: Some("on".to_string()),})
             }
             _ => {
                 let field_name = p.value().left.to_token_stream(); // here we can use to_token_stream as we know we're iterating over ExprAssigns
diff --git a/server/testkit/src/lib.rs b/server/testkit/src/lib.rs
index 9b1edd3d8..7eef97a25 100644
--- a/server/testkit/src/lib.rs
+++ b/server/testkit/src/lib.rs
@@ -84,9 +84,9 @@ pub async fn setup_async_test(mut config: Configuration) -> AsyncTestEnvironment
 
     let addr = format!("http://localhost:{}", port);
 
-    let ldap_url = if config.ldapaddress.is_some() {
+    let ldap_url = if config.ldapbindaddress.is_some() {
         let ldapport = port_loop();
-        config.ldapaddress = Some(format!("127.0.0.1:{}", ldapport));
+        config.ldapbindaddress = Some(format!("127.0.0.1:{}", ldapport));
         Url::parse(&format!("ldap://127.0.0.1:{}", ldapport))
             .inspect_err(|err| error!(?err, "ldap address setup"))
             .ok()
diff --git a/unix_integration/resolver/tests/cache_layer_test.rs b/unix_integration/resolver/tests/cache_layer_test.rs
index 768547463..84c31a09a 100644
--- a/unix_integration/resolver/tests/cache_layer_test.rs
+++ b/unix_integration/resolver/tests/cache_layer_test.rs
@@ -70,7 +70,7 @@ async fn setup_test(fix_fn: Fixture) -> (Resolver, KanidmClient) {
     });
 
     // Setup the config ...
-    let mut config = Configuration::new();
+    let mut config = Configuration::new_for_test();
     config.address = format!("127.0.0.1:{}", port);
     config.integration_test_config = Some(int_config);
     config.role = ServerRole::WriteReplicaNoUI;