admin unix socket (#1788)

This commit is contained in:
Firstyear 2023-07-24 10:05:10 +10:00 committed by GitHub
parent 261368058a
commit e17dcc0ddb
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
36 changed files with 460 additions and 205 deletions

7
Cargo.lock generated
View file

@ -1273,6 +1273,7 @@ dependencies = [
"clap 4.3.19",
"clap_complete",
"fs2",
"futures",
"is-terminal",
"kanidm_lib_file_permissions",
"kanidm_proto",
@ -1283,6 +1284,7 @@ dependencies = [
"sketching",
"tikv-jemallocator",
"tokio",
"tokio-util",
"toml",
"users",
"whoami",
@ -2653,6 +2655,7 @@ dependencies = [
"clap_complete",
"csv",
"futures",
"hashbrown 0.14.0",
"kanidm_client",
"kanidm_lib_crypto",
"kanidm_lib_file_permissions",
@ -2689,9 +2692,11 @@ dependencies = [
"axum-csp",
"axum-macros",
"axum-server",
"bytes",
"chrono",
"compact_jwt",
"cron",
"futures",
"futures-util",
"http",
"http-types",
@ -2719,6 +2724,7 @@ dependencies = [
"tracing",
"tracing-subscriber",
"urlencoding",
"users",
"uuid",
]
@ -3501,6 +3507,7 @@ dependencies = [
"csv",
"dialoguer",
"futures-util",
"hashbrown 0.14.0",
"kanidm_client",
"kanidm_proto",
"ldap3_proto",

View file

@ -294,18 +294,20 @@ will create self-signed certificates in `/tmp/kanidm`.
You can now build and run the server with the commands below. It will use a database in
`/tmp/kanidm/kanidm.db`.
Create the initial database and generate an `admin` password:
Start the server:
```bash
cd server/daemon
./run_insecure_dev_server.sh
```
While the server is running, you can use the admin socket to generate an `admin` password:
```bash
./run_insecure_dev_server.sh recover-account admin
```
Record the password above, then run the server start command:
```bash
./run_insecure_dev_server.sh
```
Record the password above.
In a new terminal, you can now build and run the client tools with:
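The commands themselves fall outside this hunk. As a rough sketch only, assuming the client binary is `kanidm` from `tools/cli`, the dev server listens on `https://localhost:8443`, and the generated CA sits at `/tmp/kanidm/ca.pem` (none of these are shown in this diff):
```bash
# Hypothetical client session against the insecure dev server; flags and
# paths are assumptions based on the dev defaults described above.
cargo run --bin kanidm -- login -D admin \
    -H https://localhost:8443 -C /tmp/kanidm/ca.pem
cargo run --bin kanidm -- self whoami \
    -H https://localhost:8443 -C /tmp/kanidm/ca.pem
```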

View file

@ -46,21 +46,20 @@ docker cp server.toml kanidmd:/data/server.toml
```bash
docker run --rm -i -t -v kanidmd:/data \
kanidm/server:latest \
kanidmd cert-generate -c /data/server.toml
kanidmd cert-generate
```
### Start Kanidmd Container
```bash
docker start kanidmd
```
### Recover the admin password
```bash
docker run --rm -i -t -v kanidmd:/data \
kanidm/server:latest \
kanidmd recover-account admin -c /data/server.toml
```
### Start Kanidmd
```bash
docker start kanidmd
docker exec -i -t kanidmd \
kanidmd recover-account admin
```
### Setup the client configuration

View file

@ -24,40 +24,18 @@ text=You MUST set the `domain` name correctly, aligned with your `origin`, else
## Check the configuration is valid
You should test your configuration is valid before you proceed.
You should test that your configuration is valid before you proceed. This defaults to using
`-c /data/server.toml`.
```bash
docker run --rm -i -t -v kanidmd:/data \
kanidm/server:latest /sbin/kanidmd configtest -c /data/server.toml
kanidm/server:latest /sbin/kanidmd configtest
```
## Default Admin Account
Then you can set up the initial admin account and initialise the database into your volume. This
command will generate a new random password for the admin account.
<!-- deno-fmt-ignore-start -->
{{#template templates/kani-warning.md
imagepath=images
title=Warning!
text=The server must not be running at this point, as it requires exclusive access to the database.
}}
<!-- deno-fmt-ignore-end -->
```bash
docker run --rm -i -t -v kanidmd:/data \
kanidm/server:latest /sbin/kanidmd recover-account -c /data/server.toml admin
# success - recovery of account password for admin: vv...
```
After the recovery is complete the server can be started again.
## Run the Server
Now we can run the server so that it can accept connections. This defaults to using
`-c /data/server.toml`
`-c /data/server.toml`.
```bash
docker run -p 443:8443 -v kanidmd:/data kanidm/server:latest
@ -83,3 +61,15 @@ text=However you choose to run your server, you should document and keep note of
}}
<!-- deno-fmt-ignore-end -->
## Default Admin Account
Now that the server is running, you can initialise the default admin account. This command will
generate a new random password for the admin account. You must run this command as the same user as
the kanidmd process or as root. This defaults to using `-c /data/server.toml`.
```bash
docker exec -i -t <container name> \
kanidmd recover-account admin
# new_password: "xjgG4..."
```
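If you want machine-readable output instead, the new `submit_admin_req` handler in this commit prints the password as a JSON object when the existing `--output`/`-o` option is set to `json`; a hedged example of that invocation:
```bash
# Assumed flags: -o/--output comes from the server's CommonOpt arguments,
# and the JSON branch of submit_admin_req emits {"password":"..."}.
docker exec -i -t <container name> \
  kanidmd recover-account admin -o json
# {"password":"xjgG4..."}
```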

View file

@ -61,7 +61,7 @@ You can test that your configuration is correct, and the server should correctly
```bash
docker run --rm -i -t -v kanidmd:/data \
kanidm/server:latest /sbin/kanidmd configtest -c /data/server.toml
kanidm/server:latest /sbin/kanidmd configtest
```
You can then follow through with the upgrade by running the create / run command with your existing

View file

@ -1,3 +1,5 @@
web_ui_pkg_path = "/pkg"
# Don't set the cpu_flags to autodetect for this platform
# cpu_flags = "none"
admin_bind_path = "/data/kanidmd.sock"
default_config_path = "/data/server.toml"

View file

@ -1,3 +1,5 @@
web_ui_pkg_path = "../web_ui/pkg"
# Set to native for developer machines.
cpu_flags = "native"
admin_bind_path = "/tmp/kanidmd.sock"
default_config_path = "../../examples/insecure_server.toml"

View file

@ -1,3 +1,5 @@
web_ui_pkg_path = "/usr/share/kanidm/ui/pkg"
# Don't set the value for autodetect
# cpu_flags = "none"
admin_bind_path = "/var/run/kanidmd/sock"
default_config_path = "/etc/kanidm/server.toml"

View file

@ -52,6 +52,8 @@ struct ProfileConfig {
web_ui_pkg_path: String,
#[serde(default)]
cpu_flags: CpuOptLevel,
admin_bind_path: String,
default_config_path: String,
}
pub fn apply_profile() {
@ -87,4 +89,12 @@ pub fn apply_profile() {
"cargo:rustc-env=KANIDM_WEB_UI_PKG_PATH={}",
profile_cfg.web_ui_pkg_path
);
println!(
"cargo:rustc-env=KANIDM_ADMIN_BIND_PATH={}",
profile_cfg.admin_bind_path
);
println!(
"cargo:rustc-env=KANIDM_DEFAULT_CONFIG_PATH={}",
profile_cfg.default_config_path
);
}

View file

@ -19,9 +19,11 @@ axum-auth = "0.4.0"
axum-csp = { workspace = true }
axum-macros = "0.3.8"
axum-server = { version = "0.5.1", features = ["tls-openssl"] }
bytes = { workspace = true }
chrono = { workspace = true }
compact_jwt = { workspace = true }
cron = { workspace = true }
futures = { workspace = true }
futures-util = { workspace = true }
http = "0.2.9"
http-types = { workspace = true }
@ -48,6 +50,7 @@ tower-http = { version = "0.4.3", features = ["tokio", "tracing", "uuid", "compr
tracing = { workspace = true, features = ["attributes"] }
tracing-subscriber = { workspace = true, features = ["time", "json"] }
urlencoding = { workspace = true }
users = { workspace = true }
uuid = { workspace = true, features = ["serde", "v4" ] }
[build-dependencies]

View file

@ -1529,4 +1529,21 @@ impl QueryServerWriteV1 {
admin_info!(?res, "delayed action error");
}
}
#[instrument(
level = "info",
skip_all,
fields(uuid = ?eventid)
)]
pub(crate) async fn handle_admin_recover_account(
&self,
name: String,
eventid: Uuid,
) -> Result<String, OperationError> {
trace!(%name, "Begin admin recover account event");
let mut idms_prox_write = self.idms.proxy_write(duration_from_epoch_now()).await;
let pw = idms_prox_write.recover_account(name.as_str(), None)?;
idms_prox_write.commit().map(|()| pw)
}
}

213
server/core/src/admin.rs Normal file
View file

@ -0,0 +1,213 @@
use crate::actors::v1_write::QueryServerWriteV1;
use crate::CoreAction;
use bytes::{BufMut, BytesMut};
use futures::{SinkExt, StreamExt};
use serde::{Deserialize, Serialize};
use std::error::Error;
use std::io;
use std::path::Path;
use tokio::net::{UnixListener, UnixStream};
use tokio::sync::broadcast;
use tokio_util::codec::{Decoder, Encoder, Framed};
use tracing::{span, Level};
use users::get_current_uid;
use uuid::Uuid;
#[derive(Serialize, Deserialize, Debug)]
pub enum AdminTaskRequest {
RecoverAccount { name: String },
}
#[derive(Serialize, Deserialize, Debug)]
pub enum AdminTaskResponse {
RecoverAccount { password: String },
Error,
}
#[derive(Default)]
pub struct ClientCodec;
impl Decoder for ClientCodec {
type Error = io::Error;
type Item = AdminTaskResponse;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
trace!("Attempting to decode request ...");
match serde_json::from_slice::<AdminTaskResponse>(src) {
Ok(msg) => {
// Clear the buffer for the next message.
src.clear();
Ok(Some(msg))
}
_ => Ok(None),
}
}
}
impl Encoder<AdminTaskRequest> for ClientCodec {
type Error = io::Error;
fn encode(&mut self, msg: AdminTaskRequest, dst: &mut BytesMut) -> Result<(), Self::Error> {
trace!("Attempting to send response -> {:?} ...", msg);
let data = serde_json::to_vec(&msg).map_err(|e| {
error!("socket encoding error -> {:?}", e);
io::Error::new(io::ErrorKind::Other, "JSON encode error")
})?;
dst.put(data.as_slice());
Ok(())
}
}
#[derive(Default)]
struct ServerCodec;
impl Decoder for ServerCodec {
type Error = io::Error;
type Item = AdminTaskRequest;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
trace!("Attempting to decode request ...");
match serde_json::from_slice::<AdminTaskRequest>(src) {
Ok(msg) => {
// Clear the buffer for the next message.
src.clear();
Ok(Some(msg))
}
_ => Ok(None),
}
}
}
impl Encoder<AdminTaskResponse> for ServerCodec {
type Error = io::Error;
fn encode(&mut self, msg: AdminTaskResponse, dst: &mut BytesMut) -> Result<(), Self::Error> {
trace!("Attempting to send response -> {:?} ...", msg);
let data = serde_json::to_vec(&msg).map_err(|e| {
error!("socket encoding error -> {:?}", e);
io::Error::new(io::ErrorKind::Other, "JSON encode error")
})?;
dst.put(data.as_slice());
Ok(())
}
}
pub(crate) struct AdminActor;
impl AdminActor {
pub async fn create_admin_sock(
sock_path: &str,
server: &'static QueryServerWriteV1,
mut broadcast_rx: broadcast::Receiver<CoreAction>,
) -> Result<tokio::task::JoinHandle<()>, ()> {
debug!("🧹 Cleaning up sockets from previous invocations");
rm_if_exist(sock_path);
// Setup the unix socket.
let listener = match UnixListener::bind(sock_path) {
Ok(l) => l,
Err(e) => {
error!(err = ?e, "Failed to bind UNIX socket {}", sock_path);
return Err(());
}
};
// what is the uid we are running as?
let cuid = get_current_uid();
let handle = tokio::spawn(async move {
loop {
tokio::select! {
Ok(action) = broadcast_rx.recv() => {
match action {
CoreAction::Shutdown => break,
}
}
accept_res = listener.accept() => {
match accept_res {
Ok((socket, _addr)) => {
// Assert that the incoming connection is from root or
// our own uid.
// ⚠️ This underpins the security of this socket ⚠️
if let Ok(ucred) = socket.peer_cred() {
let incoming_uid = ucred.uid();
if incoming_uid == 0 || incoming_uid == cuid {
// all good!
info!(pid = ?ucred.pid(), "Allowing admin socket access");
} else {
warn!(%incoming_uid, "unauthorised user");
continue;
}
} else {
error!("unable to determine peer credentials");
continue;
};
// spawn the worker.
tokio::spawn(async move {
if let Err(e) = handle_client(socket, server).await {
error!(err = ?e, "admin client error");
}
});
}
Err(e) => {
warn!(err = ?e, "admin socket accept error");
}
}
}
}
}
info!("Stopped AdminActor");
});
Ok(handle)
}
}
fn rm_if_exist(p: &str) {
if Path::new(p).exists() {
debug!("Removing requested file {:?}", p);
let _ = std::fs::remove_file(p).map_err(|e| {
error!(
"Failure while attempting to attempting to remove {:?} -> {:?}",
p, e
);
});
} else {
debug!("Path {:?} doesn't exist, not attempting to remove.", p);
}
}
async fn handle_client(
sock: UnixStream,
server: &'static QueryServerWriteV1,
) -> Result<(), Box<dyn Error>> {
debug!("Accepted admin socket connection");
let mut reqs = Framed::new(sock, ServerCodec);
trace!("Waiting for requests ...");
while let Some(Ok(req)) = reqs.next().await {
// Setup the logging span
let eventid = Uuid::new_v4();
let nspan = span!(Level::INFO, "handle_admin_client_request", uuid = ?eventid);
let _span = nspan.enter();
let resp = match req {
AdminTaskRequest::RecoverAccount { name } => {
match server.handle_admin_recover_account(name, eventid).await {
Ok(password) => AdminTaskResponse::RecoverAccount { password },
Err(e) => {
error!(err = ?e, "error during recover-account");
AdminTaskResponse::Error
}
}
}
};
reqs.send(resp).await?;
reqs.flush().await?;
trace!("flushed response!");
}
debug!("Disconnecting client ...");
Ok(())
}
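Since both codecs frame each message as a single JSON document, the socket can also be exercised by hand from a root shell; the following probe is an illustration only (socat and the release socket path are assumptions, not part of this commit):
```bash
# Hypothetical manual probe of the admin socket. Run as root or as the
# kanidmd user, because the peer_cred check above rejects anyone else.
# serde's default externally-tagged enum encoding gives this JSON shape.
echo '{"RecoverAccount":{"name":"admin"}}' | \
    socat - UNIX-CONNECT:/var/run/kanidmd/sock
# => {"RecoverAccount":{"password":"..."}}
```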

View file

@ -48,6 +48,7 @@ pub struct TlsConfiguration {
pub struct ServerConfig {
pub bindaddress: Option<String>,
pub ldapbindaddress: Option<String>,
pub adminbindpath: Option<String>,
pub trust_x_forward_for: Option<bool>,
// pub threads: Option<usize>,
pub db_path: String,
@ -162,13 +163,13 @@ impl From<LogLevel> for EnvFilter {
pub struct Configuration {
pub address: String,
pub ldapaddress: Option<String>,
pub adminbindpath: String,
pub threads: usize,
// db type later
pub db_path: String,
pub db_fs_type: Option<String>,
pub db_arc_size: Option<usize>,
pub maximum_request: usize,
pub secure_cookies: bool,
pub trust_x_forward_for: bool,
pub tls_config: Option<TlsConfiguration>,
pub integration_test_config: Option<Box<IntegrationTestConfig>>,
@ -182,12 +183,13 @@ pub struct Configuration {
impl fmt::Display for Configuration {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "address: {}, ", self.address)
.and_then(|_| write!(f, "domain: {}, ", self.domain))
write!(f, "address: {}, ", self.address)?;
write!(f, "domain: {}, ", self.domain)
.and_then(|_| match &self.ldapaddress {
Some(la) => write!(f, "ldap address: {}, ", la),
None => write!(f, "ldap address: disabled, "),
})
.and_then(|_| write!(f, "admin bind path: {}, ", self.adminbindpath))
.and_then(|_| write!(f, "thread count: {}, ", self.threads))
.and_then(|_| write!(f, "dbpath: {}, ", self.db_path))
.and_then(|_| match self.db_arc_size {
@ -195,7 +197,6 @@ impl fmt::Display for Configuration {
None => write!(f, "arcsize: AUTO, "),
})
.and_then(|_| write!(f, "max request size: {}b, ", self.maximum_request))
.and_then(|_| write!(f, "secure cookies: {}, ", self.secure_cookies))
.and_then(|_| write!(f, "trust X-Forwarded-For: {}, ", self.trust_x_forward_for))
.and_then(|_| write!(f, "with TLS: {}, ", self.tls_config.is_some()))
// TODO: include the backup timings
@ -221,19 +222,17 @@ impl Configuration {
Configuration {
address: String::from("127.0.0.1:8080"),
ldapaddress: None,
adminbindpath: env!("KANIDM_ADMIN_BIND_PATH").to_string(),
threads: std::thread::available_parallelism()
.map(|t| t.get())
.unwrap_or_else(|_e| {
eprintln!("WARNING: Unable to read number of available CPUs, defaulting to 1");
1
eprintln!("WARNING: Unable to read number of available CPUs, defaulting to 4");
4
}),
db_path: String::from(""),
db_fs_type: None,
db_arc_size: None,
maximum_request: 256 * 1024, // 256k
// log path?
// default true in prd
secure_cookies: !cfg!(test),
trust_x_forward_for: false,
tls_config: None,
integration_test_config: None,
@ -305,6 +304,12 @@ impl Configuration {
self.ldapaddress = l.clone();
}
pub fn update_admin_bind_path(&mut self, p: &Option<String>) {
if let Some(p) = p {
self.adminbindpath = p.clone();
}
}
pub fn update_origin(&mut self, o: &str) {
self.origin = o.to_string();
}
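With the `adminbindpath` plumbing above, the socket location can be overridden from the server config and is then shared by the running daemon and the `recover-account` client. A minimal sketch of that flow, assuming the TOML key mirrors the `ServerConfig` field name (the example config files are not part of this hunk):
```bash
# Assumed TOML key: serde derives it from ServerConfig::adminbindpath.
echo 'adminbindpath = "/tmp/kanidmd-dev.sock"' >> ./insecure_server.toml

# The daemon binds the admin socket at that path ...
kanidmd server -c ./insecure_server.toml &

# ... and recover-account talks to the same path, read from the same config.
kanidmd recover-account admin -c ./insecure_server.toml
```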

View file

@ -19,7 +19,7 @@ use crate::actors::v1_write::QueryServerWriteV1;
use kanidmd_lib::constants::PURGE_FREQUENCY;
use kanidmd_lib::event::{OnlineBackupEvent, PurgeRecycledEvent, PurgeTombstoneEvent};
pub struct IntervalActor;
pub(crate) struct IntervalActor;
impl IntervalActor {
pub fn start(

View file

@ -26,6 +26,7 @@ extern crate tracing;
extern crate kanidmd_lib;
pub mod actors;
pub mod admin;
pub mod config;
mod crypto;
mod https;
@ -36,7 +37,6 @@ use std::path::Path;
use std::sync::Arc;
use compact_jwt::JwsSigner;
use kanidm_proto::messages::{AccountChangeMessage, MessageStatus};
use kanidm_proto::v1::OperationError;
use kanidmd_lib::be::{Backend, BackendConfig, BackendTransaction, FsType};
use kanidmd_lib::idm::ldap::LdapServer;
@ -51,6 +51,7 @@ use tokio::sync::broadcast;
use crate::actors::v1_read::QueryServerReadV1;
use crate::actors::v1_write::QueryServerWriteV1;
use crate::admin::AdminActor;
use crate::config::{Configuration, ServerRole};
use crate::interval::IntervalActor;
@ -497,62 +498,6 @@ pub async fn verify_server_core(config: &Configuration) {
// Now add IDM server verifications?
}
pub async fn recover_account_core(config: &Configuration, name: &str) {
let schema = match Schema::new() {
Ok(s) => s,
Err(e) => {
eprintln!("Failed to setup in memory schema: {:?}", e);
std::process::exit(1);
}
};
// Start the backend.
let be = match setup_backend(config, &schema) {
Ok(be) => be,
Err(e) => {
error!("Failed to setup BE: {:?}", e);
return;
}
};
// setup the qs - *with* init of the migrations and schema.
let (_qs, idms, _idms_delayed, _idms_audit) = match setup_qs_idms(be, schema, config).await {
Ok(t) => t,
Err(e) => {
error!("Unable to setup query server or idm server -> {:?}", e);
return;
}
};
// Run the password change.
let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now()).await;
let new_pw = match idms_prox_write.recover_account(name, None) {
Ok(new_pw) => match idms_prox_write.commit() {
Ok(_) => new_pw,
Err(e) => {
error!("A critical error during commit occurred {:?}", e);
std::process::exit(1);
}
},
Err(e) => {
error!("Error during password reset -> {:?}", e);
// abort the txn
std::mem::drop(idms_prox_write);
std::process::exit(1);
}
};
println!(
"{}",
AccountChangeMessage {
output_mode: config.output_mode,
status: MessageStatus::Success,
src_user: String::from("command-line invocation"),
dest_user: name.to_string(),
result: new_pw,
action: String::from("recovery of account password"),
}
);
}
pub fn cert_generate_core(config: &Configuration) {
// Get the cert root
@ -871,6 +816,22 @@ pub async fn create_server_core(
}
};
// If we are NOT in integration test mode, start the admin socket now
let maybe_admin_sock_handle = if config.integration_test_config.is_none() {
let broadcast_rx = broadcast_tx.subscribe();
let admin_handle = AdminActor::create_admin_sock(
config.adminbindpath.as_str(),
server_write_ref,
broadcast_rx,
)
.await?;
Some(admin_handle)
} else {
None
};
// If we have been requested to init LDAP, configure it now.
let maybe_ldap_acceptor_handle = match &config.ldapaddress {
Some(la) => {
@ -901,10 +862,6 @@ pub async fn create_server_core(
}
};
// TODO: Remove these when we go to auth bearer!
// Copy the max size
let _secure_cookies = config.secure_cookies;
let maybe_http_acceptor_handle = if config_test {
admin_info!("this config rocks! 🪨 ");
None
@ -939,6 +896,10 @@ pub async fn create_server_core(
handles.push(backup_handle)
}
if let Some(admin_sock_handle) = maybe_admin_sock_handle {
handles.push(admin_sock_handle)
}
if let Some(ldap_handle) = maybe_ldap_acceptor_handle {
handles.push(ldap_handle)
}

View file

@ -23,11 +23,13 @@ kanidmd_core = { workspace = true }
kanidm_lib_file_permissions = { workspace = true }
sketching = { workspace = true }
fs2 = { workspace = true }
futures = { workspace = true }
clap = { workspace = true, features = ["env"] }
reqwest = { workspace = true }
serde = { workspace = true, features = ["derive"] }
tokio = { workspace = true, features = ["rt-multi-thread", "macros", "signal"] }
tokio-util = { workspace = true, features = ["codec"] }
toml = { workspace = true }
is-terminal = "0.4.9"

View file

@ -26,13 +26,14 @@ if [ ! -f "${CONFIG_FILE}" ]; then
exit 1
fi
#shellcheck disable=SC2086
cargo run ${KANI_CARGO_OPTS} --bin kanidmd -- cert-generate -c "${CONFIG_FILE}"
COMMAND="server"
if [ -n "${1}" ]; then
COMMAND=$*
#shellcheck disable=SC2086
cargo run ${KANI_CARGO_OPTS} --bin kanidmd -- ${COMMAND} -c "${CONFIG_FILE}"
else
#shellcheck disable=SC2086
cargo run ${KANI_CARGO_OPTS} --bin kanidmd -- cert-generate -c "${CONFIG_FILE}"
#shellcheck disable=SC2086
cargo run ${KANI_CARGO_OPTS} --bin kanidmd -- server -c "${CONFIG_FILE}"
fi
#shellcheck disable=SC2086
cargo run ${KANI_CARGO_OPTS} --bin kanidmd -- ${COMMAND} -c "${CONFIG_FILE}"

View file

@ -15,6 +15,7 @@
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
use std::fs::{metadata, File};
use std::str::FromStr;
// This works on both unix and windows.
use fs2::FileExt;
use kanidm_proto::messages::ConsoleOutputMode;
@ -24,16 +25,20 @@ use std::path::PathBuf;
use std::process::ExitCode;
use clap::{Args, Parser, Subcommand};
use futures::{SinkExt, StreamExt};
use kanidmd_core::admin::{AdminTaskRequest, AdminTaskResponse, ClientCodec};
use kanidmd_core::config::{Configuration, LogLevel, ServerConfig};
use kanidmd_core::{
backup_server_core, cert_generate_core, create_server_core, dbscan_get_id2entry_core,
dbscan_list_id2entry_core, dbscan_list_index_analysis_core, dbscan_list_index_core,
dbscan_list_indexes_core, domain_rename_core, recover_account_core, reindex_server_core,
restore_server_core, vacuum_server_core, verify_server_core,
dbscan_list_indexes_core, domain_rename_core, reindex_server_core, restore_server_core,
vacuum_server_core, verify_server_core,
};
use sketching::tracing_forest::traits::*;
use sketching::tracing_forest::util::*;
use sketching::tracing_forest::{self};
use tokio::net::UnixStream;
use tokio_util::codec::Framed;
#[cfg(not(target_family = "windows"))] // not needed for windows builds
use users::{get_current_gid, get_current_uid, get_effective_gid, get_effective_uid};
#[cfg(target_family = "windows")] // for windows builds
@ -62,7 +67,7 @@ impl KanidmdOpt {
KanidmdOpt::Database {
commands: DbCommands::Restore(ropt),
} => &ropt.commonopts,
KanidmdOpt::RecoverAccount(ropt) => &ropt.commonopts,
KanidmdOpt::RecoverAccount { commonopts, .. } => commonopts,
KanidmdOpt::DbScan {
commands: DbScanOpt::ListIndex(dopt),
} => &dopt.commonopts,
@ -97,6 +102,45 @@ fn get_user_details_windows() {
);
}
async fn submit_admin_req(path: &str, req: AdminTaskRequest, output_mode: ConsoleOutputMode) {
// Connect to the socket.
let stream = match UnixStream::connect(path).await {
Ok(s) => s,
Err(e) => {
error!(err = ?e, %path, "Unable to connect to socket path");
return;
}
};
let mut reqs = Framed::new(stream, ClientCodec);
if let Err(e) = reqs.send(req).await {
error!(err = ?e, "Unable to send request");
return;
};
if let Err(e) = reqs.flush().await {
error!(err = ?e, "Unable to flush request");
return;
}
trace!("flushed, waiting ...");
match reqs.next().await {
Some(Ok(AdminTaskResponse::RecoverAccount { password })) => match output_mode {
ConsoleOutputMode::JSON => {
eprintln!("{{\"password\":\"{}\"}}", password)
}
ConsoleOutputMode::Text => {
info!(new_password = ?password)
}
},
_ => {
error!("Error making request to admin socket");
}
}
}
#[tokio::main(flavor = "multi_thread")]
async fn main() -> ExitCode {
// Read CLI args, determine what the user has asked us to do.
@ -105,7 +149,17 @@ async fn main() -> ExitCode {
//we set up a list of these so we can set the log config THEN log out the errors.
let mut config_error: Vec<String> = Vec::new();
let mut config = Configuration::new();
let cfg_path = opt.commands.commonopt().config_path.clone();
let cfg_path = opt
.commands
.commonopt()
.config_path
.clone()
.or_else(|| PathBuf::from_str(env!("KANIDM_DEFAULT_CONFIG_PATH")).ok());
let Some(cfg_path) = cfg_path else {
eprintln!("Unable to start - can not locate any configuration file");
return ExitCode::FAILURE;
};
let sconfig = match cfg_path.exists() {
false => {
@ -139,10 +193,12 @@ async fn main() -> ExitCode {
.set_global(true)
.set_tag(sketching::event_tagger)
// Fall back to stderr
.map_sender(|sender| sender.or_stderr())
.map_sender(|sender| {
sender.or_stderr()
})
.build_on(|subscriber|{
subscriber.with(log_filter)
})
.on(async {
// Get information on the windows username
@ -274,6 +330,7 @@ async fn main() -> ExitCode {
config.update_role(sconfig.role);
config.update_output_mode(opt.commands.commonopt().output_mode.to_owned().into());
config.update_trust_x_forward_for(sconfig.trust_x_forward_for);
config.update_admin_bind_path(&sconfig.adminbindpath);
match &opt.commands {
// we aren't going to touch the DB so we can carry on
@ -300,13 +357,6 @@ async fn main() -> ExitCode {
}
}
/*
// Apply any cli overrides, normally debug level.
if opt.commands.commonopt().debug.as_ref() {
// ::std::env::set_var("RUST_LOG", ",kanidm=info,webauthn=debug");
}
*/
match &opt.commands {
KanidmdOpt::Server(_sopt) | KanidmdOpt::ConfigTest(_sopt) => {
let config_test = matches!(&opt.commands, KanidmdOpt::ConfigTest(_));
@ -468,9 +518,15 @@ async fn main() -> ExitCode {
info!("Running in db verification mode ...");
verify_server_core(&config).await;
}
KanidmdOpt::RecoverAccount(raopt) => {
KanidmdOpt::RecoverAccount {
name, commonopts
} => {
info!("Running account recovery ...");
recover_account_core(&config, &raopt.name).await;
let output_mode: ConsoleOutputMode = commonopts.output_mode.to_owned().into();
submit_admin_req(config.adminbindpath.as_str(),
AdminTaskRequest::RecoverAccount { name: name.to_owned() },
output_mode,
).await;
}
KanidmdOpt::Database {
commands: DbCommands::Reindex(_copt),
@ -525,7 +581,6 @@ async fn main() -> ExitCode {
debug!("{sopt:?}");
let healthcheck_url = match &sopt.check_origin {
true => format!("{}/status", config.origin),
false => format!("https://{}/status", config.address),
@ -533,8 +588,6 @@ async fn main() -> ExitCode {
debug!("Checking {healthcheck_url}");
let mut client = reqwest::ClientBuilder::new()
.danger_accept_invalid_certs(!sopt.verify_tls)
.danger_accept_invalid_hostnames(!sopt.verify_tls)
@ -590,7 +643,6 @@ async fn main() -> ExitCode {
.build()
.unwrap();
let req = match client.get(&healthcheck_url).send().await {
Ok(val) => val,
Err(error) => {

View file

@ -2,7 +2,7 @@
struct CommonOpt {
/// Path to the server's configuration file. If it does not exist, it will be created.
#[clap(short, long = "config", env = "KANIDM_CONFIG")]
config_path: PathBuf,
config_path: Option<PathBuf>,
/// Log format (still in very early development)
#[clap(short, long = "output", env = "KANIDM_OUTPUT", default_value="text")]
output_mode: String,
@ -26,16 +26,6 @@ struct RestoreOpt {
commonopts: CommonOpt,
}
#[derive(Debug, Args)]
struct RecoverAccountOpt {
#[clap(value_parser)]
/// The account name to recover credentials for.
name: String,
#[clap(flatten)]
commonopts: CommonOpt,
}
#[derive(Debug, Subcommand)]
enum DomainSettingsCmds {
#[clap(name = "rename")]
@ -83,18 +73,6 @@ struct HealthCheckArgs {
commonopts: CommonOpt,
}
/*
#[derive(Debug, Args)]
struct DbScanGetIndex {
/// The name of the index to list
index_name: String,
/// The name of the index key to retrieve
key: String,
#[clap(flatten)]
commonopts: CommonOpt,
}
*/
#[derive(Debug, Args)]
struct DbScanGetId2Entry {
/// The id of the entry to display
@ -147,7 +125,13 @@ enum KanidmdOpt {
CertGenerate(CommonOpt),
#[clap(name = "recover-account")]
/// Recover an account's password
RecoverAccount(RecoverAccountOpt),
RecoverAccount {
#[clap(value_parser)]
/// The account name to recover credentials for.
name: String,
#[clap(flatten)]
commonopts: CommonOpt,
},
// #[clap(name = "reset_server_id")]
// ResetServerId(CommonOpt),
#[clap(name = "db-scan")]
@ -168,6 +152,7 @@ enum KanidmdOpt {
#[clap(subcommand)]
commands: DomainSettingsCmds,
},
/// Load the server config and check services are listening
#[clap(name = "healthcheck")]
HealthCheck(HealthCheckArgs),

View file

@ -64,7 +64,6 @@ pub async fn setup_async_test() -> (KanidmClient, CoreHandle) {
// Setup the config ...
let mut config = Configuration::new();
config.address = format!("127.0.0.1:{}", port);
config.secure_cookies = false;
config.integration_test_config = Some(int_config);
config.role = ServerRole::WriteReplica;
config.domain = "localhost".to_string();

View file

@ -10,6 +10,8 @@
#![deny(clippy::await_holding_lock)]
#![deny(clippy::needless_pass_by_value)]
#![deny(clippy::trivially_copy_pass_by_ref)]
// Needed as yew-router::Routable uses std::collection::HashMap
#![allow(clippy::disallowed_types)]
use error::FetchError;
use serde::{Deserialize, Serialize};

View file

@ -1,5 +1,5 @@
use serde::Deserialize;
use std::collections::HashMap;
use std::collections::BTreeMap;
use url::Url;
use uuid::Uuid;
@ -16,7 +16,7 @@ pub struct Config {
// pub entry: Option<Vec<EntryConfig>>,
#[serde(flatten)]
pub entry_map: HashMap<Uuid, EntryConfig>,
pub entry_map: BTreeMap<Uuid, EntryConfig>,
}
#[derive(Debug, Deserialize, Default, Clone)]

View file

@ -23,7 +23,7 @@ use base64urlsafedata::Base64UrlSafeData;
use chrono::Utc;
use clap::Parser;
use cron::Schedule;
use std::collections::{BTreeMap, HashMap};
use std::collections::BTreeMap;
use std::fs::metadata;
use std::fs::File;
use std::io::Read;
@ -504,7 +504,7 @@ async fn process_ipa_sync_result(
ipa_client: &mut LdapClient,
basedn: String,
ldap_entries: Vec<LdapSyncReplEntry>,
entry_config_map: &HashMap<Uuid, EntryConfig>,
entry_config_map: &BTreeMap<Uuid, EntryConfig>,
is_initialise: bool,
) -> Result<Vec<ScimEntry>, ()> {
// Because of how TOTP works with freeipa it's a soft referral from

View file

@ -1,5 +1,5 @@
use serde::Deserialize;
use std::collections::HashMap;
use std::collections::BTreeMap;
use url::Url;
use uuid::Uuid;
@ -100,7 +100,7 @@ pub struct Config {
pub group_attr_member: String,
#[serde(flatten)]
pub entry_map: HashMap<Uuid, EntryConfig>,
pub entry_map: BTreeMap<Uuid, EntryConfig>,
}
#[derive(Debug, Deserialize, Default, Clone)]

View file

@ -21,6 +21,7 @@ crossbeam = { workspace = true }
csv = { workspace = true }
dialoguer = { workspace = true }
futures-util = { workspace = true, features = ["sink"] }
hashbrown = { workspace = true }
kanidm_client = { workspace = true }
kanidm_proto = { workspace = true }
ldap3_proto = { workspace = true }

View file

@ -1,4 +1,4 @@
use std::collections::{HashMap, HashSet};
use hashbrown::{HashMap, HashSet};
use std::time::Duration;
use rand::distributions::Alphanumeric;

View file

@ -1,4 +1,4 @@
use std::collections::{HashMap, HashSet};
use hashbrown::{HashMap, HashSet};
use std::time::{Duration, Instant};
use ldap3_proto::proto::*;

View file

@ -3,7 +3,7 @@ use std::path::Path;
use std::time::Duration;
use uuid::Uuid;
use std::collections::{HashMap, HashSet};
use hashbrown::{HashMap, HashSet};
use crate::data::*;

View file

@ -1,5 +1,5 @@
use hashbrown::{HashMap, HashSet};
use ldap3_proto::proto::*;
use std::collections::{HashMap, HashSet};
use std::time::{Duration, Instant};
use uuid::Uuid;

View file

@ -1,4 +1,4 @@
use std::collections::{HashMap, HashSet};
use hashbrown::{HashMap, HashSet};
use std::time::{Duration, Instant};
use kanidm_client::{ClientError, KanidmClient, KanidmClientBuilder, StatusCode};

View file

@ -15,7 +15,7 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[macro_use]
extern crate tracing;
use std::collections::{HashMap, HashSet};
use hashbrown::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant};

View file

@ -1,5 +1,6 @@
use hashbrown::{HashMap, HashSet};
use std::cmp::Ordering;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::fs::File;
use std::io::BufReader;
@ -330,7 +331,7 @@ pub fn doit(input: &Path, output: &Path) {
.map(|uuid| all_entities.get(uuid).unwrap().get_entity_type())
.collect();
if let Some(ac) = access.get_mut(id) {
if let Some(ac) = access.get_mut(*id) {
ac.append(&mut nlist);
} else {
access.insert(**id, nlist);
@ -347,7 +348,7 @@ pub fn doit(input: &Path, output: &Path) {
.map(|v| all_entities.get(&v.0).unwrap().get_entity_type())
.collect();
if let Some(ac) = access.get_mut(id) {
if let Some(ac) = access.get_mut(*id) {
ac.append(&mut nlist);
} else {
access.insert(**id, nlist);

View file

@ -47,6 +47,7 @@ bytes = { workspace = true }
clap = { workspace = true, features = ["derive", "env"] }
csv = { workspace = true }
futures = { workspace = true }
hashbrown = { workspace = true }
libc = { workspace = true }
libsqlite3-sys = { workspace = true }
lru = { workspace = true }

View file

@ -1,4 +1,5 @@
use std::collections::{BTreeSet, HashSet};
use hashbrown::HashSet;
use std::collections::BTreeSet;
use std::num::NonZeroUsize;
use std::ops::{Add, Sub};
use std::path::Path;

View file

@ -51,6 +51,7 @@ use notify_debouncer_full::{new_debouncer, notify::RecursiveMode, notify::Watche
type AsyncTaskRequest = (TaskRequest, oneshot::Sender<()>);
#[derive(Default)]
struct ClientCodec;
impl Decoder for ClientCodec {
@ -84,12 +85,7 @@ impl Encoder<ClientResponse> for ClientCodec {
}
}
impl ClientCodec {
fn new() -> Self {
ClientCodec
}
}
#[derive(Default)]
struct TaskCodec;
impl Decoder for TaskCodec {
@ -122,12 +118,6 @@ impl Encoder<TaskRequest> for TaskCodec {
}
}
impl TaskCodec {
fn new() -> Self {
TaskCodec
}
}
/// Pass this a file path and it'll look for the file and remove it if it's there.
fn rm_if_exist(p: &str) {
if Path::new(p).exists() {
@ -149,7 +139,7 @@ async fn handle_task_client(
task_channel_rx: &mut Receiver<AsyncTaskRequest>,
) -> Result<(), Box<dyn Error>> {
// setup the codec
let mut reqs = Framed::new(stream, TaskCodec::new());
let mut reqs = Framed::new(stream, TaskCodec);
loop {
// TODO wait on the channel OR the task handler, so we know
@ -195,7 +185,12 @@ async fn handle_client(
task_channel_tx: &Sender<AsyncTaskRequest>,
) -> Result<(), Box<dyn Error>> {
debug!("Accepted connection");
let mut reqs = Framed::new(sock, ClientCodec::new());
let Ok(ucred) = sock.peer_cred() else {
return Err(Box::new(IoError::new(ErrorKind::Other, "Unable to verify peer credentials.")));
};
let mut reqs = Framed::new(sock, ClientCodec);
trace!("Waiting for requests ...");
while let Some(Ok(req)) = reqs.next().await {
@ -346,11 +341,16 @@ async fn handle_client(
}
ClientRequest::ClearCache => {
debug!("clear cache");
cachelayer
.clear_cache()
.await
.map(|_| ClientResponse::Ok)
.unwrap_or(ClientResponse::Error)
if ucred.uid() == 0 {
cachelayer
.clear_cache()
.await
.map(|_| ClientResponse::Ok)
.unwrap_or(ClientResponse::Error)
} else {
error!("Only root may clear the cache");
ClientResponse::Error
}
}
ClientRequest::Status => {
debug!("status check");
@ -725,16 +725,14 @@ async fn main() -> ExitCode {
Ok((socket, _addr)) => {
// Did it come from root?
if let Ok(ucred) = socket.peer_cred() {
if ucred.uid() == 0 {
// all good!
} else {
if ucred.uid() != 0 {
// move along.
debug!("Task handler not running as root, ignoring ...");
warn!("Task handler not running as root, ignoring ...");
continue;
}
} else {
// move along.
debug!("Task handler not running as root, ignoring ...");
warn!("Unable to determine socked peer cred, ignoring ...");
continue;
};
debug!("A task handler has connected.");

View file

@ -65,7 +65,6 @@ async fn setup_test(fix_fn: Fixture) -> (CacheLayer, KanidmClient) {
// Setup the config ...
let mut config = Configuration::new();
config.address = format!("127.0.0.1:{}", port);
config.secure_cookies = false;
config.integration_test_config = Some(int_config);
config.role = ServerRole::WriteReplicaNoUI;
config.threads = 1;