Added orca flag to extend privileged authentication expiry (#2949)

This commit is contained in:
Sebastiano Tocci 2024-08-03 02:37:49 +02:00 committed by GitHub
parent 3ae8453375
commit 12f297e526
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
10 changed files with 92 additions and 9 deletions

View file

@ -53,8 +53,11 @@ pub async fn populate(_client: &KanidmOrcaClient, profile: Profile) -> Result<St
let thread_count = profile.thread_count(); let thread_count = profile.thread_count();
// PHASE 0 - For now, set require MFA off. // PHASE 0 - For now, set require MFA off and extend the privilege expiry.
let preflight_flags = vec![Flag::DisableAllPersonsMFAPolicy]; let preflight_flags = vec![
Flag::DisableAllPersonsMFAPolicy,
Flag::ExtendPrivilegedAuthExpiry,
];
// PHASE 1 - generate a pool of persons that are not-yet created for future import. // PHASE 1 - generate a pool of persons that are not-yet created for future import.

View file

@ -64,6 +64,24 @@ impl KanidmOrcaClient {
}) })
} }
pub async fn extend_privilege_expiry(&self) -> Result<(), Error> {
self.idm_admin_client
.group_account_policy_privilege_expiry_set("idm_all_persons", 3600)
.await
.map_err(|err| {
error!(?err, "Unable to modify idm_all_persons policy");
Error::KanidmClient
})?;
self.idm_admin_client
.group_account_policy_privilege_expiry_set("idm_all_accounts", 3600)
.await
.map_err(|err| {
error!(?err, "Unable to modify idm_all_accounts policy");
Error::KanidmClient
})
}
pub async fn person_exists(&self, username: &str) -> Result<bool, Error> { pub async fn person_exists(&self, username: &str) -> Result<bool, Error> {
self.idm_admin_client self.idm_admin_client
.idm_person_account_get(username) .idm_person_account_get(username)

View file

@ -77,6 +77,7 @@ fn main() -> ExitCode {
profile_path, profile_path,
threads, threads,
model, model,
dump_raw_data,
} => { } => {
// For now I hardcoded some dimensions, but we should prompt // For now I hardcoded some dimensions, but we should prompt
// the user for these later. // the user for these later.
@ -98,6 +99,7 @@ fn main() -> ExitCode {
idm_admin_password, idm_admin_password,
model, model,
threads, threads,
dump_raw_data,
) )
.seed(seed); .seed(seed);

View file

@ -32,6 +32,7 @@ impl Transition {
} }
} }
#[derive(Eq, PartialEq, Ord, PartialOrd)]
pub enum TransitionResult { pub enum TransitionResult {
// Success // Success
Ok, Ok,

View file

@ -70,6 +70,10 @@ pub enum OrcaOpt {
#[clap(long, default_value_t, value_enum)] #[clap(long, default_value_t, value_enum)]
// Optional model to run the benchmark, defaults to the `Basic` model // Optional model to run the benchmark, defaults to the `Basic` model
model: Model, model: Model,
#[clap(long, default_value_t)]
/// Dump raw data to a separate csv file, defaults to false
dump_raw_data: bool,
}, },
#[clap(name = "conntest")] #[clap(name = "conntest")]

View file

@ -12,6 +12,7 @@ async fn apply_flags(client: Arc<kani::KanidmOrcaClient>, flags: &[Flag]) -> Res
for flag in flags { for flag in flags {
match flag { match flag {
Flag::DisableAllPersonsMFAPolicy => client.disable_mfa_requirement().await?, Flag::DisableAllPersonsMFAPolicy => client.disable_mfa_requirement().await?,
Flag::ExtendPrivilegedAuthExpiry => client.extend_privilege_expiry().await?,
} }
} }
Ok(()) Ok(())

View file

@ -36,6 +36,8 @@ pub struct Profile {
thread_count: Option<usize>, thread_count: Option<usize>,
model: Model, model: Model,
group: BTreeMap<String, GroupProperties>, group: BTreeMap<String, GroupProperties>,
#[serde(default)]
dump_raw_data: bool,
} }
impl Profile { impl Profile {
@ -91,6 +93,10 @@ impl Profile {
pub fn test_time(&self) -> Option<Duration> { pub fn test_time(&self) -> Option<Duration> {
self.test_time.map(Duration::from_secs) self.test_time.map(Duration::from_secs)
} }
pub fn dump_raw_data(&self) -> bool {
self.dump_raw_data
}
} }
pub struct ProfileBuilder { pub struct ProfileBuilder {
@ -106,6 +112,7 @@ pub struct ProfileBuilder {
pub person_count: Option<u64>, pub person_count: Option<u64>,
pub thread_count: Option<usize>, pub thread_count: Option<usize>,
pub model: Model, pub model: Model,
pub dump_raw_data: bool,
} }
fn validate_u64_bound(value: Option<u64>, default: u64) -> Result<u64, Error> { fn validate_u64_bound(value: Option<u64>, default: u64) -> Result<u64, Error> {
@ -129,6 +136,7 @@ impl ProfileBuilder {
idm_admin_password: String, idm_admin_password: String,
model: Model, model: Model,
thread_count: Option<usize>, thread_count: Option<usize>,
dump_raw_data: bool,
) -> Self { ) -> Self {
ProfileBuilder { ProfileBuilder {
control_uri, control_uri,
@ -142,6 +150,7 @@ impl ProfileBuilder {
person_count: None, person_count: None,
thread_count, thread_count,
model, model,
dump_raw_data,
} }
} }
@ -187,6 +196,7 @@ impl ProfileBuilder {
person_count, person_count,
thread_count, thread_count,
model, model,
dump_raw_data,
} = self; } = self;
let seed: u64 = seed.unwrap_or_else(|| { let seed: u64 = seed.unwrap_or_else(|| {
@ -224,6 +234,7 @@ impl ProfileBuilder {
thread_count, thread_count,
group, group,
model, model,
dump_raw_data,
}) })
} }
} }

View file

@ -11,6 +11,7 @@ use crossbeam::queue::{ArrayQueue, SegQueue};
use kanidm_client::{KanidmClient, KanidmClientBuilder}; use kanidm_client::{KanidmClient, KanidmClientBuilder};
use serde::Serialize;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
@ -48,7 +49,7 @@ pub struct EventRecord {
pub details: EventDetail, pub details: EventDetail,
} }
#[derive(Debug)] #[derive(Debug, Serialize, Clone)]
pub enum EventDetail { pub enum EventDetail {
Login, Login,
Logout, Logout,
@ -145,8 +146,11 @@ pub async fn execute(state: State, control_rx: broadcast::Receiver<Signal>) -> R
let mut dyn_data_collector = let mut dyn_data_collector =
BasicStatistics::new(state.persons.len(), state.groups.len(), node_count); BasicStatistics::new(state.persons.len(), state.groups.len(), node_count);
let stats_task = let dump_raw_data = state.profile.dump_raw_data();
tokio::task::spawn_blocking(move || dyn_data_collector.run(c_stats_queue, c_stats_ctrl));
let stats_task = tokio::task::spawn_blocking(move || {
dyn_data_collector.run(c_stats_queue, c_stats_ctrl, dump_raw_data)
});
// Create clients. Note, we actually seed these deterministically too, so that // Create clients. Note, we actually seed these deterministically too, so that
// our persons are spread over the clients that exist, in a way that is also // our persons are spread over the clients that exist, in a way that is also

View file

@ -58,6 +58,7 @@ impl TryFrom<&Path> for State {
#[derive(Debug, Serialize, Deserialize)] #[derive(Debug, Serialize, Deserialize)]
pub enum Flag { pub enum Flag {
DisableAllPersonsMFAPolicy, DisableAllPersonsMFAPolicy,
ExtendPrivilegedAuthExpiry,
} }
#[derive(Default, Debug, Serialize, Deserialize)] #[derive(Default, Debug, Serialize, Deserialize)]

View file

@ -22,6 +22,7 @@ pub trait DataCollector {
&mut self, &mut self,
stats_queue: Arc<SegQueue<EventRecord>>, stats_queue: Arc<SegQueue<EventRecord>>,
ctrl: Arc<ArrayQueue<TestPhase>>, ctrl: Arc<ArrayQueue<TestPhase>>,
dump_raw_data: bool,
) -> Result<(), Error>; ) -> Result<(), Error>;
} }
@ -30,6 +31,7 @@ enum OpKind {
ReadOp, ReadOp,
ReplicationDelay, ReplicationDelay,
Auth, //TODO! does this make sense? Auth, //TODO! does this make sense?
Error,
} }
impl From<EventDetail> for OpKind { impl From<EventDetail> for OpKind {
@ -42,11 +44,9 @@ impl From<EventDetail> for OpKind {
| EventDetail::PersonSetSelfPassword | EventDetail::PersonSetSelfPassword
| EventDetail::PersonCreateGroup | EventDetail::PersonCreateGroup
| EventDetail::PersonAddGroupMembers => OpKind::WriteOp, | EventDetail::PersonAddGroupMembers => OpKind::WriteOp,
EventDetail::Error EventDetail::Login | EventDetail::Logout | EventDetail::PersonReauth => OpKind::Auth,
| EventDetail::Login
| EventDetail::Logout
| EventDetail::PersonReauth => OpKind::Auth,
EventDetail::GroupReplicationDelay => OpKind::ReplicationDelay, EventDetail::GroupReplicationDelay => OpKind::ReplicationDelay,
EventDetail::Error => OpKind::Error,
} }
} }
} }
@ -76,6 +76,7 @@ impl DataCollector for BasicStatistics {
&mut self, &mut self,
stats_queue: Arc<SegQueue<EventRecord>>, stats_queue: Arc<SegQueue<EventRecord>>,
ctrl: Arc<ArrayQueue<TestPhase>>, ctrl: Arc<ArrayQueue<TestPhase>>,
dump_raw_data: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
debug!("Started statistics collector"); debug!("Started statistics collector");
@ -123,6 +124,7 @@ impl DataCollector for BasicStatistics {
let mut readop_times = Vec::new(); let mut readop_times = Vec::new();
let mut writeop_times = Vec::new(); let mut writeop_times = Vec::new();
let mut replication_delays = Vec::new(); let mut replication_delays = Vec::new();
let mut raw_stats = Vec::new();
// We will drain this now. // We will drain this now.
while let Some(event_record) = stats_queue.pop() { while let Some(event_record) = stats_queue.pop() {
@ -131,6 +133,13 @@ impl DataCollector for BasicStatistics {
continue; continue;
} }
if dump_raw_data {
raw_stats.push(SerializableEventRecord::from_event_record(
&event_record,
start,
));
}
match OpKind::from(event_record.details) { match OpKind::from(event_record.details) {
OpKind::ReadOp => { OpKind::ReadOp => {
readop_times.push(event_record.duration.as_secs_f64()); readop_times.push(event_record.duration.as_secs_f64());
@ -142,6 +151,7 @@ impl DataCollector for BasicStatistics {
replication_delays.push(event_record.duration.as_secs_f64()) replication_delays.push(event_record.duration.as_secs_f64())
} }
OpKind::Auth => {} OpKind::Auth => {}
OpKind::Error => {}
} }
} }
@ -196,12 +206,40 @@ impl DataCollector for BasicStatistics {
let mut wrt = Writer::from_path(filepath).map_err(|_| Error::Io)?; let mut wrt = Writer::from_path(filepath).map_err(|_| Error::Io)?;
wrt.serialize(stats).map_err(|_| Error::Io)?; wrt.serialize(stats).map_err(|_| Error::Io)?;
if dump_raw_data {
let raw_data_filepath = format!("orca-run-{}-raw.csv", now.to_rfc3339());
info!("Now saving raw data as '{raw_data_filepath}'");
let mut wrt = Writer::from_path(raw_data_filepath).map_err(|_| Error::Io)?;
for record in raw_stats.iter() {
wrt.serialize(record).map_err(|_| Error::Io)?;
}
}
debug!("Ended statistics collector"); debug!("Ended statistics collector");
Ok(()) Ok(())
} }
} }
// CSV-serializable projection of an EventRecord: absolute `Instant`s are
// converted to millisecond offsets so each row can go through csv's Writer.
#[derive(Serialize)]
struct SerializableEventRecord {
    // Milliseconds from the start of the test run to this event's start.
    time_from_start_ms: u128,
    // How long the event itself took, in milliseconds.
    duration_ms: u128,
    // The kind of operation this event recorded (Login, Logout, Error, ...).
    details: EventDetail,
}
impl SerializableEventRecord {
fn from_event_record(event_record: &EventRecord, test_start: Instant) -> Self {
SerializableEventRecord {
time_from_start_ms: event_record.start.duration_since(test_start).as_millis(),
duration_ms: event_record.duration.as_millis(),
details: event_record.details.clone(),
}
}
}
#[derive(Serialize)] #[derive(Serialize)]
struct StatsContainer { struct StatsContainer {
node_count: usize, node_count: usize,