Improve errors, tagging, logging and more across the codebase. (#243)

This commit is contained in:
Firstyear 2020-06-05 14:01:20 +10:00 committed by GitHub
parent 5cb837a1bf
commit 4bed9c87bf
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
41 changed files with 3033 additions and 2253 deletions

2736
Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -1,4 +1,4 @@
use crate::{ClientError, KanidmClientBuilder, APPLICATION_JSON};
use crate::{ClientError, KanidmClientBuilder, APPLICATION_JSON, KOPID};
use reqwest::header::CONTENT_TYPE;
use serde::de::DeserializeOwned;
use serde::Serialize;
@ -34,6 +34,12 @@ impl KanidmAsyncClient {
.await
.map_err(ClientError::Transport)?;
let opid = response.headers().get(KOPID);
debug!(
"opid -> {:?}",
opid.expect("Missing opid? Refusing to proceed ...")
);
match response.status() {
reqwest::StatusCode::OK => {}
unexpect => return Err(ClientError::Http(unexpect, response.json().await.ok())),
@ -66,6 +72,12 @@ impl KanidmAsyncClient {
.await
.map_err(ClientError::Transport)?;
let opid = response.headers().get(KOPID);
debug!(
"opid -> {:?}",
opid.expect("Missing opid? Refusing to proceed ...")
);
match response.status() {
reqwest::StatusCode::OK => {}
unexpect => return Err(ClientError::Http(unexpect, response.json().await.ok())),
@ -88,6 +100,12 @@ impl KanidmAsyncClient {
.await
.map_err(ClientError::Transport)?;
let opid = response.headers().get(KOPID);
debug!(
"opid -> {:?}",
opid.expect("Missing opid? Refusing to proceed ...")
);
match response.status() {
reqwest::StatusCode::OK => {}
unexpect => return Err(ClientError::Http(unexpect, response.json().await.ok())),
@ -108,6 +126,12 @@ impl KanidmAsyncClient {
.await
.map_err(ClientError::Transport)?;
let opid = response.headers().get(KOPID);
debug!(
"opid -> {:?}",
opid.expect("Missing opid? Refusing to proceed ...")
);
match response.status() {
reqwest::StatusCode::OK => {}
unexpect => return Err(ClientError::Http(unexpect, response.json().await.ok())),

View file

@ -28,6 +28,7 @@ pub mod asynchronous;
use crate::asynchronous::KanidmAsyncClient;
pub const APPLICATION_JSON: &str = "application/json";
pub const KOPID: &str = "X-KANIDM-OPID";
#[derive(Debug)]
pub enum ClientError {
@ -302,6 +303,12 @@ impl KanidmClient {
.send()
.map_err(ClientError::Transport)?;
let opid = response.headers().get(KOPID);
debug!(
"opid -> {:?}",
opid.expect("Missing opid? Refusing to proceed ...")
);
match response.status() {
reqwest::StatusCode::OK => {}
unexpect => return Err(ClientError::Http(unexpect, response.json().ok())),
@ -330,6 +337,12 @@ impl KanidmClient {
.send()
.map_err(ClientError::Transport)?;
let opid = response.headers().get(KOPID);
debug!(
"opid -> {:?}",
opid.expect("Missing opid? Refusing to proceed ...")
);
match response.status() {
reqwest::StatusCode::OK => {}
unexpect => return Err(ClientError::Http(unexpect, response.json().ok())),
@ -349,6 +362,12 @@ impl KanidmClient {
.send()
.map_err(ClientError::Transport)?;
let opid = response.headers().get(KOPID);
debug!(
"opid -> {:?}",
opid.expect("Missing opid? Refusing to proceed ...")
);
match response.status() {
reqwest::StatusCode::OK => {}
unexpect => return Err(ClientError::Http(unexpect, response.json().ok())),
@ -368,6 +387,12 @@ impl KanidmClient {
.send()
.map_err(ClientError::Transport)?;
let opid = response.headers().get(KOPID);
debug!(
"opid -> {:?}",
opid.expect("Missing opid? Refusing to proceed ...")
);
match response.status() {
reqwest::StatusCode::OK => {}
unexpect => return Err(ClientError::Http(unexpect, response.json().ok())),

View file

@ -15,7 +15,12 @@ static PORT_ALLOC: AtomicUsize = AtomicUsize::new(8080);
pub fn run_test(test_fn: fn(KanidmClient) -> ()) {
// ::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug");
let _ = env_logger::builder().is_test(true).try_init();
let _ = env_logger::builder()
.format_timestamp(None)
.format_level(false)
.is_test(true)
.try_init();
let (tx, rx) = mpsc::channel();
let port = PORT_ALLOC.fetch_add(1, Ordering::SeqCst);
@ -33,16 +38,16 @@ pub fn run_test(test_fn: fn(KanidmClient) -> ()) {
// Spawn a thread for the test runner, this should have a unique
// port....
System::run(move || {
create_server_core(config);
let sctx = create_server_core(config);
// This appears to be bind random ...
// let srv = srv.bind("127.0.0.1:0").unwrap();
let _ = tx.send(System::current());
let _ = tx.send(sctx);
})
.expect("unable to start system");
});
let sys = rx.recv().unwrap();
System::set_current(sys.clone());
let sctx = rx.recv().unwrap().expect("failed to start ctx");
System::set_current(sctx.current());
// Do we need any fixtures?
// Yes probably, but they'll need to be futures as well ...
@ -59,5 +64,5 @@ pub fn run_test(test_fn: fn(KanidmClient) -> ()) {
// We DO NOT need teardown, as sqlite is in mem
// let the tables hit the floor
sys.stop();
sctx.stop();
}

View file

@ -98,8 +98,8 @@ fn is_attr_writable(rsclient: &KanidmClient, id: &str, attr: &str) -> Option<boo
),
entry => {
let new_value = match entry {
"acp_receiver" => "{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000011\"]}".to_string(),
"acp_targetscope" => "{\"And\": [{\"Eq\": [\"class\",\"access_control_profile\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}".to_string(),
"acp_receiver" => "{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000011\"]}".to_string(),
"acp_targetscope" => "{\"and\": [{\"eq\": [\"class\",\"access_control_profile\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}".to_string(),
_ => id.to_string(),
};
let m = ModifyList::new_list(vec![

View file

@ -16,6 +16,7 @@ uuid = { version = "0.8", features = ["serde", "v4"] }
# actix = { version = "0.9", optional = true }
zxcvbn = { version = "2.0", features = ["ser"] }
base32 = "0.4"
thiserror = "1.0"
[dev-dependencies]
serde_json = "1.0"

View file

@ -7,19 +7,31 @@ use uuid::Uuid;
/* ===== errors ===== */
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[derive(Serialize, Deserialize, Debug, PartialEq, thiserror::Error)]
#[serde(rename_all = "lowercase")]
pub enum SchemaError {
#[error("Not Implemented")]
NotImplemented,
InvalidClass,
MissingMustAttribute(String),
InvalidAttribute,
#[error("This entry does not have any classes, which means it can not have structure.")]
NoClassFound,
#[error("A class or classes are found that do not exist in schema.")]
InvalidClass(Vec<String>),
#[error("")]
MissingMustAttribute(Vec<String>),
#[error("")]
InvalidAttribute(String),
#[error("")]
InvalidAttributeSyntax,
#[error("")]
EmptyFilter,
#[error("The schema has become internally inconsistent. You must restart and investigate.")]
Corrupted,
PhantomAttribute,
#[error("Phantom attribute types may not be persisted on an entry")]
PhantomAttribute(String),
}
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum PluginError {
AttrUnique(String),
Base(String),
@ -28,6 +40,7 @@ pub enum PluginError {
}
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum ConsistencyError {
Unknown,
// Class, Attribute
@ -46,6 +59,7 @@ pub enum ConsistencyError {
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "lowercase")]
pub enum OperationError {
EmptyRequest,
Backend,
@ -224,6 +238,7 @@ pub struct Entry {
}
#[derive(Debug, Serialize, Deserialize, Clone, Ord, PartialOrd, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Filter {
// This is attr - value
Eq(String, String),
@ -232,11 +247,12 @@ pub enum Filter {
Or(Vec<Filter>),
And(Vec<Filter>),
AndNot(Box<Filter>),
#[serde(rename = "Self")]
#[serde(rename = "self")]
SelfUUID,
}
#[derive(Serialize, Deserialize, Debug, Clone)]
#[serde(rename_all = "lowercase")]
pub enum Modify {
Present(String, String),
Removed(String, String),
@ -350,6 +366,7 @@ impl fmt::Debug for AuthCredential {
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AuthStep {
// name, application id?
Init(String, Option<String>),
@ -372,6 +389,7 @@ pub struct AuthRequest {
// Respond with the list of auth types and nonce, etc.
// It can also contain a denied, or success.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AuthAllowed {
Anonymous,
Password,
@ -380,6 +398,7 @@ pub enum AuthAllowed {
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum AuthState {
// Everything is good, your cookie has been issued, and a token is set here
// for the client to view.
@ -398,6 +417,7 @@ pub struct AuthResponse {
// Types needed for setting credentials
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SetCredentialRequest {
Password(String),
GeneratePassword,
@ -408,6 +428,7 @@ pub enum SetCredentialRequest {
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum TOTPAlgo {
Sha1,
Sha256,
@ -460,6 +481,7 @@ impl TOTPSecret {
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum SetCredentialResponse {
Success,
Token(String),
@ -470,6 +492,7 @@ pub enum SetCredentialResponse {
// Only two actions on recycled is possible. Search and Revive.
/*
pub struct SearchRecycledRequest {
pub filter: Filter,
}
@ -479,9 +502,11 @@ impl SearchRecycledRequest {
SearchRecycledRequest { filter }
}
}
*/
// Need a search response here later.
/*
pub struct ReviveRecycledRequest {
pub filter: Filter,
}
@ -491,8 +516,10 @@ impl ReviveRecycledRequest {
ReviveRecycledRequest { filter }
}
}
*/
// This doesn't need seralise because it's only accessed via a "get".
/*
#[derive(Debug, Default)]
pub struct WhoamiRequest {}
@ -501,6 +528,7 @@ impl WhoamiRequest {
Default::default()
}
}
*/
#[derive(Debug, Serialize, Deserialize)]
pub struct WhoamiResponse {

View file

@ -38,13 +38,13 @@ fn run_test(fix_fn: fn(&KanidmClient) -> (), test_fn: fn(CacheLayer, KanidmAsync
// Spawn a thread for the test runner, this should have a unique
// port....
System::run(move || {
create_server_core(config);
let _ = tx.send(System::current());
let sctx = create_server_core(config);
let _ = tx.send(sctx);
})
.expect("Failed to start system");
});
let sys = rx.recv().unwrap();
System::set_current(sys.clone());
let sctx = rx.recv().unwrap().expect("Failed to start server core");
System::set_current(sctx.current());
// Setup the client, and the address we selected.
let addr = format!("http://127.0.0.1:{}", port);
@ -78,7 +78,7 @@ fn run_test(fix_fn: fn(&KanidmClient) -> (), test_fn: fn(CacheLayer, KanidmAsync
// We DO NOT need teardown, as sqlite is in mem
// let the tables hit the floor
sys.stop();
sctx.stop();
}
fn test_fixture(rsclient: &KanidmClient) -> () {

View file

@ -29,6 +29,9 @@ path = "src/server/main.rs"
kanidm_proto = { path = "../kanidm_proto", version = "0.1" }
actix = "0.9"
# actix = { version = "0.9", path = "../../actix" }
actix-rt = "1.1"
actix-web = { version = "2.0", features = ["openssl"] }
actix-session = "0.3"
actix-files = "0.2"
@ -60,6 +63,7 @@ time = "0.1"
concread = "0.1"
# concread = { path = "../../concread" }
crossbeam = "0.7"
openssl = "0.10"
sshkeys = "0.1"

View file

@ -482,6 +482,12 @@ pub trait AccessControlsTransaction {
})
.collect();
if allowed_entries.len() > 0 {
lsecurity_access!(audit, "allowed {} entries ✅", allowed_entries.len());
} else {
lsecurity_access!(audit, "denied ❌");
}
Ok(allowed_entries)
}
@ -639,6 +645,12 @@ pub trait AccessControlsTransaction {
e.reduce_attributes(f_allowed_attrs)
})
.collect();
lsecurity_access!(
audit,
"attribute set reduced on {} entries",
allowed_entries.len()
);
Ok(allowed_entries)
}
@ -652,6 +664,7 @@ pub trait AccessControlsTransaction {
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &me.event.origin {
EventOrigin::Internal => {
lsecurity_access!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(true);
}
@ -833,6 +846,11 @@ pub trait AccessControlsTransaction {
result
} // if acc == false
});
if r {
lsecurity_access!(audit, "allowed ✅");
} else {
lsecurity_access!(audit, "denied ❌");
}
Ok(r)
}
@ -846,6 +864,7 @@ pub trait AccessControlsTransaction {
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &ce.event.origin {
EventOrigin::Internal => {
lsecurity_access!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(true);
}
@ -998,6 +1017,12 @@ pub trait AccessControlsTransaction {
// if no acp allows, fail operation.
});
if r {
lsecurity_access!(audit, "allowed ✅");
} else {
lsecurity_access!(audit, "denied ❌");
}
Ok(r)
}
@ -1011,6 +1036,7 @@ pub trait AccessControlsTransaction {
let rec_entry: &Entry<EntrySealed, EntryCommitted> = match &de.event.origin {
EventOrigin::Internal => {
lsecurity_access!(audit, "Internal operation, bypassing access check");
// No need to check ACS
return Ok(true);
}
@ -1098,6 +1124,11 @@ pub trait AccessControlsTransaction {
}) // fold related_acp
} // if/else
});
if r {
lsecurity_access!(audit, "allowed ✅");
} else {
lsecurity_access!(audit, "denied ❌");
}
Ok(r)
}
}
@ -1377,10 +1408,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
]
}
}"#,
@ -1405,10 +1436,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
]
}
}"#,
@ -1426,10 +1457,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
]
}
}"#,
@ -1456,10 +1487,10 @@ mod tests {
"name": ["acp_invalid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_search_attr": ["name", "class"]
}
@ -1479,10 +1510,10 @@ mod tests {
"name": ["acp_invalid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_search_attr": ["name", "class"]
}
@ -1502,10 +1533,10 @@ mod tests {
"name": ["acp_invalid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
]
}
}"#,
@ -1524,10 +1555,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_search_attr": ["name", "class"]
}
@ -1554,10 +1585,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_modify_removedattr": ["name"],
"acp_modify_presentattr": ["name"],
@ -1578,10 +1609,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
]
}
}"#,
@ -1599,10 +1630,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_modify_removedattr": ["name"],
"acp_modify_presentattr": ["name"],
@ -1631,10 +1662,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_create_class": ["object"],
"acp_create_attr": ["name"]
@ -1654,10 +1685,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
]
}
}"#,
@ -1675,10 +1706,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_create_class": ["object"],
"acp_create_attr": ["name"]
@ -1713,10 +1744,10 @@ mod tests {
"name": ["acp_valid"],
"uuid": ["cc8e95b4-c24f-4d68-ba54-8bed76f63930"],
"acp_receiver": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"name\",\"a\"]}"
"{\"eq\":[\"name\",\"a\"]}"
],
"acp_search_attr": ["name"],
"acp_create_class": ["object"],
@ -1746,12 +1777,12 @@ mod tests {
acw.update_search($controls).expect("Failed to update");
let acw = acw;
let mut audit = AuditScope::new("test_acp_search");
let mut audit = AuditScope::new("test_acp_search", uuid::Uuid::new_v4());
let res = acw
.search_filter_entries(&mut audit, $se, $entries)
.expect("op failed");
println!("result --> {:?}", res);
println!("expect --> {:?}", $expect);
debug!("result --> {:?}", res);
debug!("expect --> {:?}", $expect);
// should be ok, and same as expect.
assert!(res == $expect);
}};
@ -1849,7 +1880,7 @@ mod tests {
acw.update_search($controls).expect("Failed to update");
let acw = acw;
let mut audit = AuditScope::new("test_acp_search_reduce");
let mut audit = AuditScope::new("test_acp_search_reduce", uuid::Uuid::new_v4());
// We still have to reduce the entries to be sure that we are good.
let res = acw
.search_filter_entries(&mut audit, $se, $entries)
@ -1865,8 +1896,8 @@ mod tests {
.map(|e| unsafe { e.into_reduced() })
.collect();
println!("expect --> {:?}", expect_set);
println!("result --> {:?}", reduced);
debug!("expect --> {:?}", expect_set);
debug!("result --> {:?}", reduced);
// should be ok, and same as expect.
assert!(reduced == expect_set);
}};
@ -1972,12 +2003,12 @@ mod tests {
acw.update_modify($controls).expect("Failed to update");
let acw = acw;
let mut audit = AuditScope::new("test_acp_modify");
let mut audit = AuditScope::new("test_acp_modify", uuid::Uuid::new_v4());
let res = acw
.modify_allow_operation(&mut audit, $me, $entries)
.expect("op failed");
println!("result --> {:?}", res);
println!("expect --> {:?}", $expect);
debug!("result --> {:?}", res);
debug!("expect --> {:?}", $expect);
// should be ok, and same as expect.
assert!(res == $expect);
}};
@ -2134,12 +2165,12 @@ mod tests {
acw.update_create($controls).expect("Failed to update");
let acw = acw;
let mut audit = AuditScope::new("test_acp_create");
let mut audit = AuditScope::new("test_acp_create", uuid::Uuid::new_v4());
let res = acw
.create_allow_operation(&mut audit, $ce, $entries)
.expect("op failed");
println!("result --> {:?}", res);
println!("expect --> {:?}", $expect);
debug!("result --> {:?}", res);
debug!("expect --> {:?}", $expect);
// should be ok, and same as expect.
assert!(res == $expect);
}};
@ -2260,12 +2291,12 @@ mod tests {
acw.update_delete($controls).expect("Failed to update");
let acw = acw;
let mut audit = AuditScope::new("test_acp_delete");
let mut audit = AuditScope::new("test_acp_delete", uuid::Uuid::new_v4());
let res = acw
.delete_allow_operation(&mut audit, $de, $entries)
.expect("op failed");
println!("result --> {:?}", res);
println!("expect --> {:?}", $expect);
debug!("result --> {:?}", res);
debug!("expect --> {:?}", $expect);
// should be ok, and same as expect.
assert!(res == $expect);
}};

View file

@ -1,8 +1,8 @@
use crossbeam::channel::Sender;
use std::sync::Arc;
use crate::audit::AuditScope;
use crate::async_log::EventLog;
use crate::event::{AuthEvent, SearchEvent, SearchResult, WhoamiResult};
use crate::idm::event::{
RadiusAuthTokenEvent, UnixGroupTokenEvent, UnixUserAuthEvent, UnixUserTokenEvent,
@ -33,11 +33,12 @@ use uuid::Uuid;
pub struct WhoamiMessage {
pub uat: Option<UserAuthToken>,
pub eventid: Uuid,
}
impl WhoamiMessage {
pub fn new(uat: Option<UserAuthToken>) -> Self {
WhoamiMessage { uat }
pub fn new(uat: Option<UserAuthToken>, eventid: Uuid) -> Self {
WhoamiMessage { uat, eventid }
}
}
@ -49,11 +50,16 @@ impl Message for WhoamiMessage {
pub struct AuthMessage {
pub sessionid: Option<Uuid>,
pub req: AuthRequest,
pub eventid: Uuid,
}
impl AuthMessage {
pub fn new(req: AuthRequest, sessionid: Option<Uuid>) -> Self {
AuthMessage { sessionid, req }
pub fn new(req: AuthRequest, sessionid: Option<Uuid>, eventid: Uuid) -> Self {
AuthMessage {
sessionid,
req,
eventid,
}
}
}
@ -64,11 +70,12 @@ impl Message for AuthMessage {
pub struct SearchMessage {
pub uat: Option<UserAuthToken>,
pub req: SearchRequest,
pub eventid: Uuid,
}
impl SearchMessage {
pub fn new(uat: Option<UserAuthToken>, req: SearchRequest) -> Self {
SearchMessage { uat, req }
pub fn new(uat: Option<UserAuthToken>, req: SearchRequest, eventid: Uuid) -> Self {
SearchMessage { uat, req, eventid }
}
}
@ -80,6 +87,7 @@ pub struct InternalSearchMessage {
pub uat: Option<UserAuthToken>,
pub filter: Filter<FilterInvalid>,
pub attrs: Option<Vec<String>>,
pub eventid: Uuid,
}
impl Message for InternalSearchMessage {
@ -90,6 +98,7 @@ pub struct InternalSearchRecycledMessage {
pub uat: Option<UserAuthToken>,
pub filter: Filter<FilterInvalid>,
pub attrs: Option<Vec<String>>,
pub eventid: Uuid,
}
impl Message for InternalSearchRecycledMessage {
@ -99,6 +108,7 @@ impl Message for InternalSearchRecycledMessage {
pub struct InternalRadiusReadMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub eventid: Uuid,
}
impl Message for InternalRadiusReadMessage {
@ -108,6 +118,7 @@ impl Message for InternalRadiusReadMessage {
pub struct InternalRadiusTokenReadMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub eventid: Uuid,
}
impl Message for InternalRadiusTokenReadMessage {
@ -117,6 +128,7 @@ impl Message for InternalRadiusTokenReadMessage {
pub struct InternalUnixUserTokenReadMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub eventid: Uuid,
}
impl Message for InternalUnixUserTokenReadMessage {
@ -126,6 +138,7 @@ impl Message for InternalUnixUserTokenReadMessage {
pub struct InternalUnixGroupTokenReadMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub eventid: Uuid,
}
impl Message for InternalUnixGroupTokenReadMessage {
@ -135,6 +148,7 @@ impl Message for InternalUnixGroupTokenReadMessage {
pub struct InternalSshKeyReadMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub eventid: Uuid,
}
impl Message for InternalSshKeyReadMessage {
@ -145,6 +159,7 @@ pub struct InternalSshKeyTagReadMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub tag: String,
pub eventid: Uuid,
}
impl Message for InternalSshKeyTagReadMessage {
@ -155,6 +170,7 @@ pub struct IdmAccountUnixAuthMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub cred: String,
pub eventid: Uuid,
}
impl Message for IdmAccountUnixAuthMessage {
@ -164,7 +180,7 @@ impl Message for IdmAccountUnixAuthMessage {
// ===========================================================
pub struct QueryServerReadV1 {
log: actix::Addr<EventLog>,
log: Sender<Option<AuditScope>>,
qs: QueryServer,
idms: Arc<IdmServer>,
}
@ -178,13 +194,13 @@ impl Actor for QueryServerReadV1 {
}
impl QueryServerReadV1 {
pub fn new(log: actix::Addr<EventLog>, qs: QueryServer, idms: Arc<IdmServer>) -> Self {
log_event!(log, "Starting query server v1 worker ...");
pub fn new(log: Sender<Option<AuditScope>>, qs: QueryServer, idms: Arc<IdmServer>) -> Self {
info!("Starting query server v1 worker ...");
QueryServerReadV1 { log, qs, idms }
}
pub fn start(
log: actix::Addr<EventLog>,
log: Sender<Option<AuditScope>>,
query_server: QueryServer,
idms: Arc<IdmServer>,
threads: usize,
@ -204,7 +220,7 @@ impl Handler<SearchMessage> for QueryServerReadV1 {
type Result = Result<SearchResponse, OperationError>;
fn handle(&mut self, msg: SearchMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("search");
let mut audit = AuditScope::new("search", msg.eventid.clone());
let res = lperf_segment!(&mut audit, "actors::v1_read::handle<SearchMessage>", || {
// Begin a read
let mut qs_read = self.qs.read();
@ -227,7 +243,10 @@ impl Handler<SearchMessage> for QueryServerReadV1 {
}
});
// At the end of the event we send it for logging.
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -240,7 +259,7 @@ impl Handler<AuthMessage> for QueryServerReadV1 {
// "on top" of the db server concept. In this case we check if
// the credentials provided is sufficient to say if someone is
// "authenticated" or not.
let mut audit = AuditScope::new("auth");
let mut audit = AuditScope::new("auth", msg.eventid.clone());
let res = lperf_segment!(&mut audit, "actors::v1_read::handle<AuthMessage>", || {
lsecurity!(audit, "Begin auth event {:?}", msg);
@ -259,20 +278,27 @@ impl Handler<AuthMessage> for QueryServerReadV1 {
// Trigger a session clean *before* we take any auth steps.
// It's important to do this before to ensure that timeouts on
// the session are enforced.
idm_write.expire_auth_sessions(ct);
lperf_segment!(
audit,
"actors::v1_read::handle<AuthMessage> -> expire_auth_sessions",
|| { idm_write.expire_auth_sessions(ct) }
);
// Generally things like auth denied are in Ok() msgs
// so true errors should always trigger a rollback.
let r = idm_write
.auth(&mut audit, &ae, ct)
.and_then(|r| idm_write.commit().map(|_| r));
.and_then(|r| idm_write.commit(&mut audit).map(|_| r));
lsecurity!(audit, "Sending result -> {:?}", r);
lsecurity!(audit, "Sending auth result -> {:?}", r);
// Build the result.
r.map(|r| r.response())
});
// At the end of the event we send it for logging.
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -281,7 +307,7 @@ impl Handler<WhoamiMessage> for QueryServerReadV1 {
type Result = Result<WhoamiResponse, OperationError>;
fn handle(&mut self, msg: WhoamiMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("whoami");
let mut audit = AuditScope::new("whoami", msg.eventid.clone());
let res = lperf_segment!(&mut audit, "actors::v1_read::handle<WhoamiMessage>", || {
// TODO #62: Move this to IdmServer!!!
// Begin a read
@ -327,7 +353,10 @@ impl Handler<WhoamiMessage> for QueryServerReadV1 {
});
// Should we log the final result?
// At the end of the event we send it for logging.
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -336,7 +365,7 @@ impl Handler<InternalSearchMessage> for QueryServerReadV1 {
type Result = Result<Vec<ProtoEntry>, OperationError>;
fn handle(&mut self, msg: InternalSearchMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_search_message");
let mut audit = AuditScope::new("internal_search_message", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_read::handle<InternalSearchMessage>",
@ -361,7 +390,10 @@ impl Handler<InternalSearchMessage> for QueryServerReadV1 {
}
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -374,7 +406,7 @@ impl Handler<InternalSearchRecycledMessage> for QueryServerReadV1 {
msg: InternalSearchRecycledMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit = AuditScope::new("internal_search_recycle_message");
let mut audit = AuditScope::new("internal_search_recycle_message", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_read::handle<InternalSearchRecycledMessage>",
@ -401,7 +433,10 @@ impl Handler<InternalSearchRecycledMessage> for QueryServerReadV1 {
}
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -410,7 +445,7 @@ impl Handler<InternalRadiusReadMessage> for QueryServerReadV1 {
type Result = Result<Option<String>, OperationError>;
fn handle(&mut self, msg: InternalRadiusReadMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_radius_read_message");
let mut audit = AuditScope::new("internal_radius_read_message", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_read::handle<InternalRadiusReadMessage>",
@ -459,7 +494,10 @@ impl Handler<InternalRadiusReadMessage> for QueryServerReadV1 {
}
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -472,7 +510,7 @@ impl Handler<InternalRadiusTokenReadMessage> for QueryServerReadV1 {
msg: InternalRadiusTokenReadMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit = AuditScope::new("internal_radius_token_read_message");
let mut audit = AuditScope::new("internal_radius_token_read_message", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_read::handle<InternalRadiusTokenReadMessage>",
@ -509,7 +547,10 @@ impl Handler<InternalRadiusTokenReadMessage> for QueryServerReadV1 {
idm_read.get_radiusauthtoken(&mut audit, &rate)
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -522,7 +563,7 @@ impl Handler<InternalUnixUserTokenReadMessage> for QueryServerReadV1 {
msg: InternalUnixUserTokenReadMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit = AuditScope::new("internal_unix_token_read_message");
let mut audit = AuditScope::new("internal_unix_token_read_message", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_read::handle<InternalUnixUserTokenReadMessage>",
@ -558,7 +599,10 @@ impl Handler<InternalUnixUserTokenReadMessage> for QueryServerReadV1 {
idm_read.get_unixusertoken(&mut audit, &rate)
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -571,7 +615,8 @@ impl Handler<InternalUnixGroupTokenReadMessage> for QueryServerReadV1 {
msg: InternalUnixGroupTokenReadMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit = AuditScope::new("internal_unixgroup_token_read_message");
let mut audit =
AuditScope::new("internal_unixgroup_token_read_message", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_read::handle<InternalUnixGroupTokenReadMessage>",
@ -607,7 +652,10 @@ impl Handler<InternalUnixGroupTokenReadMessage> for QueryServerReadV1 {
idm_read.get_unixgrouptoken(&mut audit, &rate)
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -616,7 +664,7 @@ impl Handler<InternalSshKeyReadMessage> for QueryServerReadV1 {
type Result = Result<Vec<String>, OperationError>;
fn handle(&mut self, msg: InternalSshKeyReadMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_sshkey_read_message");
let mut audit = AuditScope::new("internal_sshkey_read_message", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_read::handle<InternalSshKeyReadMessage>",
@ -668,7 +716,10 @@ impl Handler<InternalSshKeyReadMessage> for QueryServerReadV1 {
}
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -677,19 +728,19 @@ impl Handler<InternalSshKeyTagReadMessage> for QueryServerReadV1 {
type Result = Result<Option<String>, OperationError>;
fn handle(&mut self, msg: InternalSshKeyTagReadMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_sshkey_tag_read_message");
let InternalSshKeyTagReadMessage {
uat,
uuid_or_name,
tag,
eventid,
} = msg;
let mut audit = AuditScope::new("internal_sshkey_tag_read_message", eventid);
let res = lperf_segment!(
&mut audit,
"actors::v1_read::handle<InternalSshKeyTagReadMessage>",
|| {
let mut qs_read = self.qs.read();
let InternalSshKeyTagReadMessage {
uat,
uuid_or_name,
tag,
} = msg;
let target_uuid = match Uuid::parse_str(uuid_or_name.as_str()) {
Ok(u) => u,
Err(_) => qs_read
@ -741,7 +792,10 @@ impl Handler<InternalSshKeyTagReadMessage> for QueryServerReadV1 {
}
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -750,7 +804,7 @@ impl Handler<IdmAccountUnixAuthMessage> for QueryServerReadV1 {
type Result = Result<Option<UnixUserToken>, OperationError>;
fn handle(&mut self, msg: IdmAccountUnixAuthMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("idm_account_unix_auth");
let mut audit = AuditScope::new("idm_account_unix_auth", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_read::handle<IdmAccountUnixAuthMessage>",
@ -790,13 +844,16 @@ impl Handler<IdmAccountUnixAuthMessage> for QueryServerReadV1 {
let r = idm_write
.auth_unix(&mut audit, &uuae, ct)
.and_then(|r| idm_write.commit().map(|_| r));
.and_then(|r| idm_write.commit(&mut audit).map(|_| r));
lsecurity!(audit, "Sending result -> {:?}", r);
r
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}

View file

@ -1,7 +1,7 @@
use crate::audit::AuditScope;
use crossbeam::channel::Sender;
use std::sync::Arc;
use crate::async_log::EventLog;
use crate::event::{
CreateEvent, DeleteEvent, ModifyEvent, PurgeRecycledEvent, PurgeTombstoneEvent,
ReviveRecycledEvent,
@ -34,17 +34,19 @@ use uuid::Uuid;
pub struct CreateMessage {
pub uat: Option<UserAuthToken>,
pub req: CreateRequest,
pub eventid: Uuid,
}
impl CreateMessage {
pub fn new(uat: Option<UserAuthToken>, req: CreateRequest) -> Self {
CreateMessage { uat, req }
pub fn new(uat: Option<UserAuthToken>, req: CreateRequest, eventid: Uuid) -> Self {
CreateMessage { uat, req, eventid }
}
pub fn new_entry(uat: Option<UserAuthToken>, req: ProtoEntry) -> Self {
pub fn new_entry(uat: Option<UserAuthToken>, req: ProtoEntry, eventid: Uuid) -> Self {
CreateMessage {
uat,
req: CreateRequest { entries: vec![req] },
eventid,
}
}
}
@ -56,11 +58,12 @@ impl Message for CreateMessage {
pub struct DeleteMessage {
pub uat: Option<UserAuthToken>,
pub req: DeleteRequest,
pub eventid: Uuid,
}
impl DeleteMessage {
pub fn new(uat: Option<UserAuthToken>, req: DeleteRequest) -> Self {
DeleteMessage { uat, req }
pub fn new(uat: Option<UserAuthToken>, req: DeleteRequest, eventid: Uuid) -> Self {
DeleteMessage { uat, req, eventid }
}
}
@ -71,6 +74,7 @@ impl Message for DeleteMessage {
pub struct InternalDeleteMessage {
pub uat: Option<UserAuthToken>,
pub filter: Filter<FilterInvalid>,
pub eventid: Uuid,
}
impl Message for InternalDeleteMessage {
@ -80,11 +84,12 @@ impl Message for InternalDeleteMessage {
pub struct ModifyMessage {
pub uat: Option<UserAuthToken>,
pub req: ModifyRequest,
pub eventid: Uuid,
}
impl ModifyMessage {
pub fn new(uat: Option<UserAuthToken>, req: ModifyRequest) -> Self {
ModifyMessage { uat, req }
pub fn new(uat: Option<UserAuthToken>, req: ModifyRequest, eventid: Uuid) -> Self {
ModifyMessage { uat, req, eventid }
}
}
@ -95,6 +100,7 @@ impl Message for ModifyMessage {
pub struct ReviveRecycledMessage {
pub uat: Option<UserAuthToken>,
pub filter: Filter<FilterInvalid>,
pub eventid: Uuid,
}
impl Message for ReviveRecycledMessage {
@ -104,13 +110,15 @@ impl Message for ReviveRecycledMessage {
pub struct IdmAccountSetPasswordMessage {
pub uat: Option<UserAuthToken>,
pub cleartext: String,
pub eventid: Uuid,
}
impl IdmAccountSetPasswordMessage {
pub fn new(uat: Option<UserAuthToken>, req: SingleStringRequest) -> Self {
pub fn new(uat: Option<UserAuthToken>, req: SingleStringRequest, eventid: Uuid) -> Self {
IdmAccountSetPasswordMessage {
uat,
cleartext: req.value,
eventid,
}
}
}
@ -122,6 +130,7 @@ impl Message for IdmAccountSetPasswordMessage {
pub struct IdmAccountPersonExtendMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub eventid: Uuid,
}
impl Message for IdmAccountPersonExtendMessage {
@ -133,16 +142,23 @@ pub struct IdmAccountUnixExtendMessage {
pub uuid_or_name: String,
pub gidnumber: Option<u32>,
pub shell: Option<String>,
pub eventid: Uuid,
}
impl IdmAccountUnixExtendMessage {
pub fn new(uat: Option<UserAuthToken>, uuid_or_name: String, ux: AccountUnixExtend) -> Self {
pub fn new(
uat: Option<UserAuthToken>,
uuid_or_name: String,
ux: AccountUnixExtend,
eventid: Uuid,
) -> Self {
let AccountUnixExtend { gidnumber, shell } = ux;
IdmAccountUnixExtendMessage {
uat,
uuid_or_name,
gidnumber,
shell,
eventid,
}
}
}
@ -155,15 +171,22 @@ pub struct IdmGroupUnixExtendMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub gidnumber: Option<u32>,
pub eventid: Uuid,
}
impl IdmGroupUnixExtendMessage {
pub fn new(uat: Option<UserAuthToken>, uuid_or_name: String, gx: GroupUnixExtend) -> Self {
pub fn new(
uat: Option<UserAuthToken>,
uuid_or_name: String,
gx: GroupUnixExtend,
eventid: Uuid,
) -> Self {
let GroupUnixExtend { gidnumber } = gx;
IdmGroupUnixExtendMessage {
uat,
uuid_or_name,
gidnumber,
eventid,
}
}
}
@ -176,6 +199,7 @@ pub struct IdmAccountUnixSetCredMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub cred: String,
pub eventid: Uuid,
}
impl Message for IdmAccountUnixSetCredMessage {
@ -187,6 +211,7 @@ pub struct InternalCredentialSetMessage {
pub uuid_or_name: String,
pub appid: Option<String>,
pub sac: SetCredentialRequest,
pub eventid: Uuid,
}
impl Message for InternalCredentialSetMessage {
@ -196,11 +221,16 @@ impl Message for InternalCredentialSetMessage {
pub struct InternalRegenerateRadiusMessage {
pub uat: Option<UserAuthToken>,
pub uuid_or_name: String,
pub eventid: Uuid,
}
impl InternalRegenerateRadiusMessage {
pub fn new(uat: Option<UserAuthToken>, uuid_or_name: String) -> Self {
InternalRegenerateRadiusMessage { uat, uuid_or_name }
pub fn new(uat: Option<UserAuthToken>, uuid_or_name: String, eventid: Uuid) -> Self {
InternalRegenerateRadiusMessage {
uat,
uuid_or_name,
eventid,
}
}
}
@ -214,6 +244,7 @@ pub struct InternalSshKeyCreateMessage {
pub tag: String,
pub key: String,
pub filter: Filter<FilterInvalid>,
pub eventid: Uuid,
}
impl Message for InternalSshKeyCreateMessage {
@ -227,6 +258,7 @@ pub struct PurgeAttributeMessage {
pub uuid_or_name: String,
pub attr: String,
pub filter: Filter<FilterInvalid>,
pub eventid: Uuid,
}
impl Message for PurgeAttributeMessage {
@ -240,6 +272,7 @@ pub struct RemoveAttributeValueMessage {
pub attr: String,
pub value: String,
pub filter: Filter<FilterInvalid>,
pub eventid: Uuid,
}
impl Message for RemoveAttributeValueMessage {
@ -252,6 +285,7 @@ pub struct AppendAttributeMessage {
pub attr: String,
pub values: Vec<String>,
pub filter: Filter<FilterInvalid>,
pub eventid: Uuid,
}
impl Message for AppendAttributeMessage {
@ -264,6 +298,7 @@ pub struct SetAttributeMessage {
pub attr: String,
pub values: Vec<String>,
pub filter: Filter<FilterInvalid>,
pub eventid: Uuid,
}
impl Message for SetAttributeMessage {
@ -271,7 +306,7 @@ impl Message for SetAttributeMessage {
}
pub struct QueryServerWriteV1 {
log: actix::Addr<EventLog>,
log: Sender<Option<AuditScope>>,
qs: QueryServer,
idms: Arc<IdmServer>,
}
@ -287,13 +322,13 @@ impl Actor for QueryServerWriteV1 {
}
impl QueryServerWriteV1 {
pub fn new(log: actix::Addr<EventLog>, qs: QueryServer, idms: Arc<IdmServer>) -> Self {
log_event!(log, "Starting query server v1 worker ...");
pub fn new(log: Sender<Option<AuditScope>>, qs: QueryServer, idms: Arc<IdmServer>) -> Self {
info!("Starting query server v1 worker ...");
QueryServerWriteV1 { log, qs, idms }
}
pub fn start(
log: actix::Addr<EventLog>,
log: Sender<Option<AuditScope>>,
query_server: QueryServer,
idms: Arc<IdmServer>,
) -> actix::Addr<QueryServerWriteV1> {
@ -386,7 +421,7 @@ impl Handler<CreateMessage> for QueryServerWriteV1 {
type Result = Result<OperationResponse, OperationError>;
fn handle(&mut self, msg: CreateMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("create");
let mut audit = AuditScope::new("create", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<CreateMessage>",
@ -396,7 +431,7 @@ impl Handler<CreateMessage> for QueryServerWriteV1 {
let crt = match CreateEvent::from_message(&mut audit, msg, &mut qs_write) {
Ok(c) => c,
Err(e) => {
ladmin_error!(audit, "Failed to begin create: {:?}", e);
ladmin_warning!(audit, "Failed to begin create: {:?}", e);
return Err(e);
}
};
@ -409,7 +444,10 @@ impl Handler<CreateMessage> for QueryServerWriteV1 {
}
);
// At the end of the event we send it for logging.
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -418,7 +456,7 @@ impl Handler<ModifyMessage> for QueryServerWriteV1 {
type Result = Result<OperationResponse, OperationError>;
fn handle(&mut self, msg: ModifyMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("modify");
let mut audit = AuditScope::new("modify", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<ModifyMessage>",
@ -439,7 +477,10 @@ impl Handler<ModifyMessage> for QueryServerWriteV1 {
.and_then(|_| qs_write.commit(&mut audit).map(|_| OperationResponse {}))
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -448,7 +489,7 @@ impl Handler<DeleteMessage> for QueryServerWriteV1 {
type Result = Result<OperationResponse, OperationError>;
fn handle(&mut self, msg: DeleteMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("delete");
let mut audit = AuditScope::new("delete", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<DeleteMessage>",
@ -470,7 +511,10 @@ impl Handler<DeleteMessage> for QueryServerWriteV1 {
.and_then(|_| qs_write.commit(&mut audit).map(|_| OperationResponse {}))
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -479,7 +523,7 @@ impl Handler<InternalDeleteMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: InternalDeleteMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("delete");
let mut audit = AuditScope::new("delete", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<InternalDeleteMessage>",
@ -502,7 +546,10 @@ impl Handler<InternalDeleteMessage> for QueryServerWriteV1 {
.and_then(|_| qs_write.commit(&mut audit).map(|_| ()))
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -511,7 +558,7 @@ impl Handler<ReviveRecycledMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: ReviveRecycledMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("revive");
let mut audit = AuditScope::new("revive", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<ReviveRecycledMessage>",
@ -538,7 +585,10 @@ impl Handler<ReviveRecycledMessage> for QueryServerWriteV1 {
.and_then(|_| qs_write.commit(&mut audit).map(|_| ()))
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -548,7 +598,7 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
type Result = Result<SetCredentialResponse, OperationError>;
fn handle(&mut self, msg: InternalCredentialSetMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_credential_set_message");
let mut audit = AuditScope::new("internal_credential_set_message", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<InternalCredentialSetMessage>",
@ -665,7 +715,10 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
}
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -674,7 +727,7 @@ impl Handler<IdmAccountSetPasswordMessage> for QueryServerWriteV1 {
type Result = Result<OperationResponse, OperationError>;
fn handle(&mut self, msg: IdmAccountSetPasswordMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("idm_account_set_password");
let mut audit = AuditScope::new("idm_account_set_password", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<IdmAccountSetPasswordMessage>",
@ -699,7 +752,10 @@ impl Handler<IdmAccountSetPasswordMessage> for QueryServerWriteV1 {
.map(|_| OperationResponse::new(()))
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -712,7 +768,7 @@ impl Handler<InternalRegenerateRadiusMessage> for QueryServerWriteV1 {
msg: InternalRegenerateRadiusMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit = AuditScope::new("idm_account_regenerate_radius");
let mut audit = AuditScope::new("idm_account_regenerate_radius", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<InternalRegenerateRadiusMessage>",
@ -752,7 +808,10 @@ impl Handler<InternalRegenerateRadiusMessage> for QueryServerWriteV1 {
.and_then(|r| idms_prox_write.commit(&mut audit).map(|_| r))
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -761,7 +820,7 @@ impl Handler<PurgeAttributeMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: PurgeAttributeMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("purge_attribute");
let mut audit = AuditScope::new("purge_attribute", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<PurgeAttributeMessage>",
@ -792,14 +851,17 @@ impl Handler<PurgeAttributeMessage> for QueryServerWriteV1 {
}
};
ladmin_error!(audit, "Begin modify event {:?}", mdf);
ltrace!(audit, "Begin modify event {:?}", mdf);
qs_write
.modify(&mut audit, &mdf)
.and_then(|_| qs_write.commit(&mut audit).map(|_| ()))
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -808,7 +870,7 @@ impl Handler<RemoveAttributeValueMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: RemoveAttributeValueMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("remove_attribute_value");
let mut audit = AuditScope::new("remove_attribute_value", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<RemoveAttributeValueMessage>",
@ -849,7 +911,10 @@ impl Handler<RemoveAttributeValueMessage> for QueryServerWriteV1 {
.and_then(|_| qs_write.commit(&mut audit).map(|_| ()))
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -858,18 +923,19 @@ impl Handler<AppendAttributeMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: AppendAttributeMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("append_attribute");
let AppendAttributeMessage {
uat,
uuid_or_name,
attr,
values,
filter,
eventid,
} = msg;
let mut audit = AuditScope::new("append_attribute", eventid);
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<AppendAttributeMessage>",
|| {
let AppendAttributeMessage {
uat,
uuid_or_name,
attr,
values,
filter,
} = msg;
// We need to turn these into proto modlists so they can be converted
// and validated.
let proto_ml = ProtoModifyList::new_list(
@ -881,7 +947,10 @@ impl Handler<AppendAttributeMessage> for QueryServerWriteV1 {
self.modify_from_parts(&mut audit, uat, uuid_or_name, proto_ml, filter)
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -890,18 +959,19 @@ impl Handler<SetAttributeMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: SetAttributeMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("set_attribute");
let SetAttributeMessage {
uat,
uuid_or_name,
attr,
values,
filter,
eventid,
} = msg;
let mut audit = AuditScope::new("set_attribute", eventid);
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<SetAttributeMessage>",
|| {
let SetAttributeMessage {
uat,
uuid_or_name,
attr,
values,
filter,
} = msg;
// We need to turn these into proto modlists so they can be converted
// and validated.
let proto_ml = ProtoModifyList::new_list(
@ -916,7 +986,10 @@ impl Handler<SetAttributeMessage> for QueryServerWriteV1 {
self.modify_from_parts(&mut audit, uat, uuid_or_name, proto_ml, filter)
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -925,19 +998,19 @@ impl Handler<InternalSshKeyCreateMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: InternalSshKeyCreateMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_sshkey_create");
let InternalSshKeyCreateMessage {
uat,
uuid_or_name,
tag,
key,
filter,
eventid,
} = msg;
let mut audit = AuditScope::new("internal_sshkey_create", eventid);
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<InternalSshKeyCreateMessage>",
|| {
let InternalSshKeyCreateMessage {
uat,
uuid_or_name,
tag,
key,
filter,
} = msg;
// Because this is from internal, we can generate a real modlist, rather
// than relying on the proto ones.
let ml = ModifyList::new_append("ssh_publickey", Value::new_sshkey(tag, key));
@ -945,7 +1018,10 @@ impl Handler<InternalSshKeyCreateMessage> for QueryServerWriteV1 {
self.modify_from_internal_parts(&mut audit, uat, uuid_or_name, ml, filter)
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -958,13 +1034,16 @@ impl Handler<IdmAccountPersonExtendMessage> for QueryServerWriteV1 {
msg: IdmAccountPersonExtendMessage,
_: &mut Self::Context,
) -> Self::Result {
let mut audit = AuditScope::new("idm_account_person_extend");
let IdmAccountPersonExtendMessage {
uat,
uuid_or_name,
eventid,
} = msg;
let mut audit = AuditScope::new("idm_account_person_extend", eventid);
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<IdmAccountPersonExtendMessage>",
|| {
let IdmAccountPersonExtendMessage { uat, uuid_or_name } = msg;
// The filter_map here means we only create the mods if the gidnumber or shell are set
// in the actual request.
let mods: Vec<_> = vec![Some(Modify::Present(
@ -982,7 +1061,10 @@ impl Handler<IdmAccountPersonExtendMessage> for QueryServerWriteV1 {
self.modify_from_internal_parts(&mut audit, uat, uuid_or_name, ml, filter)
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -991,18 +1073,18 @@ impl Handler<IdmAccountUnixExtendMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: IdmAccountUnixExtendMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("idm_account_unix_extend");
let IdmAccountUnixExtendMessage {
uat,
uuid_or_name,
gidnumber,
shell,
eventid,
} = msg;
let mut audit = AuditScope::new("idm_account_unix_extend", eventid);
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<IdmAccountUnixExtendMessage>",
|| {
let IdmAccountUnixExtendMessage {
uat,
uuid_or_name,
gidnumber,
shell,
} = msg;
// The filter_map here means we only create the mods if the gidnumber or shell are set
// in the actual request.
let mods: Vec<_> = vec![
@ -1025,7 +1107,10 @@ impl Handler<IdmAccountUnixExtendMessage> for QueryServerWriteV1 {
self.modify_from_internal_parts(&mut audit, uat, uuid_or_name, ml, filter)
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -1034,17 +1119,17 @@ impl Handler<IdmGroupUnixExtendMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: IdmGroupUnixExtendMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("idm_group_unix_extend");
let IdmGroupUnixExtendMessage {
uat,
uuid_or_name,
gidnumber,
eventid,
} = msg;
let mut audit = AuditScope::new("idm_group_unix_extend", eventid);
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<IdmGroupUnixExtendMessage>",
|| {
let IdmGroupUnixExtendMessage {
uat,
uuid_or_name,
gidnumber,
} = msg;
// The filter_map here means we only create the mods if the gidnumber or shell are set
// in the actual request.
let mods: Vec<_> = vec![
@ -1066,7 +1151,10 @@ impl Handler<IdmGroupUnixExtendMessage> for QueryServerWriteV1 {
self.modify_from_internal_parts(&mut audit, uat, uuid_or_name, ml, filter)
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -1075,7 +1163,7 @@ impl Handler<IdmAccountUnixSetCredMessage> for QueryServerWriteV1 {
type Result = Result<(), OperationError>;
fn handle(&mut self, msg: IdmAccountUnixSetCredMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("idm_account_unix_set_cred");
let mut audit = AuditScope::new("idm_account_unix_set_cred", msg.eventid.clone());
let res = lperf_segment!(
&mut audit,
"actors::v1_write::handle<IdmAccountUnixSetCredMessage>",
@ -1111,7 +1199,10 @@ impl Handler<IdmAccountUnixSetCredMessage> for QueryServerWriteV1 {
.map(|_| ())
}
);
self.log.do_send(audit);
self.log.send(Some(audit)).map_err(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
OperationError::InvalidState
})?;
res
}
}
@ -1122,7 +1213,7 @@ impl Handler<PurgeTombstoneEvent> for QueryServerWriteV1 {
type Result = ();
fn handle(&mut self, msg: PurgeTombstoneEvent, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("purge tombstones");
let mut audit = AuditScope::new("purge tombstones", msg.eventid.clone());
lperf_segment!(
&mut audit,
"actors::v1_write::handle<PurgeTombstoneEvent>",
@ -1138,7 +1229,9 @@ impl Handler<PurgeTombstoneEvent> for QueryServerWriteV1 {
}
);
// At the end of the event we send it for logging.
self.log.do_send(audit);
self.log.send(Some(audit)).unwrap_or_else(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
});
}
}
@ -1146,7 +1239,7 @@ impl Handler<PurgeRecycledEvent> for QueryServerWriteV1 {
type Result = ();
fn handle(&mut self, msg: PurgeRecycledEvent, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("purge recycled");
let mut audit = AuditScope::new("purge recycled", msg.eventid.clone());
lperf_segment!(
&mut audit,
"actors::v1_write::handle<PurgeRecycledEvent>",
@ -1162,6 +1255,8 @@ impl Handler<PurgeRecycledEvent> for QueryServerWriteV1 {
}
);
// At the end of the event we send it for logging.
self.log.do_send(audit);
self.log.send(Some(audit)).unwrap_or_else(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
});
}
}

View file

@ -1,81 +1,37 @@
use actix::prelude::*;
use crate::audit::AuditScope;
use crossbeam::channel::Receiver;
// Helper for internal logging.
// Should only be used at startup/shutdown
#[macro_export]
macro_rules! log_event {
($log_addr:expr, $($arg:tt)*) => ({
use crate::async_log::LogEvent;
use std::fmt;
$log_addr.do_send(
LogEvent {
msg: fmt::format(
format_args!($($arg)*)
)
pub fn run(rx: Receiver<Option<AuditScope>>) {
info!("Log thread started ...");
loop {
match rx.recv() {
Ok(Some(al)) => {
al.write_log();
}
)
})
}
// We need to pass in config for this later
// Or we need to pass in the settings for it IE level and dest?
// Is there an efficent way to set a log level filter in the macros
// so that we don't msg unless it's the correct level?
// Do we need config in the log macro?
pub fn start() -> actix::Addr<EventLog> {
SyncArbiter::start(1, move || EventLog {})
}
pub struct EventLog {}
impl Actor for EventLog {
type Context = SyncContext<Self>;
/*
fn started(&mut self, ctx: &mut Self::Context) {
ctx.set_mailbox_capacity(1 << 31);
Ok(None) => {
// Prep to shutdown, finish draining.
break;
}
Err(_) => {
// we're cooked.
error!("CRITICAL: log thread is cooked.");
}
}
}
*/
}
// What messages can we be sent. Basically this is all the possible
// inputs we *could* recieve.
// Add a macro for easy msg write
pub struct LogEvent {
pub msg: String,
}
impl Message for LogEvent {
type Result = ();
}
impl Handler<LogEvent> for EventLog {
type Result = ();
fn handle(&mut self, event: LogEvent, _: &mut SyncContext<Self>) -> Self::Result {
info!("logevent: {}", event.msg);
loop {
match rx.try_recv() {
Ok(Some(al)) => {
al.write_log();
}
Ok(None) => {
// Skip this, it's a shutdown msg.
}
Err(_) => {
// we've drained.
break;
}
}
}
info!("Log thread shutdown complete.");
}
impl Handler<AuditScope> for EventLog {
type Result = ();
fn handle(&mut self, event: AuditScope, _: &mut SyncContext<Self>) -> Self::Result {
info!("{}", event);
}
}
/*
impl Handler<Event> for EventLog {
type Result = ();
fn handle(&mut self, event: Event, _: &mut SyncContext<Self>) -> Self::Result {
println!("EVENT: {:?}", event)
}
}
*/

View file

@ -31,12 +31,12 @@ impl fmt::Display for LogTag {
LogTag::AdminWarning => write!(f, "admin::warning 🚧"),
LogTag::AdminInfo => write!(f, "admin::info"),
LogTag::RequestError => write!(f, "request::error 🚨"),
LogTag::Security => write!(f, "security 🔒"),
LogTag::SecurityAccess => write!(f, "security::access 🔐"),
LogTag::Security => write!(f, "security 🔐"),
LogTag::SecurityAccess => write!(f, "security::access 🔓"),
LogTag::Filter => write!(f, "filter"),
LogTag::FilterWarning => write!(f, "filter::warning 🚧"),
LogTag::FilterError => write!(f, "filter::error 🚨"),
LogTag::Trace => write!(f, "Trace"),
LogTag::Trace => write!(f, "trace ⌦"),
}
}
}
@ -45,9 +45,11 @@ macro_rules! audit_log {
($audit:expr, $($arg:tt)*) => ({
use std::fmt;
use crate::audit::LogTag;
/*
if cfg!(test) || cfg!(debug_assertions) {
debug!($($arg)*)
error!($($arg)*)
}
*/
$audit.log_event(
LogTag::AdminError,
fmt::format(
@ -59,17 +61,21 @@ macro_rules! audit_log {
macro_rules! ltrace {
($au:expr, $($arg:tt)*) => ({
use std::fmt;
use crate::audit::LogTag;
if cfg!(test) || cfg!(debug_assertions) {
debug!($($arg)*)
}
$au.log_event(
LogTag::Trace,
fmt::format(
format_args!($($arg)*)
if log_enabled!(log::Level::Debug) {
/*
if cfg!(test) || cfg!(debug_assertions) {
error!($($arg)*)
}
*/
use std::fmt;
use crate::audit::LogTag;
$au.log_event(
LogTag::Trace,
fmt::format(
format_args!($($arg)*)
)
)
)
}
})
}
@ -125,6 +131,19 @@ macro_rules! ladmin_error {
})
}
macro_rules! ladmin_warning {
($au:expr, $($arg:tt)*) => ({
use std::fmt;
use crate::audit::LogTag;
$au.log_event(
LogTag::AdminWarning,
fmt::format(
format_args!($($arg)*)
)
)
})
}
macro_rules! ladmin_info {
($au:expr, $($arg:tt)*) => ({
use std::fmt;
@ -331,24 +350,23 @@ impl PartialEq for PerfProcessed {
* | |--> another layer
*/
impl PerfProcessed {
fn int_write_fmt(
&self,
f: &mut fmt::Formatter,
parents: usize,
uuid: &HyphenatedRef,
) -> fmt::Result {
write!(f, "[- {} perf::trace] ", uuid)?;
fn int_write_fmt(&self, parents: usize, uuid: &HyphenatedRef) {
let mut prefix = String::new();
prefix.push_str(format!("[- {} perf::trace] ", uuid).as_str());
let d = &self.duration;
let df = d.as_secs() as f64 + d.subsec_nanos() as f64 * 1e-9;
if parents > 0 {
for _i in 0..(parents - 1) {
write!(f, "| ")?;
for _i in 0..parents {
prefix.push_str("| ");
}
};
writeln!(f, "|--> {} {1:.9} {2:.3}%", self.id, df, self.percent)?;
debug!(
"{}|--> {} {2:.9} {3:.3}%",
prefix, self.id, df, self.percent
);
self.contains
.iter()
.try_for_each(|pe| pe.int_write_fmt(f, parents + 1, uuid))
.for_each(|pe| pe.int_write_fmt(parents + 1, uuid))
}
}
@ -368,37 +386,20 @@ pub struct AuditScope {
active_perf: Option<&'static mut PerfEvent>,
}
// unsafe impl Sync for AuditScope {}
// Allow us to be sent to the log subsystem
impl Message for AuditScope {
type Result = ();
}
impl fmt::Display for AuditScope {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let uuid_ref = self.uuid.to_hyphenated_ref();
self.events
.iter()
.try_for_each(|e| writeln!(f, "[{} {} {}] {}", e.time, uuid_ref, e.tag, e.data))?;
// First, we pre-process all the perf events to order them
let mut proc_perf: Vec<_> = self.perf.iter().map(|pe| pe.process()).collect();
// We still sort them by duration.
proc_perf.sort_unstable();
// Now write the perf events
proc_perf
.iter()
.try_for_each(|pe| pe.int_write_fmt(f, 0, &uuid_ref))
}
}
impl AuditScope {
pub fn new(name: &str) -> Self {
pub fn new(name: &str, eventid: Uuid) -> Self {
let t_now = SystemTime::now();
let datetime: DateTime<Utc> = t_now.into();
AuditScope {
uuid: Uuid::new_v4(),
uuid: eventid,
events: vec![AuditLog {
time: datetime.to_rfc3339(),
tag: LogTag::AdminInfo,
@ -409,8 +410,31 @@ impl AuditScope {
}
}
pub fn get_uuid(&self) -> &Uuid {
&self.uuid
pub fn write_log(self) {
let uuid_ref = self.uuid.to_hyphenated_ref();
self.events.iter().for_each(|e| match e.tag {
LogTag::AdminError | LogTag::RequestError | LogTag::FilterError => {
error!("[{} {} {}] {}", e.time, uuid_ref, e.tag, e.data)
}
LogTag::AdminWarning
| LogTag::Security
| LogTag::SecurityAccess
| LogTag::FilterWarning => warn!("[{} {} {}] {}", e.time, uuid_ref, e.tag, e.data),
LogTag::AdminInfo | LogTag::Filter => {
info!("[{} {} {}] {}", e.time, uuid_ref, e.tag, e.data)
}
LogTag::Trace => debug!("[{} {} {}] {}", e.time, uuid_ref, e.tag, e.data),
});
// First, we pre-process all the perf events to order them
let mut proc_perf: Vec<_> = self.perf.iter().map(|pe| pe.process()).collect();
// We still sort them by duration.
proc_perf.sort_unstable();
// Now write the perf events
proc_perf
.iter()
.for_each(|pe| pe.int_write_fmt(0, &uuid_ref))
}
pub fn log_event(&mut self, tag: LogTag, data: String) {
@ -484,8 +508,8 @@ mod tests {
// Create and remove. Perhaps add some core details?
#[test]
fn test_audit_simple() {
let au = AuditScope::new("au");
let au = AuditScope::new("au", uuid::Uuid::new_v4());
let d = serde_json::to_string_pretty(&au).expect("Json serialise failure");
println!("{}", d);
debug!("{}", d);
}
}

View file

@ -80,16 +80,19 @@ macro_rules! get_identry {
}
});
// Now, get anything from nidl that is needed.
let mut db_result = $self.db.get_identry($au, &IDL::Partial(nidl))?;
if !nidl.is_empty() {
ladmin_warning!($au, "idl_arc_sqlite cache miss detected - if this occurs frequently you SHOULD adjust your cache tuning.");
// Now, get anything from nidl that is needed.
let mut db_result = $self.db.get_identry($au, &IDL::Partial(nidl))?;
// Clone everything from db_result into the cache.
db_result.iter().for_each(|e| {
$self.entry_cache.insert(e.get_id(), Box::new(e.clone()));
});
// Clone everything from db_result into the cache.
db_result.iter().for_each(|e| {
$self.entry_cache.insert(e.get_id(), Box::new(e.clone()));
});
// Merge the two vecs
result.append(&mut db_result);
// Merge the two vecs
result.append(&mut db_result);
}
// Return
Ok(result)
@ -142,7 +145,7 @@ macro_rules! get_idl {
let cache_r = $self.idl_cache.get(&cache_key);
// If hit, continue.
if let Some(ref data) = cache_r {
lfilter!(
ltrace!(
$audit,
"Got cached idl for index {:?} {:?} -> {}",
$itype,
@ -500,4 +503,13 @@ impl IdlArcSqlite {
idl_cache: idl_cache_write,
}
}
/*
pub fn stats_audit(&self, audit: &mut AuditScope) {
let entry_stats = self.entry_cache.view_stats();
let idl_stats = self.idl_cache.view_stats();
ladmin_info!(audit, "entry_cache stats -> {:?}", *entry_stats);
ladmin_info!(audit, "idl_cache stats -> {:?}", *idl_stats);
}
*/
}

View file

@ -239,7 +239,7 @@ pub trait IdlSqliteTransaction {
// have a corrupted index .....
None => IDLBitRange::new(),
};
lfilter!(audit, "Got idl for index {:?} {:?} -> {}", itype, attr, idl);
ltrace!(audit, "Got idl for index {:?} {:?} -> {}", itype, attr, idl);
Ok(Some(idl))
})
@ -316,25 +316,22 @@ pub trait IdlSqliteTransaction {
// Allow this as it actually extends the life of stmt
let r = match stmt.query(NO_PARAMS) {
Ok(mut rows) => {
match rows.next() {
Ok(Some(v)) => {
// println!("{:?}", v.column_names());
let r: Result<String, _> = v.get(0);
match r {
Ok(t) => {
if t == "ok" {
Vec::new()
} else {
vec![Err(ConsistencyError::SqliteIntegrityFailure)]
}
Ok(mut rows) => match rows.next() {
Ok(Some(v)) => {
let r: Result<String, _> = v.get(0);
match r {
Ok(t) => {
if t == "ok" {
Vec::new()
} else {
vec![Err(ConsistencyError::SqliteIntegrityFailure)]
}
Err(_) => vec![Err(ConsistencyError::SqliteIntegrityFailure)],
}
Err(_) => vec![Err(ConsistencyError::SqliteIntegrityFailure)],
}
_ => vec![Err(ConsistencyError::SqliteIntegrityFailure)],
}
}
_ => vec![Err(ConsistencyError::SqliteIntegrityFailure)],
},
Err(_) => vec![Err(ConsistencyError::SqliteIntegrityFailure)],
};
r
@ -351,7 +348,6 @@ impl Drop for IdlSqliteReadTransaction {
// Abort - so far this has proven reliable to use drop here.
fn drop(self: &mut Self) {
if !self.committed {
debug!("Aborting BE RO txn");
self.conn
.execute("ROLLBACK TRANSACTION", NO_PARAMS)
// We can't do this without expect.
@ -365,7 +361,7 @@ impl Drop for IdlSqliteReadTransaction {
impl IdlSqliteReadTransaction {
pub fn new(conn: r2d2::PooledConnection<SqliteConnectionManager>) -> Self {
// Start the transaction
debug!("Starting BE RO txn ...");
//
// I'm happy for this to be an expect, because this is a huge failure
// of the server ... but if it happens a lot we should consider making
// this a Result<>
@ -390,7 +386,6 @@ impl Drop for IdlSqliteWriteTransaction {
// Abort
fn drop(self: &mut Self) {
if !self.committed {
debug!("Aborting BE WR txn");
self.conn
.execute("ROLLBACK TRANSACTION", NO_PARAMS)
.expect("Unable to rollback transaction! Can not proceed!!!");
@ -401,7 +396,6 @@ impl Drop for IdlSqliteWriteTransaction {
impl IdlSqliteWriteTransaction {
pub fn new(conn: r2d2::PooledConnection<SqliteConnectionManager>) -> Self {
// Start the transaction
debug!("Starting BE WR txn ...");
conn.execute("BEGIN TRANSACTION", NO_PARAMS)
.expect("Unable to begin transaction!");
IdlSqliteWriteTransaction {
@ -412,7 +406,7 @@ impl IdlSqliteWriteTransaction {
pub fn commit(mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
lperf_segment!(audit, "be::idl_sqlite::commit", || {
ltrace!(audit, "Commiting BE txn");
// ltrace!(audit, "Commiting BE WR txn");
assert!(!self.committed);
self.committed = true;
@ -420,7 +414,7 @@ impl IdlSqliteWriteTransaction {
.execute("COMMIT TRANSACTION", NO_PARAMS)
.map(|_| ())
.map_err(|e| {
println!("{:?}", e);
ladmin_error!(audit, "CRITICAL: failed to commit sqlite txn -> {:?}", e);
OperationError::BackendEngine
})
})
@ -510,12 +504,13 @@ impl IdlSqliteWriteTransaction {
I: Iterator<Item = u64>,
{
lperf_segment!(au, "be::idl_sqlite::delete_identry", || {
let mut stmt = try_audit!(
au,
self.conn.prepare("DELETE FROM id2entry WHERE id = :id"),
"SQLite Error {:?}",
OperationError::SQLiteError
);
let mut stmt = self
.conn
.prepare("DELETE FROM id2entry WHERE id = :id")
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
idl.try_for_each(|id| {
let iid: i64 = id
@ -531,9 +526,10 @@ impl IdlSqliteWriteTransaction {
debug_assert!(iid > 0);
stmt.execute(&[&iid])
.map(|_| ())
.map_err(|_| OperationError::SQLiteError)
stmt.execute(&[&iid]).map(|_| ()).map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
})
})
}
@ -709,7 +705,7 @@ impl IdlSqliteWriteTransaction {
)
.map(|_| ())
.map_err(|e| {
debug!("rusqlite error {:?}", e);
error!("rusqlite error {:?}", e);
OperationError::SQLiteError
})
@ -725,7 +721,7 @@ impl IdlSqliteWriteTransaction {
)
.map(|_| ())
.map_err(|e| {
debug!("rusqlite error {:?}", e);
error!("rusqlite error {:?}", e);
OperationError::SQLiteError
})
@ -762,7 +758,7 @@ impl IdlSqliteWriteTransaction {
pub(crate) fn set_db_index_version(&self, v: i64) -> Result<(), OperationError> {
self.set_db_version_key(DBV_INDEXV, v).map_err(|e| {
debug!("sqlite error {:?}", e);
error!("sqlite error {:?}", e);
OperationError::SQLiteError
})
}
@ -926,7 +922,7 @@ mod tests {
#[test]
fn test_idl_sqlite_verify() {
let mut audit = AuditScope::new("run_test");
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4());
let be = IdlSqlite::new(&mut audit, "", 1).unwrap();
let be_w = be.write();
let r = be_w.verify();

View file

@ -8,7 +8,7 @@ use std::sync::Arc;
use crate::audit::AuditScope;
use crate::be::dbentry::DbEntry;
use crate::entry::{Entry, EntryCommitted, EntryNew, EntrySealed};
use crate::filter::{Filter, FilterResolved, FilterValidResolved};
use crate::filter::{Filter, FilterPlan, FilterResolved, FilterValidResolved};
use idlset::AndNot;
use idlset::IDLBitRange;
use kanidm_proto::v1::{ConsistencyError, OperationError};
@ -69,14 +69,14 @@ pub trait BackendTransaction {
type IdlLayerType: IdlArcSqliteTransaction;
fn get_idlayer(&mut self) -> &mut Self::IdlLayerType;
/// Recursively apply a filter, transforming into IDL's on the way.
/// Recursively apply a filter, transforming into IDL's on the way. This builds a query
/// execution log, so that it can be examined how an operation proceeded.
fn filter2idl(
&mut self,
au: &mut AuditScope,
filt: &FilterResolved,
thres: usize,
) -> Result<IDL, OperationError> {
// debug!("testing filter -> {:?}", filt);
) -> Result<(IDL, FilterPlan), OperationError> {
let fr = Ok(match filt {
FilterResolved::Eq(attr, value, idx) => {
if *idx {
@ -87,12 +87,15 @@ pub trait BackendTransaction {
.get_idlayer()
.get_idl(au, attr, &IndexType::EQUALITY, &idx_key)?
{
Some(idl) => IDL::Indexed(idl),
None => IDL::ALLIDS,
Some(idl) => (
IDL::Indexed(idl),
FilterPlan::EqIndexed(attr.to_string(), idx_key),
),
None => (IDL::ALLIDS, FilterPlan::EqCorrupt(attr.to_string())),
}
} else {
// Schema believes this is not indexed
IDL::ALLIDS
(IDL::ALLIDS, FilterPlan::EqUnindexed(attr.to_string()))
}
}
FilterResolved::Sub(attr, subvalue, idx) => {
@ -104,12 +107,15 @@ pub trait BackendTransaction {
.get_idlayer()
.get_idl(au, attr, &IndexType::SUBSTRING, &idx_key)?
{
Some(idl) => IDL::Indexed(idl),
None => IDL::ALLIDS,
Some(idl) => (
IDL::Indexed(idl),
FilterPlan::SubIndexed(attr.to_string(), idx_key),
),
None => (IDL::ALLIDS, FilterPlan::SubCorrupt(attr.to_string())),
}
} else {
// Schema believes this is not indexed
IDL::ALLIDS
(IDL::ALLIDS, FilterPlan::SubUnindexed(attr.to_string()))
}
}
FilterResolved::Pres(attr, idx) => {
@ -121,21 +127,22 @@ pub trait BackendTransaction {
&IndexType::PRESENCE,
&"_".to_string(),
)? {
Some(idl) => IDL::Indexed(idl),
None => IDL::ALLIDS,
Some(idl) => (IDL::Indexed(idl), FilterPlan::PresIndexed(attr.to_string())),
None => (IDL::ALLIDS, FilterPlan::PresCorrupt(attr.to_string())),
}
} else {
// Schema believes this is not indexed
IDL::ALLIDS
(IDL::ALLIDS, FilterPlan::PresUnindexed(attr.to_string()))
}
}
FilterResolved::LessThan(_attr, _subvalue, _idx) => {
FilterResolved::LessThan(attr, _subvalue, _idx) => {
// We have no process for indexing this right now.
IDL::ALLIDS
(IDL::ALLIDS, FilterPlan::LessThanUnindexed(attr.to_string()))
}
FilterResolved::Or(l) => {
// Importantly if this has no inner elements, this returns
// an empty list.
let mut plan = Vec::new();
let mut result = IDLBitRange::new();
let mut partial = false;
let mut threshold = false;
@ -143,54 +150,72 @@ pub trait BackendTransaction {
for f in l.iter() {
// get their idls
match self.filter2idl(au, f, thres)? {
IDL::Indexed(idl) => {
(IDL::Indexed(idl), fp) => {
plan.push(fp);
// now union them (if possible)
result = result | idl;
}
IDL::Partial(idl) => {
(IDL::Partial(idl), fp) => {
plan.push(fp);
// now union them (if possible)
result = result | idl;
partial = true;
}
IDL::PartialThreshold(idl) => {
(IDL::PartialThreshold(idl), fp) => {
plan.push(fp);
// now union them (if possible)
result = result | idl;
partial = true;
threshold = true;
}
IDL::ALLIDS => {
(IDL::ALLIDS, fp) => {
plan.push(fp);
// If we find anything unindexed, the whole term is unindexed.
lfilter_error!(au, "Term {:?} is ALLIDS, shortcut return", f);
return Ok(IDL::ALLIDS);
let setplan = FilterPlan::OrUnindexed(plan);
return Ok((IDL::ALLIDS, setplan));
}
}
} // end or.iter()
// If we got here, every term must have been indexed or partial indexed.
if partial {
if threshold {
IDL::Partial(result)
let setplan = FilterPlan::OrPartialThreshold(plan);
(IDL::PartialThreshold(result), setplan)
} else {
IDL::PartialThreshold(result)
let setplan = FilterPlan::OrPartial(plan);
(IDL::Partial(result), setplan)
}
} else {
IDL::Indexed(result)
let setplan = FilterPlan::OrIndexed(plan);
(IDL::Indexed(result), setplan)
}
}
FilterResolved::And(l) => {
// This algorithm is a little annoying. I couldn't get it to work with iter and
// folds due to the logic needed ...
// First, setup the two filter lists.
let (f_andnot, mut f_rem): (Vec<_>, Vec<_>) = l.iter().partition(|f| f.is_andnot());
// First, setup the two filter lists. We always apply AndNot after positive
// and terms.
let (f_andnot, f_rem): (Vec<_>, Vec<_>) = l.iter().partition(|f| f.is_andnot());
// We make this an iter, so everything comes off in order. Using pop means we
// pull from the tail, which is the WORST item to start with!
let mut f_rem_iter = f_rem.iter();
// Setup the initial result.
let mut cand_idl = match f_rem.pop() {
let (mut cand_idl, fp) = match f_rem_iter.next() {
Some(f) => self.filter2idl(au, f, thres)?,
None => {
lfilter_error!(au, "WARNING: And filter was empty, or contains only AndNot, can not evaluate.");
return Ok(IDL::Indexed(IDLBitRange::new()));
return Ok((IDL::Indexed(IDLBitRange::new()), FilterPlan::Invalid));
}
};
// Setup the query plan tracker
let mut plan = Vec::new();
plan.push(fp);
match &cand_idl {
IDL::Indexed(idl) | IDL::Partial(idl) | IDL::PartialThreshold(idl) => {
// When below thres, we have to return partials to trigger the entry_no_match_filter check.
@ -201,14 +226,26 @@ pub trait BackendTransaction {
au,
"NOTICE: Cand set shorter than threshold, early return"
);
return Ok(IDL::PartialThreshold(idl.clone()));
let setplan = FilterPlan::AndPartialThreshold(plan);
return Ok((IDL::PartialThreshold(idl.clone()), setplan));
} else if idl.len() == 0 {
// Regardless of the input state, if it's empty, this can never
// be satisfied, so return we are indexed and complete.
lfilter_warning!(
au,
"NOTICE: empty candidate set, shortcutting return."
);
let setplan = FilterPlan::AndEmptyCand(plan);
return Ok((IDL::Indexed(IDLBitRange::new()), setplan));
}
}
IDL::ALLIDS => {}
}
for f in f_rem.iter() {
let inter = self.filter2idl(au, f, thres)?;
// Now, for all remaining,
for f in f_rem_iter {
let (inter, fp) = self.filter2idl(au, f, thres)?;
plan.push(fp);
cand_idl = match (cand_idl, inter) {
(IDL::Indexed(ia), IDL::Indexed(ib)) => {
let r = ia & ib;
@ -218,7 +255,17 @@ pub trait BackendTransaction {
au,
"NOTICE: Cand set shorter than threshold, early return"
);
return Ok(IDL::PartialThreshold(r));
let setplan = FilterPlan::AndPartialThreshold(plan);
return Ok((IDL::PartialThreshold(r), setplan));
} else if r.len() == 0 {
// Regardless of the input state, if it's empty, this can never
// be satisfied, so return we are indexed and complete.
lfilter_warning!(
au,
"NOTICE: empty candidate set, shortcutting return."
);
let setplan = FilterPlan::AndEmptyCand(plan);
return Ok((IDL::Indexed(IDLBitRange::new()), setplan));
} else {
IDL::Indexed(r)
}
@ -233,7 +280,8 @@ pub trait BackendTransaction {
au,
"NOTICE: Cand set shorter than threshold, early return"
);
return Ok(IDL::PartialThreshold(r));
let setplan = FilterPlan::AndPartialThreshold(plan);
return Ok((IDL::PartialThreshold(r), setplan));
} else {
IDL::Partial(r)
}
@ -250,7 +298,8 @@ pub trait BackendTransaction {
au,
"NOTICE: Cand set shorter than threshold, early return"
);
return Ok(IDL::PartialThreshold(r));
let setplan = FilterPlan::AndPartialThreshold(plan);
return Ok((IDL::PartialThreshold(r), setplan));
} else {
IDL::PartialThreshold(r)
}
@ -278,7 +327,9 @@ pub trait BackendTransaction {
return Err(OperationError::InvalidState);
}
};
let inter = self.filter2idl(au, f_in, thres)?;
let (inter, fp) = self.filter2idl(au, f_in, thres)?;
// It's an and not, so we need to wrap the plan accordingly.
plan.push(FilterPlan::AndNot(Box::new(fp)));
cand_idl = match (cand_idl, inter) {
(IDL::Indexed(ia), IDL::Indexed(ib)) => {
let r = ia.andnot(ib);
@ -305,7 +356,8 @@ pub trait BackendTransaction {
au,
"NOTICE: Cand set shorter than threshold, early return"
);
return Ok(IDL::PartialThreshold(r));
let setplan = FilterPlan::AndPartialThreshold(plan);
return Ok((IDL::PartialThreshold(r), setplan));
} else {
IDL::Partial(r)
}
@ -323,7 +375,8 @@ pub trait BackendTransaction {
au,
"NOTICE: Cand set shorter than threshold, early return"
);
return Ok(IDL::PartialThreshold(r));
let setplan = FilterPlan::AndPartialThreshold(plan);
return Ok((IDL::PartialThreshold(r), setplan));
} else {
IDL::PartialThreshold(r)
}
@ -344,9 +397,16 @@ pub trait BackendTransaction {
};
}
// What state is the final cand idl in?
let setplan = match cand_idl {
IDL::Indexed(_) => FilterPlan::AndIndexed(plan),
IDL::Partial(_) | IDL::PartialThreshold(_) => FilterPlan::AndPartial(plan),
IDL::ALLIDS => FilterPlan::AndUnindexed(plan),
};
// Finally, return the result.
// debug!("final cand set ==> {:?}", cand_idl);
cand_idl
(cand_idl, setplan)
} // end and
// So why does this return empty? Normally we actually process an AndNot in the context
// of an "AND" query, but if it's used anywhere else IE the root filter, then there is
@ -357,9 +417,9 @@ pub trait BackendTransaction {
// now do andnot?
lfilter_error!(
au,
"WARNING: Requested a top level or isolated AndNot, returning empty"
"ERROR: Requested a top level or isolated AndNot, returning empty"
);
IDL::Indexed(IDLBitRange::new())
(IDL::Indexed(IDLBitRange::new()), FilterPlan::Invalid)
}
});
// debug!("result of {:?} -> {:?}", filt, fr);
@ -377,15 +437,18 @@ pub trait BackendTransaction {
// to the in-memory filter test and be done.
lperf_segment!(au, "be::search", || {
// Do a final optimise of the filter
lfilter!(au, "filter unoptimised form --> {:?}", filt);
let filt = filt.optimise();
lfilter!(au, "filter optimised to --> {:?}", filt);
// Using the indexes, resolve the IDL here, or ALLIDS.
// Also get if the filter was 100% resolved or not.
let idl = lperf_segment!(au, "be::search -> filter2idl", || {
let (idl, fplan) = lperf_segment!(au, "be::search -> filter2idl", || {
self.filter2idl(au, filt.to_inner(), FILTER_SEARCH_TEST_THRESHOLD)
})?;
lfilter!(au, "filter executed plan -> {:?}", fplan);
let entries = try_audit!(au, self.get_idlayer().get_identry(au, &idl));
// Do other things
// Now, de-serialise the raw_entries back to entries, and populate their ID's
@ -455,15 +518,18 @@ pub trait BackendTransaction {
) -> Result<bool, OperationError> {
lperf_segment!(au, "be::exists", || {
// Do a final optimise of the filter
lfilter!(au, "filter unoptimised form --> {:?}", filt);
let filt = filt.optimise();
lfilter!(au, "filter optimised to --> {:?}", filt);
// Using the indexes, resolve the IDL here, or ALLIDS.
// Also get if the filter was 100% resolved or not.
let idl = lperf_segment!(au, "be::exists -> filter2idl", || {
let (idl, fplan) = lperf_segment!(au, "be::exists -> filter2idl", || {
self.filter2idl(au, filt.to_inner(), FILTER_EXISTS_TEST_THRESHOLD)
})?;
lfilter!(au, "filter executed plan -> {:?}", fplan);
// Now, check the idl -- if it's fully resolved, we can skip this because the query
// was fully indexed.
match &idl {
@ -1085,9 +1151,13 @@ mod tests {
($test_fn:expr) => {{
use env_logger;
::std::env::set_var("RUST_LOG", "kanidm=debug");
let _ = env_logger::builder().is_test(true).try_init();
let _ = env_logger::builder()
.format_timestamp(None)
.format_level(false)
.is_test(true)
.try_init();
let mut audit = AuditScope::new("run_test");
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4());
let be = Backend::new(&mut audit, "", 1).expect("Failed to setup backend");
@ -1106,7 +1176,7 @@ mod tests {
let r = $test_fn(&mut audit, &mut be_txn);
// Commit, to guarantee it worked.
assert!(be_txn.commit(&mut audit).is_ok());
println!("{}", audit);
audit.write_log();
r
}};
}
@ -1415,7 +1485,7 @@ mod tests {
assert!(missing.len() == 7);
assert!(be.reindex(audit).is_ok());
let missing = be.missing_idxs(audit).unwrap();
println!("{:?}", missing);
debug!("{:?}", missing);
assert!(missing.is_empty());
});
}
@ -1444,7 +1514,7 @@ mod tests {
assert!(missing.len() == 7);
assert!(be.reindex(audit).is_ok());
let missing = be.missing_idxs(audit).unwrap();
println!("{:?}", missing);
debug!("{:?}", missing);
assert!(missing.is_empty());
// check name and uuid ids on eq, sub, pres
@ -1801,7 +1871,7 @@ mod tests {
let f_un =
unsafe { filter_resolved!(f_eq("no-index", PartialValue::new_utf8s("william"))) };
let r = be.filter2idl(audit, f_un.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_un.to_inner(), 0).unwrap();
match r {
IDL::ALLIDS => {}
_ => {
@ -1813,7 +1883,7 @@ mod tests {
let f_eq =
unsafe { filter_resolved!(f_eq("name", PartialValue::new_utf8s("william"))) };
let r = be.filter2idl(audit, f_eq.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_eq.to_inner(), 0).unwrap();
match r {
IDL::Indexed(idl) => {
assert!(idl == IDLBitRange::from_iter(vec![1]));
@ -1835,7 +1905,7 @@ mod tests {
]))
};
let r = be.filter2idl(audit, f_in_and.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_in_and.to_inner(), 0).unwrap();
match r {
IDL::Indexed(idl) => {
assert!(idl == IDLBitRange::from_iter(vec![1]));
@ -1860,7 +1930,7 @@ mod tests {
]))
};
let r = be.filter2idl(audit, f_p1.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_p1.to_inner(), 0).unwrap();
match r {
IDL::Partial(idl) => {
assert!(idl == IDLBitRange::from_iter(vec![1]));
@ -1870,7 +1940,7 @@ mod tests {
}
}
let r = be.filter2idl(audit, f_p2.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_p2.to_inner(), 0).unwrap();
match r {
IDL::Partial(idl) => {
assert!(idl == IDLBitRange::from_iter(vec![1]));
@ -1888,7 +1958,7 @@ mod tests {
]))
};
let r = be.filter2idl(audit, f_no_and.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_no_and.to_inner(), 0).unwrap();
match r {
IDL::ALLIDS => {}
_ => {
@ -1901,7 +1971,7 @@ mod tests {
filter_resolved!(f_or!([f_eq("name", PartialValue::new_utf8s("william"))]))
};
let r = be.filter2idl(audit, f_in_or.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_in_or.to_inner(), 0).unwrap();
match r {
IDL::Indexed(idl) => {
assert!(idl == IDLBitRange::from_iter(vec![1]));
@ -1918,7 +1988,7 @@ mod tests {
)]))
};
let r = be.filter2idl(audit, f_un_or.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_un_or.to_inner(), 0).unwrap();
match r {
IDL::ALLIDS => {}
_ => {
@ -1931,7 +2001,7 @@ mod tests {
filter_resolved!(f_andnot(f_eq("name", PartialValue::new_utf8s("william"))))
};
let r = be.filter2idl(audit, f_r_andnot.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_r_andnot.to_inner(), 0).unwrap();
match r {
IDL::Indexed(idl) => {
assert!(idl == IDLBitRange::from_iter(Vec::new()));
@ -1949,7 +2019,7 @@ mod tests {
))]))
};
let r = be.filter2idl(audit, f_and_andnot.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_and_andnot.to_inner(), 0).unwrap();
match r {
IDL::Indexed(idl) => {
assert!(idl == IDLBitRange::from_iter(Vec::new()));
@ -1966,7 +2036,7 @@ mod tests {
))]))
};
let r = be.filter2idl(audit, f_or_andnot.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_or_andnot.to_inner(), 0).unwrap();
match r {
IDL::Indexed(idl) => {
assert!(idl == IDLBitRange::from_iter(Vec::new()));
@ -1984,10 +2054,10 @@ mod tests {
]))
};
let r = be.filter2idl(audit, f_and_andnot.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_and_andnot.to_inner(), 0).unwrap();
match r {
IDL::Indexed(idl) => {
println!("{:?}", idl);
debug!("{:?}", idl);
assert!(idl == IDLBitRange::from_iter(vec![1]));
}
_ => {
@ -2002,7 +2072,7 @@ mod tests {
]))
};
let r = be.filter2idl(audit, f_and_andnot.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_and_andnot.to_inner(), 0).unwrap();
match r {
IDL::Indexed(idl) => {
assert!(idl == IDLBitRange::from_iter(vec![1]));
@ -2019,7 +2089,7 @@ mod tests {
]))
};
let r = be.filter2idl(audit, f_and_andnot.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_and_andnot.to_inner(), 0).unwrap();
match r {
IDL::ALLIDS => {}
_ => {
@ -2034,7 +2104,7 @@ mod tests {
]))
};
let r = be.filter2idl(audit, f_and_andnot.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_and_andnot.to_inner(), 0).unwrap();
match r {
IDL::ALLIDS => {}
_ => {
@ -2045,7 +2115,7 @@ mod tests {
// empty or
let f_e_or = unsafe { filter_resolved!(f_or!([])) };
let r = be.filter2idl(audit, f_e_or.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_e_or.to_inner(), 0).unwrap();
match r {
IDL::Indexed(idl) => {
assert!(idl == IDLBitRange::from_iter(vec![]));
@ -2057,7 +2127,7 @@ mod tests {
let f_e_and = unsafe { filter_resolved!(f_and!([])) };
let r = be.filter2idl(audit, f_e_and.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_e_and.to_inner(), 0).unwrap();
match r {
IDL::Indexed(idl) => {
assert!(idl == IDLBitRange::from_iter(vec![]));
@ -2079,7 +2149,7 @@ mod tests {
let f_eq =
unsafe { filter_resolved!(f_eq("name", PartialValue::new_utf8s("william"))) };
let r = be.filter2idl(audit, f_eq.to_inner(), 0).unwrap();
let (r, _plan) = be.filter2idl(audit, f_eq.to_inner(), 0).unwrap();
match r {
IDL::ALLIDS => {}
_ => {

View file

@ -15,10 +15,10 @@ pub const JSON_IDM_ACP_XX_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff0000XX"],
"description": ["Builtin IDM Control for xx"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-0000000000XX\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-0000000000XX\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"attr\",\"value\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"attr\",\"value\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
@ -49,10 +49,10 @@ pub const JSON_IDM_ADMINS_ACP_RECYCLE_SEARCH_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000002"],
"description": ["Builtin IDM admin recycle bin search permission."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000019\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000019\"]}"
],
"acp_targetscope": [
"{\"Eq\": [\"class\", \"recycled\"]}"
"{\"eq\": [\"class\", \"recycled\"]}"
],
"acp_search_attr": ["name", "class", "uuid", "last_modified_cid"]
}
@ -65,10 +65,10 @@ pub const JSON_IDM_ADMINS_ACP_REVIVE_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000003"],
"description": ["Builtin IDM Administrators Access Controls."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000019\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000019\"]}"
],
"acp_targetscope": [
"{\"Eq\":[\"class\",\"recycled\"]}"
"{\"eq\":[\"class\",\"recycled\"]}"
],
"acp_modify_removedattr": ["class"],
"acp_modify_class": ["recycled"]
@ -82,10 +82,10 @@ pub const JSON_IDM_SELF_ACP_READ_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000004"],
"description": ["Builtin IDM Control for self read - required for whoami and many other functions."],
"acp_receiver": [
"{\"And\": [\"Self\", {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [\"self\", {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_targetscope": [
"\"Self\""
"\"self\""
],
"acp_search_attr": [
"name",
@ -109,10 +109,10 @@ pub const JSON_IDM_SELF_ACP_WRITE_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000021"],
"description": ["Builtin IDM Control for self write - required for people to update their own identities and credentials in line with best practices."],
"acp_receiver": [
"{\"And\": [\"Self\", {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}, {\"Eq\": [\"uuid\", \"00000000-0000-0000-0000-ffffffffffff\"]}]}}]}"
"{\"and\": [\"self\", {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}, {\"eq\": [\"uuid\", \"00000000-0000-0000-0000-ffffffffffff\"]}]}}]}"
],
"acp_targetscope": [
"\"Self\""
"\"self\""
],
"acp_modify_removedattr": [
"name", "displayname", "legalname", "radius_secret", "primary_credential", "ssh_publickey", "unix_password"
@ -131,10 +131,10 @@ pub const JSON_IDM_ALL_ACP_READ_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000006"],
"description": ["Builtin IDM Control for all read - IE anonymous and all authenticated accounts."],
"acp_receiver": [
"{\"Pres\":\"class\"}"
"{\"pres\":\"class\"}"
],
"acp_targetscope": [
"{\"And\": [{\"Pres\": \"class\"}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"pres\": \"class\"}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"name",
@ -163,10 +163,10 @@ pub const JSON_IDM_ACP_PEOPLE_READ_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000007"],
"description": ["Builtin IDM Control for reading personal sensitive data."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000002\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000002\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"account\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"account\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"name", "displayname", "legalname", "mail"
@ -185,10 +185,10 @@ pub const JSON_IDM_ACP_PEOPLE_WRITE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000008"],
"description": ["Builtin IDM Control for managing personal and sensitive data."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000003\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000003\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"person\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"person\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_modify_removedattr": [
"name", "displayname", "legalname", "mail"
@ -211,10 +211,10 @@ pub const JSON_IDM_ACP_PEOPLE_MANAGE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000013"],
"description": ["Builtin IDM Control for creating person (user) accounts"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000013\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000013\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"account\"]}, {\"Eq\": [\"class\",\"person\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"account\"]}, {\"eq\": [\"class\",\"person\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_create_attr": [
"class",
@ -245,10 +245,10 @@ pub const JSON_IDM_ACP_PEOPLE_ACCOUNT_PASSWORD_IMPORT_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000031"],
"description": ["Builtin IDM Control for allowing imports of passwords to people+account types."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000023\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000023\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"person\"]}, {\"Eq\": [\"class\",\"account\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"person\"]}, {\"eq\": [\"class\",\"account\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_modify_removedattr": [
"password_import"
@ -271,10 +271,10 @@ pub const JSON_IDM_ACP_PEOPLE_EXTEND_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000032"],
"description": ["Builtin IDM Control for allowing person class extension"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000024\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000024\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"account\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"account\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_modify_presentattr": [
"class"
@ -298,10 +298,10 @@ pub const JSON_IDM_ACP_GROUP_WRITE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000009"],
"description": ["Builtin IDM Control for managing groups"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000004\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000004\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"group\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"group\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"class", "name", "spn", "uuid", "description", "member"
@ -326,10 +326,10 @@ pub const JSON_IDM_ACP_ACCOUNT_READ_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000010"],
"description": ["Builtin IDM Control for accounts."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000005\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000005\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"account\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"account\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"class", "name", "spn", "uuid", "displayname", "ssh_publickey", "primary_credential", "memberof", "mail", "gidnumber"
@ -348,10 +348,10 @@ pub const JSON_IDM_ACP_ACCOUNT_WRITE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000011"],
"description": ["Builtin IDM Control for managing accounts."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000006\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000006\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"account\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"account\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_modify_removedattr": [
"name", "displayname", "ssh_publickey", "primary_credential", "mail"
@ -374,10 +374,10 @@ pub const JSON_IDM_ACP_ACCOUNT_MANAGE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000012"],
"description": ["Builtin IDM Control for creating and deleting (service) accounts"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000014\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000014\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"account\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"account\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_create_attr": [
"class",
@ -407,10 +407,10 @@ pub const JSON_IDM_ACP_RADIUS_SERVERS_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000014"],
"description": ["Builtin IDM Control for RADIUS servers to read credentials and other needed details."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000007\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000007\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Pres\": \"class\"}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"pres\": \"class\"}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"name", "spn", "uuid", "radius_secret"
@ -429,10 +429,10 @@ pub const JSON_IDM_ACP_HP_ACCOUNT_READ_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000015"],
"description": ["Builtin IDM Control for reading high privilege accounts."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000009\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000009\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"account\"]}, {\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"account\"]}, {\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"class", "name", "spn", "uuid", "displayname", "ssh_publickey", "primary_credential", "memberof"
@ -451,10 +451,10 @@ pub const JSON_IDM_ACP_HP_ACCOUNT_WRITE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000016"],
"description": ["Builtin IDM Control for managing high privilege accounts."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000009\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000009\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"account\"]}, {\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"account\"]}, {\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_modify_removedattr": [
"name", "displayname", "ssh_publickey", "primary_credential"
@ -478,10 +478,10 @@ pub const JSON_IDM_ACP_HP_GROUP_WRITE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000017"],
"description": ["Builtin IDM Control for managing high privilege groups"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000012\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000012\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"group\"]}, {\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"group\"]}, {\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"class", "name", "uuid", "description", "member"
@ -509,10 +509,10 @@ pub const JSON_IDM_ACP_SCHEMA_WRITE_ATTRS_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000018"],
"description": ["Builtin IDM Control for management of schema attributes."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000010\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000010\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"attributetype\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"attributetype\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"class",
@ -570,10 +570,10 @@ pub const JSON_IDM_ACP_ACP_MANAGE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000019"],
"description": ["Builtin IDM Control for access profiles management."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000011\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000011\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"access_control_profile\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"access_control_profile\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"name",
@ -661,10 +661,10 @@ pub const JSON_IDM_ACP_SCHEMA_WRITE_CLASSES_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000020"],
"description": ["Builtin IDM Control for management of schema classes."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000010\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000010\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"classtype\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"classtype\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"class",
@ -718,10 +718,10 @@ pub const JSON_IDM_ACP_GROUP_MANAGE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000022"],
"description": ["Builtin IDM Control for creating and deleting groups in the directory"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000015\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000015\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"group\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"group\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_create_attr": [
"class",
@ -748,10 +748,10 @@ pub const JSON_IDM_ACP_HP_ACCOUNT_MANAGE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000023"],
"description": ["Builtin IDM Control for creating and deleting hp and regular (service) accounts"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000016\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000016\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"account\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"account\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_create_attr": [
"class",
@ -780,10 +780,10 @@ pub const JSON_IDM_ACP_HP_GROUP_MANAGE_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000024"],
"description": ["Builtin IDM Control for creating and deleting hp and regular groups in the directory"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000017\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000017\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"group\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"group\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_create_attr": [
"class",
@ -810,10 +810,10 @@ pub const JSON_IDM_ACP_DOMAIN_ADMIN_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000026"],
"description": ["Builtin IDM Control for granting domain info administration locally"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000020\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000020\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"uuid\",\"00000000-0000-0000-0000-ffffff000025\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"uuid\",\"00000000-0000-0000-0000-ffffff000025\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"name",
@ -844,10 +844,10 @@ pub const JSON_IDM_ACP_SYSTEM_CONFIG_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000028"],
"description": ["Builtin IDM Control for granting system configuration rights"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000019\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000019\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"uuid\",\"00000000-0000-0000-0000-ffffff000027\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"uuid\",\"00000000-0000-0000-0000-ffffff000027\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"name",
@ -874,10 +874,10 @@ pub const JSON_IDM_ACP_ACCOUNT_UNIX_EXTEND_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000029"],
"description": ["Builtin IDM Control for managing accounts."],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000021\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000021\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"account\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"account\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"class", "name", "spn", "uuid", "description", "gidnumber", "loginshell", "unix_password"
@ -904,10 +904,10 @@ pub const JSON_IDM_ACP_GROUP_UNIX_EXTEND_PRIV_V1: &str = r#"{
"uuid": ["00000000-0000-0000-0000-ffffff000030"],
"description": ["Builtin IDM Control for managing and extending unix groups"],
"acp_receiver": [
"{\"Eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000022\"]}"
"{\"eq\":[\"memberof\",\"00000000-0000-0000-0000-000000000022\"]}"
],
"acp_targetscope": [
"{\"And\": [{\"Eq\": [\"class\",\"group\"]}, {\"AndNot\": {\"Or\": [{\"Eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"Eq\": [\"class\", \"tombstone\"]}, {\"Eq\": [\"class\", \"recycled\"]}]}}]}"
"{\"and\": [{\"eq\": [\"class\",\"group\"]}, {\"andnot\": {\"or\": [{\"eq\": [\"memberof\",\"00000000-0000-0000-0000-000000001000\"]}, {\"eq\": [\"class\", \"tombstone\"]}, {\"eq\": [\"class\", \"recycled\"]}]}}]}"
],
"acp_search_attr": [
"class", "name", "spn", "uuid", "description", "member", "gidnumber"

View file

@ -0,0 +1,38 @@
use crate::audit::AuditScope;
use actix::prelude::*;
use crossbeam::channel::Sender;
use std::thread;
/// Handle to a running server instance: bundles the actix `System` with the
/// dedicated audit-log thread and the channel used to feed (and shut down)
/// that thread.
pub struct ServerCtx {
    // The actix runtime the server is executing on.
    system: System,
    // Sender side of the audit-log channel; `stop()` sends `None` through it
    // to signal the log thread to terminate.
    log_tx: Sender<Option<AuditScope>>,
    // Join handle for the background thread draining the log channel.
    log_thread: thread::JoinHandle<()>,
}
impl ServerCtx {
pub fn new(
system: System,
log_tx: Sender<Option<AuditScope>>,
log_thread: thread::JoinHandle<()>,
) -> Self {
ServerCtx {
system,
log_tx,
log_thread,
}
}
pub fn current(&self) -> System {
self.system.clone()
}
pub fn stop(self) {
// stop the actix system
self.system.stop();
// drain the log thread
self.log_tx
.send(None)
.expect("unable to shutdown log thread!");
self.log_thread.join().expect("failed to stop log thread");
}
}

File diff suppressed because it is too large Load diff

View file

@ -213,7 +213,7 @@ mod tests {
let otp = TOTP::new("".to_string(), key.clone(), step, algo.clone());
let d = Duration::from_secs(secs);
let r = otp.do_totp_duration_from_epoch(&d);
println!(
debug!(
"key: {:?}, algo: {:?}, time: {:?}, step: {:?}, expect: {:?} == {:?}",
key, algo, secs, step, expect, r
);

View file

@ -570,11 +570,11 @@ impl<STATE> Entry<EntryInvalid, STATE> {
Some(vs) => match vs.iter().take(1).next() {
Some(uuid_v) => match uuid_v.to_uuid() {
Some(uuid) => *uuid,
None => return Err(SchemaError::InvalidAttribute),
None => return Err(SchemaError::InvalidAttribute("uuid".to_string())),
},
None => return Err(SchemaError::MissingMustAttribute("uuid".to_string())),
None => return Err(SchemaError::MissingMustAttribute(vec!["uuid".to_string()])),
},
None => return Err(SchemaError::MissingMustAttribute("uuid".to_string())),
None => return Err(SchemaError::MissingMustAttribute(vec!["uuid".to_string()])),
};
// Build the new valid entry ...
@ -592,28 +592,32 @@ impl<STATE> Entry<EntryInvalid, STATE> {
{
// First, check we have class on the object ....
if !ne.attribute_pres("class") {
debug!("Missing attribute class");
return Err(SchemaError::InvalidClass);
// lrequest_error!("Missing attribute class");
return Err(SchemaError::NoClassFound);
}
// Do we have extensible?
let extensible = ne.attribute_value_pres("class", &CLASS_EXTENSIBLE);
let entry_classes = ne.classes().ok_or(SchemaError::InvalidClass)?;
let entry_classes_size = entry_classes.len();
let entry_classes = ne.classes().ok_or(SchemaError::NoClassFound)?;
let mut invalid_classes = Vec::with_capacity(0);
let classes: Vec<&SchemaClass> = entry_classes
let mut classes: Vec<&SchemaClass> = Vec::with_capacity(entry_classes.len());
entry_classes.for_each(|c: &Value| {
// we specify types here to help me clarify a few things in the
// development process :)
.filter_map(|c: &Value| {
let x: Option<&SchemaClass> = c.as_string().and_then(|s| schema_classes.get(s));
x
})
.collect();
match c.as_string() {
Some(s) => match schema_classes.get(s) {
Some(x) => classes.push(x),
None => invalid_classes.push(s.clone()),
},
None => invalid_classes.push("corrupt classname".to_string()),
}
});
if classes.len() != entry_classes_size {
debug!("Class on entry not found in schema?");
return Err(SchemaError::InvalidClass);
if invalid_classes.len() != 0 {
// lrequest_error!("Class on entry not found in schema?");
return Err(SchemaError::InvalidClass(invalid_classes));
};
// What this is really doing is taking a set of classes, and building an
@ -643,41 +647,47 @@ impl<STATE> Entry<EntryInvalid, STATE> {
// Check that all must are inplace
// for each attr in must, check it's present on our ent
for attr in must {
let mut missing_must = Vec::with_capacity(0);
must.iter().for_each(|attr| {
let avas = ne.get_ava(&attr.name);
if avas.is_none() {
return Err(SchemaError::MissingMustAttribute(attr.name.clone()));
missing_must.push(attr.name.clone());
}
});
if missing_must.len() != 0 {
return Err(SchemaError::MissingMustAttribute(missing_must));
}
debug!("Extensible object -> {}", extensible);
if extensible {
// ladmin_warning!("Extensible Object In Use!");
for (attr_name, avas) in ne.avas() {
match schema_attributes.get(attr_name) {
Some(a_schema) => {
// Now, for each type we do a *full* check of the syntax
// and validity of the ava.
if a_schema.phantom {
debug!(
/*
lrequest_error!(
"Attempt to add phantom attribute to extensible: {}",
attr_name
);
return Err(SchemaError::PhantomAttribute);
*/
return Err(SchemaError::PhantomAttribute(attr_name.clone()));
}
let r = a_schema.validate_ava(avas);
match r {
Ok(_) => {}
Err(e) => {
debug!("Failed to validate: {}", attr_name);
// lrequest_error!("Failed to validate: {}", attr_name);
return Err(e);
}
}
}
None => {
debug!("Invalid Attribute {} for extensible object", attr_name);
return Err(SchemaError::InvalidAttribute);
// lrequest_error!("Invalid Attribute {} for extensible object", attr_name);
return Err(SchemaError::InvalidAttribute(attr_name.clone()));
}
}
}
@ -725,14 +735,14 @@ impl<STATE> Entry<EntryInvalid, STATE> {
match r {
Ok(_) => {}
Err(e) => {
debug!("Failed to validate: {}", attr_name);
// lrequest_error!("Failed to validate: {}", attr_name);
return Err(e);
}
}
}
None => {
debug!("Invalid Attribute {} for may+must set", attr_name);
return Err(SchemaError::InvalidAttribute);
// lrequest_error!("Invalid Attribute {} for may+must set", attr_name);
return Err(SchemaError::InvalidAttribute(attr_name.clone()));
}
}
}
@ -1533,10 +1543,7 @@ impl<VALID, STATE> Entry<VALID, STATE> {
pub fn get_ava_single_protofilter(&self, attr: &str) -> Option<ProtoFilter> {
self.get_ava_single(attr)
.and_then(|v: &Value| {
debug!("get_ava_single_protofilter -> {:?}", v);
v.as_json_filter()
})
.and_then(|v: &Value| v.as_json_filter())
.map(|f: &ProtoFilter| (*f).clone())
}
@ -2086,12 +2093,12 @@ mod tests {
// When we do None, None, we get nothing back.
let r1 = Entry::idx_diff(&idxmeta, None, None);
println!("{:?}", r1);
debug!("{:?}", r1);
assert!(r1 == Vec::new());
// Check generating a delete diff
let del_r = Entry::idx_diff(&idxmeta, Some(&e1), None);
println!("{:?}", del_r);
debug!("{:?}", del_r);
assert!(
del_r[0]
== Err((
@ -2104,7 +2111,7 @@ mod tests {
// Check generating an add diff
let add_r = Entry::idx_diff(&idxmeta, None, Some(&e1));
println!("{:?}", add_r);
debug!("{:?}", add_r);
assert!(
add_r[0]
== Ok((
@ -2162,6 +2169,6 @@ mod tests {
"claire".to_string()
))
);
println!("{:?}", chg_r);
debug!("{:?}", chg_r);
}
}

View file

@ -988,6 +988,7 @@ impl WhoamiResult {
#[derive(Debug)]
pub struct PurgeTombstoneEvent {
pub event: Event,
pub eventid: Uuid,
}
impl Message for PurgeTombstoneEvent {
@ -998,6 +999,7 @@ impl PurgeTombstoneEvent {
pub fn new() -> Self {
PurgeTombstoneEvent {
event: Event::from_internal(),
eventid: Uuid::new_v4(),
}
}
}
@ -1005,6 +1007,7 @@ impl PurgeTombstoneEvent {
#[derive(Debug)]
pub struct PurgeRecycledEvent {
pub event: Event,
pub eventid: Uuid,
}
impl Message for PurgeRecycledEvent {
@ -1015,6 +1018,7 @@ impl PurgeRecycledEvent {
pub fn new() -> Self {
PurgeRecycledEvent {
event: Event::from_internal(),
eventid: Uuid::new_v4(),
}
}
}

View file

@ -138,6 +138,31 @@ pub struct FilterValidResolved {
inner: FilterResolved,
}
/// Describes how each component of a resolved filter will be (or was)
/// executed — whether a term can be satisfied from an index (`*Indexed`),
/// needs a full-entry test (`*Unindexed`), or references a corrupt index
/// (`*Corrupt`).
/// NOTE(review): payload meanings are inferred from variant names only —
/// the `(String, String)` pairs presumably carry the attribute and the
/// index key involved; confirm at the construction sites.
#[derive(Debug)]
pub enum FilterPlan {
    Invalid,
    // Equality (eq) terms.
    EqIndexed(String, String),
    EqUnindexed(String),
    EqCorrupt(String),
    // Substring (sub) terms.
    SubIndexed(String, String),
    SubUnindexed(String),
    SubCorrupt(String),
    // Presence (pres) terms.
    PresIndexed(String),
    PresUnindexed(String),
    PresCorrupt(String),
    // Less-than has no indexed form here.
    LessThanUnindexed(String),
    // Logical OR over sub-plans, with differing index coverage.
    OrUnindexed(Vec<FilterPlan>),
    OrIndexed(Vec<FilterPlan>),
    OrPartial(Vec<FilterPlan>),
    OrPartialThreshold(Vec<FilterPlan>),
    // Logical AND over sub-plans, with differing index coverage.
    AndEmptyCand(Vec<FilterPlan>),
    AndIndexed(Vec<FilterPlan>),
    AndUnindexed(Vec<FilterPlan>),
    AndPartial(Vec<FilterPlan>),
    AndPartialThreshold(Vec<FilterPlan>),
    // Negation of a nested plan.
    AndNot(Box<FilterPlan>),
}
/// A `Filter` is a logical set of assertions about the state of an [`Entry`] and
/// it's avas. `Filter`s are built from a set of possible assertions.
///
@ -476,7 +501,7 @@ impl FilterComp {
.map(|_| FilterComp::Eq(attr_norm, value.clone()))
// On error, pass the error back out.
}
None => Err(SchemaError::InvalidAttribute),
None => Err(SchemaError::InvalidAttribute(attr_norm)),
}
}
FilterComp::Sub(attr, value) => {
@ -491,7 +516,7 @@ impl FilterComp {
.map(|_| FilterComp::Sub(attr_norm, value.clone()))
// On error, pass the error back out.
}
None => Err(SchemaError::InvalidAttribute),
None => Err(SchemaError::InvalidAttribute(attr_norm)),
}
}
FilterComp::Pres(attr) => {
@ -502,7 +527,7 @@ impl FilterComp {
// Return our valid data
Ok(FilterComp::Pres(attr_norm))
}
None => Err(SchemaError::InvalidAttribute),
None => Err(SchemaError::InvalidAttribute(attr_norm)),
}
}
FilterComp::LessThan(attr, value) => {
@ -517,7 +542,7 @@ impl FilterComp {
.map(|_| FilterComp::LessThan(attr_norm, value.clone()))
// On error, pass the error back out.
}
None => Err(SchemaError::InvalidAttribute),
None => Err(SchemaError::InvalidAttribute(attr_norm)),
}
}
FilterComp::Or(filters) => {
@ -895,33 +920,46 @@ impl FilterResolved {
}
});
// finally, optimise this list by sorting.
f_list_new.sort_unstable();
f_list_new.dedup();
// return!
FilterResolved::And(f_list_new)
// If f_list_new collapsed to a single element, pop it and return.
if f_list_new.len() == 1 {
f_list_new.pop().expect("corrupt?")
} else {
// finally, optimise this list by sorting.
f_list_new.sort_unstable();
f_list_new.dedup();
// return!
FilterResolved::And(f_list_new)
}
}
FilterResolved::Or(f_list) => {
let (f_list_or, mut f_list_new): (Vec<_>, Vec<_>) = f_list
.iter()
// Optimise all inner items.
.map(|f_ref| f_ref.optimise())
// Split out inner-or terms to fold into this term.
.partition(|f| match f {
FilterResolved::Or(_) => true,
_ => false,
});
// Append the inner terms.
f_list_or.into_iter().for_each(|fc| {
if let FilterResolved::Or(mut l) = fc {
f_list_new.append(&mut l)
}
});
// sort, but reverse so that sub-optimal elements are later!
f_list_new.sort_unstable_by(|a, b| b.cmp(a));
f_list_new.dedup();
// If the f_list_or only has one element, pop it and return.
if f_list_new.len() == 1 {
f_list_new.pop().expect("corrupt?")
} else {
// sort, but reverse so that sub-optimal elements are earlier
// to promote fast-failure.
f_list_new.sort_unstable_by(|a, b| b.cmp(a));
f_list_new.dedup();
FilterResolved::Or(f_list_new)
FilterResolved::Or(f_list_new)
}
}
f => f.clone(),
}
@ -972,23 +1010,30 @@ mod tests {
let f_init_r = unsafe { f_init.into_valid_resolved() };
let f_init_o = f_init_r.optimise();
let f_init_e = unsafe { f_expect.into_valid_resolved() };
println!("--");
println!("init --> {:?}", f_init_r);
println!("opt --> {:?}", f_init_o);
println!("expect --> {:?}", f_init_e);
debug!("--");
debug!("init --> {:?}", f_init_r);
debug!("opt --> {:?}", f_init_o);
debug!("expect --> {:?}", f_init_e);
assert!(f_init_o == f_init_e);
}};
}
#[test]
fn test_filter_optimise() {
use env_logger;
::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug");
let _ = env_logger::builder()
.format_timestamp(None)
.format_level(false)
.is_test(true)
.try_init();
// Given sets of "optimisable" filters, optimise them.
filter_optimise_assert!(
f_and(vec![f_and(vec![f_eq(
"class",
PartialValue::new_class("test")
)])]),
f_and(vec![f_eq("class", PartialValue::new_class("test"))])
f_eq("class", PartialValue::new_class("test"))
);
filter_optimise_assert!(
@ -996,7 +1041,7 @@ mod tests {
"class",
PartialValue::new_class("test")
)])]),
f_or(vec![f_eq("class", PartialValue::new_class("test"))])
f_eq("class", PartialValue::new_class("test"))
);
filter_optimise_assert!(
@ -1004,10 +1049,7 @@ mod tests {
"class",
PartialValue::new_class("test")
)])])]),
f_and(vec![f_or(vec![f_and(vec![f_eq(
"class",
PartialValue::new_class("test")
)])])])
f_eq("class", PartialValue::new_class("test"))
);
// Later this can test duplicate filter detection.
@ -1073,8 +1115,7 @@ mod tests {
f_or(vec![
f_and(vec![
f_eq("class", PartialValue::new_class("test")),
f_eq("term", PartialValue::new_class("test")),
f_or(vec![f_eq("class", PartialValue::new_class("test"))])
f_eq("term", PartialValue::new_class("test"))
]),
f_eq("class", PartialValue::new_class("test")),
])

View file

@ -98,8 +98,10 @@ impl Account {
value: Entry<EntrySealed, EntryCommitted>,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let groups = Group::try_from_account_entry_ro(au, &value, qs)?;
try_from_entry!(value, groups)
lperf_segment!(au, "idm::account::try_from_entry_ro", || {
let groups = Group::try_from_account_entry_ro(au, &value, qs)?;
try_from_entry!(value, groups)
})
}
pub(crate) fn try_from_entry_rw(
@ -210,7 +212,7 @@ mod tests {
#[test]
fn test_idm_account_from_anonymous() {
let anon_e = entry_str_to_account!(JSON_ANONYMOUS_V1);
println!("{:?}", anon_e);
debug!("{:?}", anon_e);
// I think that's it? we may want to check anonymous mech ...
}

View file

@ -17,6 +17,7 @@ use std::time::Duration;
const BAD_PASSWORD_MSG: &str = "incorrect password";
const BAD_TOTP_MSG: &str = "incorrect totp";
const BAD_AUTH_TYPE_MSG: &str = "invalid authentication method in this context";
const BAD_CREDENTIALS: &str = "invalid credential message";
enum CredState {
Success(Vec<Claim>),
@ -72,10 +73,16 @@ impl TryFrom<&Credential> for CredHandler {
}
impl CredHandler {
pub fn validate(&mut self, creds: &[AuthCredential], ts: &Duration) -> CredState {
pub fn validate(
&mut self,
au: &mut AuditScope,
creds: &[AuthCredential],
ts: &Duration,
) -> CredState {
match self {
CredHandler::Denied => {
// Sad trombone.
lsecurity!(au, "Handler::Denied -> Result::Denied");
CredState::Denied("authentication denied")
}
CredHandler::Anonymous => {
@ -87,7 +94,10 @@ impl CredHandler {
// the session to continue up to some timelimit.
match acc {
// If denied, continue returning denied.
CredState::Denied(_) => acc,
CredState::Denied(_) => {
lsecurity!(au, "Handler::Anonymous -> Result::Denied - already denied");
acc
}
// We have a continue or success, it's important we keep checking here
// after the success, because if they sent "multiple" anonymous or
// they sent anon + password, we need to handle both cases. Double anon
@ -97,9 +107,13 @@ impl CredHandler {
match cred {
AuthCredential::Anonymous => {
// For anonymous, no claims will ever be issued.
lsecurity!(au, "Handler::Anonymous -> Result::Success");
CredState::Success(Vec::new())
}
_ => CredState::Denied(BAD_AUTH_TYPE_MSG),
_ => {
lsecurity!(au, "Handler::Anonymous -> Result::Denied - invalid cred type for handler");
CredState::Denied(BAD_AUTH_TYPE_MSG)
}
}
}
} // end match acc
@ -113,18 +127,26 @@ impl CredHandler {
|acc, cred| {
match acc {
// If failed, continue to fail.
CredState::Denied(_) => acc,
CredState::Denied(_) => {
lsecurity!(au, "Handler::Password -> Result::Denied - already denied");
acc
}
_ => {
match cred {
AuthCredential::Password(cleartext) => {
if pw.verify(cleartext.as_str()) {
lsecurity!(au, "Handler::Password -> Result::Success");
CredState::Success(Vec::new())
} else {
lsecurity!(au, "Handler::Password -> Result::Denied - incorrect password");
CredState::Denied(BAD_PASSWORD_MSG)
}
}
// All other cases fail.
_ => CredState::Denied(BAD_AUTH_TYPE_MSG),
_ => {
lsecurity!(au, "Handler::Anonymous -> Result::Denied - invalid cred type for handler");
CredState::Denied(BAD_AUTH_TYPE_MSG)
}
}
}
} // end match acc
@ -138,7 +160,10 @@ impl CredHandler {
CredState::Continue(vec![AuthAllowed::TOTP, AuthAllowed::Password]),
|acc, cred| {
match acc {
CredState::Denied(_) => acc,
CredState::Denied(_) => {
lsecurity!(au, "Handler::TOTPPassword -> Result::Denied - already denied");
acc
}
_ => {
match cred {
AuthCredential::Password(cleartext) => {
@ -149,15 +174,18 @@ impl CredHandler {
CredVerifyState::Init => {
// TOTP hasn't been run yet, we need it before
// we indicate the pw status.
lsecurity!(au, "Handler::TOTPPassword -> Result::Continue - TOTP -, password OK");
CredState::Continue(vec![AuthAllowed::TOTP])
}
CredVerifyState::Success => {
// The totp is success, and password good, let's go!
lsecurity!(au, "Handler::TOTPPassword -> Result::Success - TOTP OK, password OK");
CredState::Success(Vec::new())
}
CredVerifyState::Fail => {
// The totp already failed, send that message now.
// Should be impossible state.
lsecurity!(au, "Handler::TOTPPassword -> Result::Denied - TOTP Fail, password OK");
CredState::Denied(BAD_TOTP_MSG)
}
}
@ -167,15 +195,18 @@ impl CredHandler {
CredVerifyState::Init => {
// TOTP hasn't been run yet, we need it before
// we indicate the pw status.
lsecurity!(au, "Handler::TOTPPassword -> Result::Continue - TOTP -, password Fail");
CredState::Continue(vec![AuthAllowed::TOTP])
}
CredVerifyState::Success => {
// The totp is success, but password bad.
lsecurity!(au, "Handler::TOTPPassword -> Result::Denied - TOTP OK, password Fail");
CredState::Denied(BAD_PASSWORD_MSG)
}
CredVerifyState::Fail => {
// The totp already failed, remind.
// this should be an impossible state.
lsecurity!(au, "Handler::TOTPPassword -> Result::Denied - TOTP Fail, password Fail");
CredState::Denied(BAD_TOTP_MSG)
}
}
@ -187,22 +218,29 @@ impl CredHandler {
pw_totp.totp_state = CredVerifyState::Success;
match pw_totp.pw_state {
CredVerifyState::Init => {
lsecurity!(au, "Handler::TOTPPassword -> Result::Continue - TOTP OK, password -");
CredState::Continue(vec![AuthAllowed::Password])
}
CredVerifyState::Success => {
lsecurity!(au, "Handler::TOTPPassword -> Result::Success - TOTP OK, password OK");
CredState::Success(Vec::new())
}
CredVerifyState::Fail => {
lsecurity!(au, "Handler::TOTPPassword -> Result::Denied - TOTP OK, password Fail");
CredState::Denied(BAD_PASSWORD_MSG)
}
}
} else {
pw_totp.totp_state = CredVerifyState::Fail;
lsecurity!(au, "Handler::TOTPPassword -> Result::Denied - TOTP Fail, password -");
CredState::Denied(BAD_TOTP_MSG)
}
}
// All other cases fail.
_ => CredState::Denied(BAD_AUTH_TYPE_MSG),
_ => {
lsecurity!(au, "Handler::TOTPPassword -> Result::Denied - invalid cred type for handler");
CredState::Denied(BAD_AUTH_TYPE_MSG)
}
} // end match cred
}
} // end match acc
@ -306,7 +344,16 @@ impl AuthSession {
));
}
match self.handler.validate(creds, time) {
if creds.len() > 4 {
lsecurity!(
au,
"Credentials denied: potential flood/dos/bruteforce attempt. {} creds were sent.",
creds.len()
);
return Ok(AuthState::Denied(BAD_CREDENTIALS.to_string()));
}
match self.handler.validate(au, creds, time) {
CredState::Success(claims) => {
lsecurity!(au, "Successful cred handling");
self.finished = true;
@ -354,7 +401,9 @@ mod tests {
use crate::constants::{JSON_ADMIN_V1, JSON_ANONYMOUS_V1};
use crate::credential::totp::{TOTP, TOTP_DEFAULT_STEP};
use crate::credential::Credential;
use crate::idm::authsession::{AuthSession, BAD_AUTH_TYPE_MSG, BAD_PASSWORD_MSG, BAD_TOTP_MSG};
use crate::idm::authsession::{
AuthSession, BAD_AUTH_TYPE_MSG, BAD_CREDENTIALS, BAD_PASSWORD_MSG, BAD_TOTP_MSG,
};
use kanidm_proto::v1::{AuthAllowed, AuthCredential, AuthState};
use std::time::Duration;
@ -374,6 +423,29 @@ mod tests {
);
}
#[test]
fn test_idm_authsession_floodcheck_mech() {
let mut audit =
AuditScope::new("test_idm_authsession_floodcheck_mech", uuid::Uuid::new_v4());
let anon_account = entry_str_to_account!(JSON_ANONYMOUS_V1);
let mut session = AuthSession::new(anon_account, None);
let attempt = vec![
AuthCredential::Anonymous,
AuthCredential::Anonymous,
AuthCredential::Anonymous,
AuthCredential::Anonymous,
AuthCredential::Anonymous,
];
match session.validate_creds(&mut audit, &attempt, &Duration::from_secs(0)) {
Ok(AuthState::Denied(msg)) => {
assert!(msg == BAD_CREDENTIALS);
}
_ => panic!(),
};
audit.write_log();
}
#[test]
fn test_idm_authsession_missing_appid() {
let anon_account = entry_str_to_account!(JSON_ANONYMOUS_V1);
@ -388,7 +460,10 @@ mod tests {
#[test]
fn test_idm_authsession_simple_password_mech() {
let mut audit = AuditScope::new("test_idm_authsession_simple_password_mech");
let mut audit = AuditScope::new(
"test_idm_authsession_simple_password_mech",
uuid::Uuid::new_v4(),
);
// create the ent
let mut account = entry_str_to_account!(JSON_ADMIN_V1);
// manually load in a cred
@ -419,12 +494,15 @@ mod tests {
_ => panic!(),
};
println!("{}", audit);
audit.write_log();
}
#[test]
fn test_idm_authsession_totp_password_mech() {
let mut audit = AuditScope::new("test_idm_authsession_totp_password_mech");
let mut audit = AuditScope::new(
"test_idm_authsession_totp_password_mech",
uuid::Uuid::new_v4(),
);
// create the ent
let mut account = entry_str_to_account!(JSON_ADMIN_V1);
@ -650,6 +728,6 @@ mod tests {
};
}
println!("{}", audit);
audit.write_log();
}
}

View file

@ -32,9 +32,13 @@ macro_rules! run_idm_test {
use env_logger;
::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug");
let _ = env_logger::builder().is_test(true).try_init();
let _ = env_logger::builder()
.format_timestamp(None)
.format_level(false)
.is_test(true)
.try_init();
let mut audit = AuditScope::new("run_test");
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4());
let be = Backend::new(&mut audit, "", 1).expect("Failed to init be");
let schema_outer = Schema::new(&mut audit).expect("Failed to init schema");

View file

@ -136,97 +136,104 @@ impl<'a> IdmServerWriteTransaction<'a> {
match &ae.step {
AuthEventStep::Init(init) => {
// Allocate a session id, based on current time.
let sessionid = uuid_from_duration(ct, self.sid);
lperf_segment!(au, "idm::server::auth<Init>", || {
// Allocate a session id, based on current time.
let sessionid = uuid_from_duration(ct, self.sid);
// Begin the auth procedure!
// Start a read
//
// Actually we may not need this - at the time we issue the auth-init
// we could generate the uat, the nonce and cache hashes in memory,
// then this can just be fully without a txn.
//
// We do need a txn so that we can process/search and claims
// or related based on the quality of the provided auth steps
//
// We *DO NOT* need a write though, because I think that lock outs
// and rate limits are *per server* and *in memory* only.
//
// Check anything needed? Get the current auth-session-id from request
// because it associates to the nonce's etc which were all cached.
// Begin the auth procedure!
// Start a read
//
// Actually we may not need this - at the time we issue the auth-init
// we could generate the uat, the nonce and cache hashes in memory,
// then this can just be fully without a txn.
//
// We do need a txn so that we can process/search and claims
// or related based on the quality of the provided auth steps
//
// We *DO NOT* need a write though, because I think that lock outs
// and rate limits are *per server* and *in memory* only.
//
// Check anything needed? Get the current auth-session-id from request
// because it associates to the nonce's etc which were all cached.
let filter_entry = filter!(f_or!([
f_eq("name", PartialValue::new_iutf8s(init.name.as_str())),
// This currently says invalid syntax, which is correct, but also
// annoying because it would be nice to search both ...
// f_eq("uuid", name.as_str()),
]));
let filter_entry = filter!(f_or!([
f_eq("name", PartialValue::new_iutf8s(init.name.as_str())),
// This currently says invalid syntax, which is correct, but also
// annoying because it would be nice to search both ...
// f_eq("uuid", name.as_str()),
]));
// Get the first / single entry we expect here ....
let entry = match self.qs_read.internal_search(au, filter_entry) {
Ok(mut entries) => {
// Get only one entry out ...
if entries.len() >= 2 {
return Err(OperationError::InvalidDBState);
// Get the first / single entry we expect here ....
let entry = match self.qs_read.internal_search(au, filter_entry) {
Ok(mut entries) => {
// Get only one entry out ...
if entries.len() >= 2 {
return Err(OperationError::InvalidDBState);
}
entries.pop().ok_or(OperationError::NoMatchingEntries)?
}
entries.pop().ok_or(OperationError::NoMatchingEntries)?
}
Err(e) => {
// Something went wrong! Abort!
return Err(e);
}
};
Err(e) => {
// Something went wrong! Abort!
return Err(e);
}
};
lsecurity!(au, "Initiating Authentication Session for ... {:?}", entry);
lsecurity!(au, "Initiating Authentication Session for ... {:?}", entry);
// Now, convert the Entry to an account - this gives us some stronger
// typing and functionality so we can assess what auth types can
// continue, and helps to keep non-needed entry specific data
// out of the LRU.
let account = Account::try_from_entry_ro(au, entry, &mut self.qs_read)?;
let auth_session = AuthSession::new(account, init.appid.clone());
// Now, convert the Entry to an account - this gives us some stronger
// typing and functionality so we can assess what auth types can
// continue, and helps to keep non-needed entry specific data
// out of the LRU.
let account = Account::try_from_entry_ro(au, entry, &mut self.qs_read)?;
let auth_session = AuthSession::new(account, init.appid.clone());
// Get the set of mechanisms that can proceed. This is tied
// to the session so that it can mutate state and have progression
// of what's next, or ordering.
let next_mech = auth_session.valid_auth_mechs();
// Get the set of mechanisms that can proceed. This is tied
// to the session so that it can mutate state and have progression
// of what's next, or ordering.
let next_mech = auth_session.valid_auth_mechs();
// If we have a session of the same id, return an error (despite how
// unlikely this is ...
if self.sessions.contains_key(&sessionid) {
return Err(OperationError::InvalidSessionState);
}
self.sessions.insert(sessionid, auth_session);
// If we have a session of the same id, return an error (despite how
// unlikely this is ...
lperf_segment!(au, "idm::server::auth<Init> -> sessions", || {
if self.sessions.contains_key(&sessionid) {
Err(OperationError::InvalidSessionState)
} else {
self.sessions.insert(sessionid, auth_session);
// Debugging: ensure we really inserted ...
debug_assert!(self.sessions.get(&sessionid).is_some());
Ok(())
}
})?;
// Debugging: ensure we really inserted ...
debug_assert!(self.sessions.get(&sessionid).is_some());
Ok(AuthResult {
sessionid,
state: AuthState::Continue(next_mech),
Ok(AuthResult {
sessionid,
state: AuthState::Continue(next_mech),
})
})
}
AuthEventStep::Creds(creds) => {
// Do we have a session?
let auth_session = try_audit!(
au,
self.sessions
// Why is the session missing?
.get_mut(&creds.sessionid)
.ok_or(OperationError::InvalidSessionState)
);
// Process the credentials here as required.
// Basically throw them at the auth_session and see what
// falls out.
auth_session
.validate_creds(au, &creds.creds, &ct)
.map(|aus| {
AuthResult {
// Is this right?
sessionid: creds.sessionid,
state: aus,
}
})
lperf_segment!(au, "idm::server::auth<Creds>", || {
// Do we have a session?
let auth_session = try_audit!(
au,
self.sessions
// Why is the session missing?
.get_mut(&creds.sessionid)
.ok_or(OperationError::InvalidSessionState)
);
// Process the credentials here as required.
// Basically throw them at the auth_session and see what
// falls out.
auth_session
.validate_creds(au, &creds.creds, &ct)
.map(|aus| {
AuthResult {
// Is this right?
sessionid: creds.sessionid,
state: aus,
}
})
})
}
}
}
@ -252,9 +259,11 @@ impl<'a> IdmServerWriteTransaction<'a> {
account.verify_unix_credential(au, uae.cleartext.as_str())
}
pub fn commit(self) -> Result<(), OperationError> {
self.sessions.commit();
Ok(())
pub fn commit(self, au: &mut AuditScope) -> Result<(), OperationError> {
lperf_segment!(au, "idm::server::IdmServerWriteTransaction::commit", || {
self.sessions.commit();
Ok(())
})
}
}
@ -421,10 +430,11 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
account.spn.as_str(),
];
try_audit!(
au,
self.check_password_quality(au, pce.cleartext.as_str(), related_inputs.as_slice())
);
self.check_password_quality(au, pce.cleartext.as_str(), related_inputs.as_slice())
.map_err(|e| {
lrequest_error!(au, "check_password_quality -> {:?}", e);
e
})?;
// it returns a modify
let modlist = try_audit!(
@ -667,8 +677,10 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
}
pub fn commit(self, au: &mut AuditScope) -> Result<(), OperationError> {
self.mfareg_sessions.commit();
self.qs_write.commit(au)
lperf_segment!(au, "idm::server::IdmServerWriteTransaction::commit", || {
self.mfareg_sessions.commit();
self.qs_write.commit(au)
})
}
}
@ -746,9 +758,9 @@ mod tests {
}
};
println!("sessionid is ==> {:?}", sid);
debug!("sessionid is ==> {:?}", sid);
idms_write.commit().expect("Must not fail");
idms_write.commit(au).expect("Must not fail");
sid
};
@ -759,7 +771,7 @@ mod tests {
// Expect success
let r2 = idms_write.auth(au, &anon_step, Duration::from_secs(TEST_CURRENT_TIME));
println!("r2 ==> {:?}", r2);
debug!("r2 ==> {:?}", r2);
match r2 {
Ok(ar) => {
@ -786,7 +798,7 @@ mod tests {
}
};
idms_write.commit().expect("Must not fail");
idms_write.commit(au).expect("Must not fail");
}
});
}
@ -802,7 +814,7 @@ mod tests {
// Expect failure
let r2 = idms_write.auth(au, &anon_step, Duration::from_secs(TEST_CURRENT_TIME));
println!("r2 ==> {:?}", r2);
debug!("r2 ==> {:?}", r2);
match r2 {
Ok(_) => {
@ -859,7 +871,7 @@ mod tests {
}
};
idms_write.commit().expect("Must not fail");
idms_write.commit(au).expect("Must not fail");
sessionid
}
@ -875,7 +887,7 @@ mod tests {
// Expect success
let r2 = idms_write.auth(au, &anon_step, Duration::from_secs(TEST_CURRENT_TIME));
println!("r2 ==> {:?}", r2);
debug!("r2 ==> {:?}", r2);
match r2 {
Ok(ar) => {
@ -900,7 +912,7 @@ mod tests {
}
};
idms_write.commit().expect("Must not fail");
idms_write.commit(au).expect("Must not fail");
})
}
@ -914,7 +926,7 @@ mod tests {
// Expect success
let r2 = idms_write.auth(au, &anon_step, Duration::from_secs(TEST_CURRENT_TIME));
println!("r2 ==> {:?}", r2);
debug!("r2 ==> {:?}", r2);
match r2 {
Ok(ar) => {
@ -939,7 +951,7 @@ mod tests {
}
};
idms_write.commit().expect("Must not fail");
idms_write.commit(au).expect("Must not fail");
})
}
@ -979,7 +991,7 @@ mod tests {
// Expire as though we are in the future.
idms_write.expire_auth_sessions(Duration::from_secs(TEST_CURRENT_EXPIRE));
assert!(!idms_write.is_sessionid_present(&sid));
assert!(idms_write.commit().is_ok());
assert!(idms_write.commit(au).is_ok());
let idms_write = idms.write();
assert!(!idms_write.is_sessionid_present(&sid));
})
@ -1167,7 +1179,7 @@ mod tests {
Ok(None) => {}
_ => assert!(false),
};
assert!(idms_write.commit().is_ok());
assert!(idms_write.commit(au).is_ok());
// Check deleting the password
let mut idms_prox_write = idms.proxy_write(duration_from_epoch_now());
@ -1187,7 +1199,7 @@ mod tests {
Ok(None) => {}
_ => assert!(false),
};
assert!(idms_write.commit().is_ok());
assert!(idms_write.commit(au).is_ok());
})
}

View file

@ -1,4 +1,4 @@
// #![deny(warnings)]
#![deny(warnings)]
#![warn(unused_extern_crates)]
#[macro_use]

View file

@ -9,14 +9,18 @@ macro_rules! run_test {
use env_logger;
::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug");
let _ = env_logger::builder().is_test(true).try_init();
let _ = env_logger::builder()
.format_timestamp(None)
.format_level(false)
.is_test(true)
.try_init();
let mut audit = AuditScope::new("run_test");
let mut audit = AuditScope::new("run_test", uuid::Uuid::new_v4());
let be = match Backend::new(&mut audit, "", 1) {
Ok(be) => be,
Err(e) => {
debug!("{}", audit);
audit.write_log();
error!("{:?}", e);
panic!()
}

View file

@ -143,7 +143,7 @@ impl ModifyList<ModifyInvalid> {
Some(schema_a) => schema_a
.validate_value(&value)
.map(|_| Modify::Present(attr_norm, value.clone())),
None => Err(SchemaError::InvalidAttribute),
None => Err(SchemaError::InvalidAttribute(attr_norm)),
}
}
Modify::Removed(attr, value) => {
@ -152,14 +152,14 @@ impl ModifyList<ModifyInvalid> {
Some(schema_a) => schema_a
.validate_partialvalue(&value)
.map(|_| Modify::Removed(attr_norm, value.clone())),
None => Err(SchemaError::InvalidAttribute),
None => Err(SchemaError::InvalidAttribute(attr_norm)),
}
}
Modify::Purged(attr) => {
let attr_norm = schema.normalise_attr_name(attr);
match schema_attributes.get(&attr_norm) {
Some(_attr_name) => Ok(Modify::Purged(attr_norm)),
None => Err(SchemaError::InvalidAttribute),
None => Err(SchemaError::InvalidAttribute(attr_norm)),
}
}
})

View file

@ -76,13 +76,13 @@ fn enforce_unique<STATE>(
cand: &[Entry<EntryInvalid, STATE>],
attr: &str,
) -> Result<(), OperationError> {
debug!("{:?}", attr);
ltrace!(au, "{:?}", attr);
// Build a set of all the value -> uuid for the cands.
// If already exist, reject due to dup.
let cand_attr = try_audit!(au, get_cand_attr_set(au, cand, attr));
debug!("{:?}", cand_attr);
ltrace!(au, "{:?}", cand_attr);
// No candidates to check!
if cand_attr.is_empty() {
@ -104,7 +104,7 @@ fn enforce_unique<STATE>(
.collect()
));
debug!("{:?}", filt_in);
ltrace!(au, "{:?}", filt_in);
// If any results, reject.
let conflict_cand = try_audit!(au, qs.internal_exists(au, filt_in));
@ -193,7 +193,7 @@ impl Plugin for AttrUnique {
}
}
debug!("{:?}", res);
ltrace!(au, "{:?}", res);
res
}

View file

@ -290,10 +290,10 @@ mod tests {
"description": ["Builtin IDM Administrators Access Controls."],
"acp_enable": ["true"],
"acp_receiver": [
"{\"Eq\":[\"uuid\",\"00000000-0000-0000-0000-000000000000\"]}"
"{\"eq\":[\"uuid\",\"00000000-0000-0000-0000-000000000000\"]}"
],
"acp_targetscope": [
"{\"Pres\":\"class\"}"
"{\"pres\":\"class\"}"
],
"acp_search_attr": ["name", "class", "uuid"],
"acp_modify_class": ["system"],

View file

@ -7,7 +7,11 @@ macro_rules! setup_test {
use crate::utils::duration_from_epoch_now;
use env_logger;
::std::env::set_var("RUST_LOG", "actix_web=debug,kanidm=debug");
let _ = env_logger::builder().is_test(true).try_init();
let _ = env_logger::builder()
.format_timestamp(None)
.format_level(false)
.is_test(true)
.try_init();
// Create an in memory BE
let be = Backend::new($au, "", 1).expect("Failed to init BE");
@ -46,7 +50,7 @@ macro_rules! run_create_test {
use crate::server::QueryServer;
use crate::utils::duration_from_epoch_now;
let mut au = AuditScope::new("run_create_test");
let mut au = AuditScope::new("run_create_test", uuid::Uuid::new_v4());
lperf_segment!(&mut au, "plugins::macros::run_create_test", || {
let qs = setup_test!(&mut au, $preload_entries);
@ -60,7 +64,7 @@ macro_rules! run_create_test {
{
let mut qs_write = qs.write(duration_from_epoch_now());
let r = qs_write.create(&mut au, &ce);
debug!("r: {:?}", r);
debug!("test result: {:?}", r);
assert!(r == $expect);
$check(&mut au, &mut qs_write);
match r {
@ -79,7 +83,7 @@ macro_rules! run_create_test {
assert!(ver.len() == 0);
});
// Dump the raw audit log.
println!("{}", au);
au.write_log();
}};
}
@ -101,7 +105,7 @@ macro_rules! run_modify_test {
use crate::server::QueryServer;
use crate::utils::duration_from_epoch_now;
let mut au = AuditScope::new("run_modify_test");
let mut au = AuditScope::new("run_modify_test", uuid::Uuid::new_v4());
lperf_segment!(&mut au, "plugins::macros::run_modify_test", || {
let qs = setup_test!(&mut au, $preload_entries);
@ -124,7 +128,7 @@ macro_rules! run_modify_test {
"plugins::macros::run_modify_test -> post_test check",
|| { $check(&mut au, &mut qs_write) }
);
debug!("{:?}", r);
debug!("test result: {:?}", r);
assert!(r == $expect);
match r {
Ok(_) => {
@ -142,7 +146,7 @@ macro_rules! run_modify_test {
assert!(ver.len() == 0);
});
// Dump the raw audit log.
println!("{}", au);
au.write_log();
}};
}
@ -163,7 +167,7 @@ macro_rules! run_delete_test {
use crate::server::QueryServer;
use crate::utils::duration_from_epoch_now;
let mut au = AuditScope::new("run_delete_test");
let mut au = AuditScope::new("run_delete_test", uuid::Uuid::new_v4());
lperf_segment!(&mut au, "plugins::macros::run_delete_test", || {
let qs = setup_test!(&mut au, $preload_entries);
@ -177,6 +181,7 @@ macro_rules! run_delete_test {
{
let mut qs_write = qs.write(duration_from_epoch_now());
let r = qs_write.delete(&mut au, &de);
debug!("test result: {:?}", r);
$check(&mut au, &mut qs_write);
assert!(r == $expect);
match r {
@ -195,6 +200,6 @@ macro_rules! run_delete_test {
assert!(ver.len() == 0);
});
// Dump the raw audit log.
println!("{}", au);
au.write_log();
}};
}

View file

@ -457,7 +457,7 @@ mod tests {
let cands = $qs
.internal_search($au, filt)
.expect("Internal search failure");
println!("{:?}", cands);
debug!("assert_mo_cands {:?}", cands);
assert!(cands.len() == $cand);
}};
}
@ -1307,7 +1307,6 @@ mod tests {
#[test]
fn test_delete_mo_simple() {
debug!("TEST START");
// X -> B
let mut ea: Entry<EntryInit, EntryNew> = Entry::unsafe_from_entry_str(EA);

View file

@ -230,10 +230,10 @@ mod tests {
"description": ["Builtin IDM Administrators Access Controls for TESTING."],
"acp_enable": ["true"],
"acp_receiver": [
"{\"Eq\":[\"uuid\",\"00000000-0000-0000-0000-000000000000\"]}"
"{\"eq\":[\"uuid\",\"00000000-0000-0000-0000-000000000000\"]}"
],
"acp_targetscope": [
"{\"Pres\":\"class\"}"
"{\"pres\":\"class\"}"
],
"acp_search_attr": ["name", "class", "uuid", "classname", "attributename"],
"acp_modify_class": ["system", "domain_info"],

View file

@ -219,10 +219,10 @@ impl SchemaAttribute {
}
pub fn validate_ava(&self, ava: &BTreeSet<Value>) -> Result<(), SchemaError> {
debug!("Checking for valid {:?} -> {:?}", self.name, ava);
// ltrace!("Checking for valid {:?} -> {:?}", self.name, ava);
// Check multivalue
if !self.multivalue && ava.len() > 1 {
debug!("Ava len > 1 on single value attribute!");
// lrequest_error!("Ava len > 1 on single value attribute!");
return Err(SchemaError::InvalidAttributeSyntax);
};
// If syntax, check the type is correct
@ -496,8 +496,8 @@ pub trait SchemaTransaction {
match self.get_attributes().get(attr) {
Some(a_schema) => Ok(a_schema.multivalue),
None => {
debug!("Attribute does not exist?!");
Err(SchemaError::InvalidAttribute)
// ladmin_error!("Attribute does not exist?!");
Err(SchemaError::InvalidAttribute(attr.to_string()))
}
}
}
@ -1828,18 +1828,18 @@ mod tests {
#[test]
fn test_schema_simple() {
let mut audit = AuditScope::new("test_schema_simple");
let mut audit = AuditScope::new("test_schema_simple", uuid::Uuid::new_v4());
let schema = Schema::new(&mut audit).expect("failed to create schema");
let schema_ro = schema.read();
validate_schema!(schema_ro, &mut audit);
println!("{}", audit);
audit.write_log();
}
#[test]
fn test_schema_entries() {
// Given an entry, assert it's schema is valid
// We do
let mut audit = AuditScope::new("test_schema_entries");
let mut audit = AuditScope::new("test_schema_entries", uuid::Uuid::new_v4());
let schema_outer = Schema::new(&mut audit).expect("failed to create schema");
let schema = schema_outer.read();
let e_no_uuid: Entry<EntryInvalid, EntryNew> = unsafe {
@ -1853,7 +1853,7 @@ mod tests {
assert_eq!(
e_no_uuid.validate(&schema),
Err(SchemaError::MissingMustAttribute("uuid".to_string()))
Err(SchemaError::MissingMustAttribute(vec!["uuid".to_string()]))
);
let e_no_class: Entry<EntryInvalid, EntryNew> = unsafe {
@ -1867,7 +1867,7 @@ mod tests {
.into_invalid_new()
};
assert_eq!(e_no_class.validate(&schema), Err(SchemaError::InvalidClass));
assert_eq!(e_no_class.validate(&schema), Err(SchemaError::NoClassFound));
let e_bad_class: Entry<EntryInvalid, EntryNew> = unsafe {
Entry::unsafe_from_entry_str(
@ -1882,7 +1882,7 @@ mod tests {
};
assert_eq!(
e_bad_class.validate(&schema),
Err(SchemaError::InvalidClass)
Err(SchemaError::InvalidClass(vec!["zzzzzz".to_string()]))
);
let e_attr_invalid: Entry<EntryInvalid, EntryNew> = unsafe {
@ -1923,7 +1923,7 @@ mod tests {
assert_eq!(
e_attr_invalid_may.validate(&schema),
Err(SchemaError::InvalidAttribute)
Err(SchemaError::InvalidAttribute("zzzzz".to_string()))
);
let e_attr_invalid_syn: Entry<EntryInvalid, EntryNew> = unsafe {
@ -1985,13 +1985,13 @@ mod tests {
.into_invalid_new()
};
assert!(e_ok.validate(&schema).is_ok());
println!("{}", audit);
audit.write_log();
}
#[test]
fn test_schema_entry_validate() {
// Check that entries can be normalised and validated sanely
let mut audit = AuditScope::new("test_schema_entry_validate");
let mut audit = AuditScope::new("test_schema_entry_validate", uuid::Uuid::new_v4());
let schema_outer = Schema::new(&mut audit).expect("failed to create schema");
let schema = schema_outer.write();
@ -2032,12 +2032,12 @@ mod tests {
let e_valid = e_test.validate(&schema).expect("validation failure");
assert_eq!(e_expect, e_valid);
println!("{}", audit);
audit.write_log();
}
#[test]
fn test_schema_extensible() {
let mut audit = AuditScope::new("test_schema_extensible");
let mut audit = AuditScope::new("test_schema_extensible", uuid::Uuid::new_v4());
let schema_outer = Schema::new(&mut audit).expect("failed to create schema");
let schema = schema_outer.read();
// Just because you are extensible, doesn't mean you can be lazy
@ -2076,7 +2076,7 @@ mod tests {
assert_eq!(
e_extensible_phantom.validate(&schema),
Err(SchemaError::PhantomAttribute)
Err(SchemaError::PhantomAttribute("password_import".to_string()))
);
let e_extensible: Entry<EntryInvalid, EntryNew> = unsafe {
@ -2094,19 +2094,19 @@ mod tests {
/* Is okay because extensible! */
assert!(e_extensible.validate(&schema).is_ok());
println!("{}", audit);
audit.write_log();
}
#[test]
fn test_schema_filter_validation() {
let mut audit = AuditScope::new("test_schema_filter_validation");
let mut audit = AuditScope::new("test_schema_filter_validation", uuid::Uuid::new_v4());
let schema_outer = Schema::new(&mut audit).expect("failed to create schema");
let schema = schema_outer.read();
// Test non existant attr name
let f_mixed = filter_all!(f_eq("nonClAsS", PartialValue::new_class("attributetype")));
assert_eq!(
f_mixed.validate(&schema),
Err(SchemaError::InvalidAttribute)
Err(SchemaError::InvalidAttribute("nonclass".to_string()))
);
// test syntax of bool
@ -2156,13 +2156,13 @@ mod tests {
])))
})
);
println!("{}", audit);
audit.write_log();
}
#[test]
fn test_schema_class_phantom_reject() {
// Check that entries can be normalised and validated sanely
let mut audit = AuditScope::new("test_schema_class_phantom_reject");
let mut audit = AuditScope::new("test_schema_class_phantom_reject", uuid::Uuid::new_v4());
let schema_outer = Schema::new(&mut audit).expect("failed to create schema");
let mut schema = schema_outer.write();
@ -2183,6 +2183,6 @@ mod tests {
assert!(schema.validate(&mut audit).len() == 1);
println!("{}", audit);
audit.write_log();
}
}

View file

@ -2099,7 +2099,7 @@ mod tests {
assert!(cr.is_ok());
let r2 = server_txn.search(audit, &se2).expect("search failure");
println!("--> {:?}", r2);
debug!("--> {:?}", r2);
assert!(r2.len() == 1);
let expected = unsafe { vec![e.into_sealed_committed()] };
@ -2214,7 +2214,7 @@ mod tests {
assert!(
r_inv_1
== Err(OperationError::SchemaViolation(
SchemaError::InvalidAttribute
SchemaError::InvalidAttribute("tnanuanou".to_string())
))
);
@ -2231,7 +2231,7 @@ mod tests {
assert!(
server_txn.modify(audit, &me_inv_m)
== Err(OperationError::SchemaViolation(
SchemaError::InvalidAttribute
SchemaError::InvalidAttribute("htnaonu".to_string())
))
);
@ -2835,7 +2835,7 @@ mod tests {
&"cc8e95b4-c24f-4d68-ba54-8bed76f63930".to_string(),
);
println!("{:?}", r4);
debug!("{:?}", r4);
assert!(r4 == Ok(Value::new_refer_s("cc8e95b4-c24f-4d68-ba54-8bed76f63930").unwrap()));
})
}
@ -3336,11 +3336,11 @@ mod tests {
let schema = server_txn.get_schema();
for k in schema.get_attributes().keys() {
println!("{}", k);
debug!("{}", k);
}
println!("====");
debug!("====");
for k in schema.get_classes().keys() {
println!("{}", k);
debug!("{}", k);
}
})

View file

@ -1,14 +1,16 @@
use crate::async_log::{EventLog, LogEvent};
use crate::audit::AuditScope;
use actix::prelude::*;
use crossbeam::channel::Sender;
use uuid::Uuid;
pub struct StatusActor {
log_addr: actix::Addr<EventLog>,
log_tx: Sender<Option<AuditScope>>,
}
impl StatusActor {
pub fn start(log_addr: actix::Addr<EventLog>) -> actix::Addr<StatusActor> {
pub fn start(log_tx: Sender<Option<AuditScope>>) -> actix::Addr<StatusActor> {
SyncArbiter::start(1, move || StatusActor {
log_addr: log_addr.clone(),
log_tx: log_tx.clone(),
})
}
}
@ -17,7 +19,9 @@ impl Actor for StatusActor {
type Context = SyncContext<Self>;
}
pub struct StatusRequestEvent {}
pub struct StatusRequestEvent {
pub eventid: Uuid,
}
impl Message for StatusRequestEvent {
type Result = bool;
@ -26,9 +30,11 @@ impl Message for StatusRequestEvent {
impl Handler<StatusRequestEvent> for StatusActor {
type Result = bool;
fn handle(&mut self, _event: StatusRequestEvent, _ctx: &mut SyncContext<Self>) -> Self::Result {
self.log_addr.do_send(LogEvent {
msg: "status request event: ok".to_string(),
fn handle(&mut self, event: StatusRequestEvent, _ctx: &mut SyncContext<Self>) -> Self::Result {
let mut audit = AuditScope::new("status_handler", event.eventid.clone());
ladmin_info!(&mut audit, "status handler");
self.log_tx.send(Some(audit)).unwrap_or_else(|_| {
error!("CRITICAL: UNABLE TO COMMIT LOGS");
});
true
}

View file

@ -98,7 +98,8 @@ impl Opt {
}
}
fn main() {
#[actix_rt::main]
async fn main() {
// Read cli args, determine if we should backup/restore
let opt = Opt::from_args();
@ -113,7 +114,11 @@ fn main() {
} else {
::std::env::set_var("RUST_LOG", "actix_web=info,kanidm=info");
}
env_logger::init();
env_logger::builder()
.format_timestamp(None)
.format_level(false)
.init();
match opt {
Opt::Server(sopt) => {
@ -123,9 +128,19 @@ fn main() {
config.update_tls(&sopt.ca_path, &sopt.cert_path, &sopt.key_path);
config.update_bind(&sopt.bind);
let sys = actix::System::new("kanidm-server");
create_server_core(config);
let _ = sys.run();
let _sys = actix::System::new("kanidm-server");
let sctx = create_server_core(config);
match sctx {
Ok(sctx) => {
tokio::signal::ctrl_c().await.unwrap();
println!("Ctrl-C received, shutting down");
sctx.stop()
}
Err(_) => {
error!("Failed to start server core!");
return;
}
}
}
Opt::Backup(bopt) => {
info!("Running in backup mode ...");