Large rework of audit logging

William Brown 2018-12-27 15:22:03 +10:00
parent 4f459465c1
commit fcda7d395d
12 changed files with 316 additions and 250 deletions

.dockerignore Normal file

@@ -0,0 +1,5 @@
target
.git
.gitignore
test.db


@@ -22,6 +22,8 @@ bytes = "0.4"
 env_logger = "0.5"
 reqwest = "0.9"
+chrono = "0.4"
 tokio = "0.1"
 futures = "0.1"

Dockerfile Normal file

@@ -0,0 +1,24 @@
FROM opensuse/tumbleweed:latest
MAINTAINER william@blackhats.net.au
# /usr/bin/docker run --restart always --name lifx registry.blackhats.net.au/lifx
RUN echo HTTP_PROXY="http://proxy-bne1.net.blackhats.net.au:3128" > /etc/sysconfig/proxy
COPY . /home/rsidm/
WORKDIR /home/rsidm/
RUN zypper install -y timezone cargo rust rust-std gcc && \
RUSTC_BOOTSTRAP=1 cargo build --release && \
zypper rm -u -y cargo rust rust-std gcc && \
zypper clean
RUN cd /etc && \
ln -sf ../usr/share/zoneinfo/Australia/Brisbane localtime
RUN useradd -m -r rsidm
USER rsidm
ENV RUST_BACKTRACE 1
CMD ["/home/rsidm/target/release/rsidm"]


@@ -1,6 +1,10 @@
 use actix::prelude::*;
+use std::time::Duration;
 use std::time::SystemTime;
+use chrono::offset::Utc;
+use chrono::DateTime;

 #[macro_export]
 macro_rules! audit_log {
     ($audit:expr, $($arg:tt)*) => ({
@@ -9,7 +13,7 @@ macro_rules! audit_log {
             print!("DEBUG AUDIT -> ");
             println!($($arg)*)
         }
-        $audit.raw_event(
+        $audit.log_event(
             fmt::format(
                 format_args!($($arg)*)
             )
@@ -17,81 +21,114 @@ macro_rules! audit_log {
     })
 }

+/*
+ * This should be used as:
+ * audit_segment(|au| {
+ *     // au is the inner audit
+ *     do your work
+ *     audit_log!(au, ...?)
+ *     nested_caller(&mut au, ...)
+ * })
+ */
+macro_rules! audit_segment {
+    ($au:expr, $fun:expr) => {{
+        use std::time::Instant;
+
+        let start = Instant::now();
+        // start timer.
+        // run fun with our derived audit event.
+        let r = $fun();
+        // end timer, and diff
+        let end = Instant::now();
+        let diff = end.duration_since(start);
+
+        println!("diff: {:?}", diff);
+
+        // Return the result. Hope this works!
+        return r;
+    }};
+}
+
 #[derive(Debug, Serialize, Deserialize)]
-struct AuditInner {
+enum AuditEvent {
+    log(AuditLog),
+    scope(AuditScope),
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+struct AuditLog {
+    time: String,
     name: String,
-    time: SystemTime,
 }

 // This structure tracks and event lifecycle, and is eventually
 // sent to the logging system where it's structured and written
 // out to the current logging BE.
 #[derive(Debug, Serialize, Deserialize)]
-pub struct AuditEvent {
+pub struct AuditScope {
     // vec of start/end points of various parts of the event?
     // We probably need some functions for this. Is there a way in rust
     // to automatically annotate line numbers of code?
-    events: Vec<AuditInner>,
+    time: String,
+    name: String,
+    duration: Option<Duration>,
+    events: Vec<AuditEvent>,
 }

 // Allow us to be sent to the log subsystem
-impl Message for AuditEvent {
+impl Message for AuditScope {
     type Result = ();
 }

-impl AuditEvent {
-    pub fn new() -> Self {
-        AuditEvent { events: Vec::new() }
-    }
-
-    pub fn start_event(&mut self, name: &str) {
-        self.events.push(AuditInner {
-            name: String::from(name),
-            time: SystemTime::now(),
-        })
-    }
-
-    pub fn raw_event(&mut self, data: String) {
-        self.events.push(AuditInner {
-            name: data,
-            time: SystemTime::now(),
-        })
-    }
-
-    pub fn end_event(&mut self, name: &str) {
-        self.events.push(AuditInner {
-            name: String::from(name),
-            time: SystemTime::now(),
-        })
-    }
+impl AuditScope {
+    pub fn new(name: &str) -> Self {
+        let t_now = SystemTime::now();
+        let datetime: DateTime<Utc> = t_now.into();
+        AuditScope {
+            time: datetime.to_rfc3339(),
+            name: String::from(name),
+            duration: None,
+            events: Vec::new(),
+        }
+    }
+
+    // Given a new audit event, append it in.
+    pub fn append_scope(&mut self, scope: AuditScope) {
+        self.events.push(AuditEvent::scope(scope))
+    }
+
+    pub fn log_event(&mut self, data: String) {
+        let t_now = SystemTime::now();
+        let datetime: DateTime<Utc> = t_now.into();
+        self.events.push(AuditEvent::log(AuditLog {
+            time: datetime.to_rfc3339(),
+            name: data,
+        }))
+    }
 }

 #[cfg(test)]
 mod tests {
-    use super::AuditEvent;
+    use super::AuditScope;

     // Create and remove. Perhaps add some core details?
     #[test]
     fn test_audit_simple() {
-        let mut au = AuditEvent::new();
-        au.start_event("test");
-        au.end_event("test");
+        let mut au = AuditScope::new("au");
         let d = serde_json::to_string_pretty(&au).unwrap();
         println!("{}", d);
     }

-    fn test_audit_nested_inner(au: &mut AuditEvent) {
-        au.start_event("inner");
-        au.end_event("inner");
-    }
+    fn test_audit_nested_inner(au: &mut AuditScope) {}

     // Test calling nested functions and getting the details added correctly?
     #[test]
     fn test_audit_nested() {
-        let mut au = AuditEvent::new();
-        au.start_event("test");
+        let mut au = AuditScope::new("au");
         test_audit_nested_inner(&mut au);
-        au.end_event("test");
         let d = serde_json::to_string_pretty(&au).unwrap();
         println!("{}", d);
     }
@@ -99,9 +136,7 @@ mod tests {
     // Test failing to close an event
     #[test]
     fn test_audit_no_close() {
-        let mut au = AuditEvent::new();
-        au.start_event("test");
-        au.start_event("inner");
+        let mut au = AuditScope::new("au");
         let d = serde_json::to_string_pretty(&au).unwrap();
         println!("{}", d);
     }
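
As a reading aid (not part of the commit), a minimal sketch of how the reworked audit types are meant to be used, assuming the audit module above and serde_json are in scope; the scope name and messages are illustrative only:

use audit::AuditScope; // 2015-edition crate path, matching the modules above

fn sketch_usage() {
    // One scope per operation, timestamped (RFC 3339) at creation.
    let mut audit = AuditScope::new("handle_request");

    // Free-form messages become AuditEvent::log entries inside the scope.
    audit.log_event(String::from("begin request"));
    audit.log_event(String::from("request complete"));

    // The scope derives Serialize, so the log subsystem can write the whole
    // event tree out as one structured JSON document.
    println!("{}", serde_json::to_string_pretty(&audit).unwrap());
}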


@@ -7,7 +7,7 @@ use rusqlite::NO_PARAMS;
 use serde_json;
 // use uuid;

-use super::audit::AuditEvent;
+use super::audit::AuditScope;
 use super::entry::Entry;
 use super::filter::Filter;
@@ -18,14 +18,14 @@ mod sqlite_be;

 // This contacts the needed backend and starts it up
 #[derive(Debug, PartialEq)]
-pub struct BackendAuditEvent {
+pub struct BackendAuditScope {
     time_start: (),
     time_end: (),
 }

-impl BackendAuditEvent {
+impl BackendAuditScope {
     pub fn new() -> Self {
-        BackendAuditEvent {
+        BackendAuditScope {
             time_start: (),
             time_end: (),
         }
@@ -58,103 +58,100 @@ pub struct Backend {
 // In the future this will do the routing between the chosen backends etc.
 impl Backend {
-    pub fn new(audit: &mut AuditEvent, path: &str) -> Self {
+    pub fn new(audit: &mut AuditScope, path: &str) -> Self {
         // this has a ::memory() type, but will path == "" work?
-        audit.start_event("backend_new");
+        audit_segment!(audit, || {
             let manager = SqliteConnectionManager::file(path);
             let builder1 = Pool::builder();
             let builder2 = if path == "" {
                 builder1.max_size(1)
             } else {
                 // FIXME: Make this configurable
                 builder1.max_size(8)
             };
             // Look at max_size and thread_pool here for perf later
             let pool = builder2.build(manager).expect("Failed to create pool");
             {
                 let conn = pool.get().unwrap();
                 // Perform any migrations as required?
                 // I think we only need the core table here, indexing will do it's own
                 // thing later
                 // conn.execute("PRAGMA journal_mode=WAL;", NO_PARAMS).unwrap();
                 conn.execute(
                     "CREATE TABLE IF NOT EXISTS id2entry (
                         id INTEGER PRIMARY KEY ASC,
                         data TEXT NOT NULL
                     )
                     ",
                     NO_PARAMS,
                 )
                 .unwrap();
                 // Create a version table for migration indication
                 // Create the core db
             }
-        audit_log!(audit, "Starting DB workers ...");
-        audit.end_event("backend_new");
-        Backend { pool: pool }
+            Backend { pool: pool }
+        })
     }

     pub fn create(
         &mut self,
-        au: &mut AuditEvent,
+        au: &mut AuditScope,
         entries: &Vec<Entry>,
-    ) -> Result<BackendAuditEvent, BackendError> {
-        au.start_event("be_create");
-        // Start be audit timer
-        let be_audit = BackendAuditEvent::new();
+    ) -> Result<BackendAuditScope, BackendError> {
+        audit_segment!(au, || {
+            let be_audit = BackendAuditScope::new();
+            // Start be audit timer

             if entries.is_empty() {
                 // TODO: Better error
                 // End the timer
                 return Err(BackendError::EmptyRequest);
             }

             // Turn all the entries into relevent json/cbor types
             // we do this outside the txn to avoid blocking needlessly.
             // However, it could be pointless due to the extra string allocs ...
             let ser_entries: Vec<String> = entries
                 .iter()
                 .map(|val| {
                     // TODO: Should we do better than unwrap?
                     serde_json::to_string(&val).unwrap()
                 })
                 .collect();

             audit_log!(au, "serialising: {:?}", ser_entries);

             // THIS IS PROBABLY THE BIT WHERE YOU NEED DB ABSTRACTION
             {
                 let conn = self.pool.get().unwrap();
                 // Start a txn
                 conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap();

                 // write them all
                 for ser_entry in ser_entries {
                     conn.execute(
                         "INSERT INTO id2entry (data) VALUES (?1)",
                         &[&ser_entry as &ToSql],
                     )
                     .unwrap();
                 }

                 // TODO: update indexes (as needed)
                 // Commit the txn
                 conn.execute("COMMIT TRANSACTION", NO_PARAMS).unwrap();
             }

-        au.end_event("be_create");
-        // End the timer?
-        Ok(be_audit)
+            Ok(be_audit)
+        })
     }
-    // Take filter, and AuditEvent ref?
-    pub fn search(&self, au: &mut AuditEvent, filt: &Filter) -> Result<Vec<Entry>, BackendError> {
+    // Take filter, and AuditScope ref?
+    pub fn search(&self, au: &mut AuditScope, filt: &Filter) -> Result<Vec<Entry>, BackendError> {
         // Do things
         // Alloc a vec for the entries.
         // FIXME: Make this actually a good size for the result set ...
@@ -164,48 +161,47 @@ impl Backend {
         // possible) to create the candidate set.
         // Unlike DS, even if we don't get the index back, we can just pass
         // to the in-memory filter test and be done.
-        au.start_event("be_search");
+        audit_segment!(au, || {
             let mut raw_entries: Vec<String> = Vec::new();
             {
                 // Actually do a search now!
                 let conn = self.pool.get().unwrap();
                 // Start a txn
                 conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap();

                 // read them all
                 let mut stmt = conn.prepare("SELECT id, data FROM id2entry").unwrap();
                 let id2entry_iter = stmt
                     .query_map(NO_PARAMS, |row| IdEntry {
                         id: row.get(0),
                         data: row.get(1),
                     })
                     .unwrap();
                 for row in id2entry_iter {
                     audit_log!(au, "raw entry: {:?}", row);
                     // FIXME: Handle this properly.
                     raw_entries.push(row.unwrap().data);
                 }
                 // Rollback, we should have done nothing.
                 conn.execute("ROLLBACK TRANSACTION", NO_PARAMS).unwrap();
             }
             // Do other things
             // Now, de-serialise the raw_entries back to entries
             let entries: Vec<Entry> = raw_entries
                 .iter()
                 .filter_map(|val| {
                     // TODO: Should we do better than unwrap?
                     let e: Entry = serde_json::from_str(val.as_str()).unwrap();
                     if filt.entry_match_no_index(&e) {
                         Some(e)
                     } else {
                         None
                     }
                 })
                 .collect();

-        au.end_event("be_search");
-        Ok(entries)
+            Ok(entries)
+        })
     }

     pub fn modify() {}
@@ -235,7 +231,7 @@ mod tests {
     extern crate tokio;

-    use super::super::audit::AuditEvent;
+    use super::super::audit::AuditScope;
     use super::super::entry::Entry;
     use super::super::filter::Filter;
     use super::super::log;
@@ -244,7 +240,7 @@ mod tests {
     macro_rules! run_test {
         ($test_fn:expr) => {{
             System::run(|| {
-                let mut audit = AuditEvent::new();
+                let mut audit = AuditScope::new("run_test");
                 let test_log = log::start();
@@ -266,7 +262,7 @@ mod tests {

     #[test]
     fn test_simple_create() {
-        run_test!(|audit: &mut AuditEvent, mut be: Backend| {
+        run_test!(|audit: &mut AuditScope, mut be: Backend| {
             audit_log!(audit, "Simple Create");

             let empty_result = be.create(audit, &Vec::new());
@@ -296,7 +292,7 @@ mod tests {

     #[test]
     fn test_simple_search() {
-        run_test!(|audit: &mut AuditEvent, be| {
+        run_test!(|audit: &mut AuditScope, be| {
             audit_log!(audit, "Simple Search");

             future::ok(())
         });
@@ -304,7 +300,7 @@ mod tests {

     #[test]
     fn test_simple_modify() {
-        run_test!(|audit: &mut AuditEvent, be| {
+        run_test!(|audit: &mut AuditScope, be| {
             audit_log!(audit, "Simple Modify");

             future::ok(())
         });
@@ -312,7 +308,7 @@ mod tests {

     #[test]
     fn test_simple_delete() {
-        run_test!(|audit: &mut AuditEvent, be| {
+        run_test!(|audit: &mut AuditScope, be| {
             audit_log!(audit, "Simple Delete");

             future::ok(())
         });
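
The audit_segment! macro used above is, at heart, a timed closure. A plain-function sketch of what it does (the helper name here is hypothetical, not something this commit adds) might look like:

use std::time::Instant;

// Hypothetical stand-in for audit_segment!: run a closure and time it.
fn timed_segment<F, R>(name: &str, fun: F) -> R
where
    F: FnOnce() -> R,
{
    let start = Instant::now();
    let r = fun();
    let diff = Instant::now().duration_since(start);
    // As committed, the macro only prints the duration; the AuditScope
    // `duration` field is initialised to None and not yet filled in.
    println!("{} diff: {:?}", name, diff);
    r
}

Note that the committed macro body ends in `return r;`, so when expanded it returns from the enclosing function rather than simply yielding a value from the block, unlike this helper.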


@@ -11,7 +11,7 @@ use super::config::Configuration;
 use super::event::{CreateEvent, SearchEvent, SearchResult};
 use super::filter::Filter;
 use super::log;
-use super::proto_v1::{CreateRequest, SearchRequest, Response, SearchResponse};
+use super::proto_v1::{CreateRequest, Response, SearchRequest, SearchResponse};
 use super::server;

 struct AppState {
@@ -62,9 +62,9 @@ macro_rules! json_event_decode {
         )
         .from_err()
         .and_then(|res| match res {
-            Ok(event_result) => Ok(HttpResponse::Ok().json(
-                event_result.response()
-            )),
+            Ok(event_result) => {
+                Ok(HttpResponse::Ok().json(event_result.response()))
+            }
             Err(e) => Ok(HttpResponse::InternalServerError().json(e)),
         });
@@ -110,7 +110,7 @@ fn class_list((_name, state): (Path<String>, State<AppState>)) -> FutureResponse
         .from_err()
         .and_then(|res| match res {
             // What type is entry?
-            Ok(search_result) => Ok(HttpResponse::Ok().json( search_result.response() )),
+            Ok(search_result) => Ok(HttpResponse::Ok().json(search_result.response())),
             // Ok(_) => Ok(HttpResponse::Ok().into()),
             // Can we properly report this?
             Err(_) => Ok(HttpResponse::InternalServerError().into()),


@@ -1,6 +1,6 @@
 use super::filter::Filter;
 use super::proto_v1::Entry as ProtoEntry;
-use super::proto_v1::{CreateRequest, SearchRequest, SearchResponse, Response};
+use super::proto_v1::{CreateRequest, Response, SearchRequest, SearchResponse};
 use actix::prelude::*;
 use entry::Entry;
 use error::OperationError;
@@ -10,12 +10,11 @@ use error::OperationError;
 // have it's own result type!
 #[derive(Debug)]
-pub struct OpResult {
-}
+pub struct OpResult {}

 impl OpResult {
     pub fn response(self) -> Response {
-        Response{}
+        Response {}
     }
 }
@@ -28,19 +27,21 @@ impl SearchResult {
     pub fn new(entries: Vec<Entry>) -> Self {
         SearchResult {
             // FIXME: Can we consume this iter?
-            entries: entries.iter().map(|e| {
-                // FIXME: The issue here is this probably is applying transforms
-                // like access control ... May need to change.
-                e.into()
-            }).collect()
+            entries: entries
+                .iter()
+                .map(|e| {
+                    // FIXME: The issue here is this probably is applying transforms
+                    // like access control ... May need to change.
+                    e.into()
+                })
+                .collect(),
         }
     }

     // Consume self into a search response
     pub fn response(self) -> SearchResponse {
         SearchResponse {
-            entries: self.entries
+            entries: self.entries,
         }
     }
 }
@@ -88,15 +89,11 @@ impl CreateEvent {
             // From ProtoEntry -> Entry
             // What is the correct consuming iterator here? Can we
             // even do that?
-            entries: request.entries.iter().map(|e|
-                Entry::from(e)
-            ).collect(),
+            entries: request.entries.iter().map(|e| Entry::from(e)).collect(),
         }
     }

     pub fn from_vec(entries: Vec<Entry>) -> Self {
-        CreateEvent {
-            entries: entries
-        }
+        CreateEvent { entries: entries }
     }
 }


@@ -13,6 +13,7 @@ extern crate rusqlite;
 extern crate uuid;

 extern crate bytes;
+extern crate chrono;
 extern crate env_logger;

 // use actix::prelude::*;


@@ -1,7 +1,7 @@
 use actix::prelude::*;
 use serde_json;

-use super::audit::AuditEvent;
+use super::audit::AuditScope;

 // Helper for internal logging.
 #[macro_export]
@@ -62,10 +62,10 @@ impl Handler<LogEvent> for EventLog {
     }
 }

-impl Handler<AuditEvent> for EventLog {
+impl Handler<AuditScope> for EventLog {
     type Result = ();

-    fn handle(&mut self, event: AuditEvent, _: &mut SyncContext<Self>) -> Self::Result {
+    fn handle(&mut self, event: AuditScope, _: &mut SyncContext<Self>) -> Self::Result {
         let d = serde_json::to_string_pretty(&event).unwrap();
         println!("AUDIT: {}", d);
     }


@@ -26,7 +26,7 @@ pub struct Response {
 impl Response {
     pub fn new(_: ()) -> Self {
-        Response{}
+        Response {}
     }
 }
@@ -48,9 +48,7 @@ pub struct SearchResponse {
 impl SearchResponse {
     pub fn new(entries: Vec<Entry>) -> Self {
-        SearchResponse {
-            entries: entries
-        }
+        SearchResponse { entries: entries }
     }
 }


@@ -1,30 +1,36 @@
 use actix::prelude::*;
-use audit::AuditEvent;
+use audit::AuditScope;
 use be::{Backend, BackendError};
 use entry::Entry;
 use error::OperationError;
-use event::{CreateEvent, SearchEvent, SearchResult, OpResult};
+use event::{CreateEvent, OpResult, SearchEvent, SearchResult};
 use log::EventLog;
 use schema::Schema;

 pub fn start(log: actix::Addr<EventLog>, path: &str, threads: usize) -> actix::Addr<QueryServer> {
-    let mut audit = AuditEvent::new();
-    audit.start_event("server_new");
+    let mut audit = AuditScope::new("server_start");
+    audit_segment!(audit, || {
         // Create the BE connection
         // probably need a config type soon ....
-    let be = Backend::new(&mut audit, path);
+
+        // Create a new backend audit scope
+        let mut audit_be = AuditScope::new("backend_new");
+        let be = Backend::new(&mut audit_be, path);
+        audit.append_scope(audit_be);

         let mut schema = Schema::new();
         schema.bootstrap_core();
         // now we clone it out in the startup I think
         // Should the be need a log clone ref? or pass it around?
         // it probably needs it ...
-    audit.end_event("server_new");
+        // audit.end_event("server_new");
+
+        SyncArbiter::start(threads, move || {
+            QueryServer::new(log.clone(), be.clone(), schema.clone())
+        })
+    });
     log.do_send(audit);
-
-    SyncArbiter::start(threads, move || {
-        QueryServer::new(log.clone(), be.clone(), schema.clone())
-    })
 }

 // This is the core of the server. It implements all
@@ -60,22 +66,25 @@ impl QueryServer {
     // applies all parts required in order and more.
     pub fn search(
         &mut self,
-        au: &mut AuditEvent,
+        au: &mut AuditScope,
         se: &SearchEvent,
     ) -> Result<Vec<Entry>, OperationError> {
+        let mut audit_be = AuditScope::new("backend_search");
         let res = self
             .be
-            .search(au, &se.filter)
+            .search(&mut audit_be, &se.filter)
             .map(|r| r)
             .map_err(|_| OperationError::Backend);
-        // We'll add ACI later
+        au.append_scope(audit_be);
+
+        // TODO: We'll add ACI later
         res
     }

     // What should this take?
     // This should probably take raw encoded entries? Or sohuld they
     // be handled by fe?
-    pub fn create(&mut self, au: &mut AuditEvent, ce: &CreateEvent) -> Result<(), OperationError> {
+    pub fn create(&mut self, au: &mut AuditScope, ce: &CreateEvent) -> Result<(), OperationError> {
         // Start a txn
         // Run any pre checks
         // FIXME: Normalise all entries incoming
@@ -93,15 +102,17 @@ impl QueryServer {
             return r;
         }

+        let mut audit_be = AuditScope::new("backend_create");
         // We may change from ce.entries later to something else?
         let res = self
             .be
-            .create(au, &ce.entries)
+            .create(&mut audit_be, &ce.entries)
             .map(|_| ())
             .map_err(|e| match e {
                 BackendError::EmptyRequest => OperationError::EmptyRequest,
                 _ => OperationError::Backend,
             });
+        au.append_scope(audit_be);

         // Run and post checks
         // Commit/Abort the txn
@@ -128,23 +139,24 @@ impl Handler<SearchEvent> for QueryServer {
     type Result = Result<SearchResult, OperationError>;

     fn handle(&mut self, msg: SearchEvent, _: &mut Self::Context) -> Self::Result {
-        let mut audit = AuditEvent::new();
-        audit.start_event("search");
-        audit_log!(audit, "Begin event {:?}", msg);
+        let mut audit = AuditScope::new("search");
+        let res = audit_segment!(&mut audit, || {
+            audit_log!(audit, "Begin event {:?}", msg);

             // Parse what we need from the event?
             // What kind of event is it?

             // In the future we'll likely change search event ...

             // was this ok?
-        let res = match self.search(&mut audit, &msg) {
-            Ok(entries) => Ok(SearchResult::new(entries)),
-            Err(e) => Err(e),
-        };
-
-        audit_log!(audit, "End event {:?}", msg);
-        audit.end_event("search");
+            match self.search(&mut audit, &msg) {
+                Ok(entries) => Ok(SearchResult::new(entries)),
+                Err(e) => Err(e),
+            }
+
+            // audit_log!(audit, "End event {:?}", msg);
+            // audit.end_event("search");
+        });

         // At the end of the event we send it for logging.
         self.log.do_send(audit);
         res
@@ -155,17 +167,15 @@ impl Handler<CreateEvent> for QueryServer {
     type Result = Result<OpResult, OperationError>;

     fn handle(&mut self, msg: CreateEvent, _: &mut Self::Context) -> Self::Result {
-        let mut audit = AuditEvent::new();
-        audit.start_event("create");
-        audit_log!(audit, "Begin create event {:?}", msg);
-
-        let res = match self.create(&mut audit, &msg) {
-            Ok(()) => Ok(OpResult{}),
-            Err(e) => Err(e),
-        };
-
-        audit_log!(audit, "End create event {:?} -> {:?}", msg, res);
-        audit.end_event("create");
+        let mut audit = AuditScope::new("create");
+        let res = audit_segment!(&mut audit, || {
+            audit_log!(audit, "Begin create event {:?}", msg);
+
+            match self.create(&mut audit, &msg) {
+                Ok(()) => Ok(OpResult {}),
+                Err(e) => Err(e),
+            }
+        });

         // At the end of the event we send it for logging.
         self.log.do_send(audit);
         // At the end of the event we send it for logging.
@@ -186,21 +196,21 @@ mod tests {
     extern crate tokio;

-    use super::super::audit::AuditEvent;
+    use super::super::audit::AuditScope;
     use super::super::be::Backend;
     use super::super::entry::Entry;
     use super::super::event::{CreateEvent, SearchEvent};
     use super::super::filter::Filter;
     use super::super::log;
-    use super::super::proto_v1::{CreateRequest, SearchRequest};
     use super::super::proto_v1::Entry as ProtoEntry;
+    use super::super::proto_v1::{CreateRequest, SearchRequest};
     use super::super::schema::Schema;
     use super::super::server::QueryServer;

     macro_rules! run_test {
         ($test_fn:expr) => {{
             System::run(|| {
-                let mut audit = AuditEvent::new();
+                let mut audit = AuditScope::new("run_test");
                 let test_log = log::start();

                 let be = Backend::new(&mut audit, "");
@@ -224,7 +234,7 @@ mod tests {

     #[test]
     fn test_be_create_user() {
-        run_test!(|_log, mut server: QueryServer, audit: &mut AuditEvent| {
+        run_test!(|_log, mut server: QueryServer, audit: &mut AuditScope| {
             let filt = Filter::Pres(String::from("name"));

             let se1 = SearchEvent::from_request(SearchRequest::new(filt.clone()));
@@ -264,5 +274,5 @@ mod tests {
     // Test Create Empty
     //
 }
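
The parent/child pattern QueryServer::search follows above can be summarised in a short sketch, assuming the crate's AuditScope and OperationError are in scope; the backend stand-in function is hypothetical and only illustrates where append_scope sits relative to the fallible call:

// Hypothetical backend call: logs into its own scope and may fail.
fn backend_search(audit: &mut AuditScope, filter: &str) -> Result<Vec<String>, OperationError> {
    audit.log_event(format!("searching with filter {}", filter));
    Ok(Vec::new())
}

fn server_search(au: &mut AuditScope, filter: &str) -> Result<Vec<String>, OperationError> {
    // Child scope for the backend portion of the operation.
    let mut audit_be = AuditScope::new("backend_search");
    let res = backend_search(&mut audit_be, filter);
    // The child scope is appended whether or not the call succeeded, so the
    // parent scope always records the attempt before it is sent to the log.
    au.append_scope(audit_be);
    res
}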


@@ -4,7 +4,7 @@ use actix::prelude::*;
 extern crate rsidm;
 use rsidm::config::Configuration;
 use rsidm::core::create_server_core;
-use rsidm::proto_v1::{CreateRequest, SearchRequest, SearchResponse, Response, Entry};
+use rsidm::proto_v1::{CreateRequest, Entry, Response, SearchRequest, SearchResponse};

 extern crate reqwest;
@@ -68,9 +68,7 @@ fn test_server_proto() {
         )
         .unwrap();

-    let c = CreateRequest {
-        entries: vec![e]
-    };
+    let c = CreateRequest { entries: vec![e] };

     let mut response = client
         .post("http://127.0.0.1:8080/v1/create")