Large rework of audit logging

William Brown 2018-12-27 15:22:03 +10:00
parent 4f459465c1
commit fcda7d395d
12 changed files with 316 additions and 250 deletions
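At a glance, the rework replaces the start_event/end_event bookkeeping on AuditEvent with a nestable AuditScope and an audit_segment! timing macro, and the finished scope is sent to the log actor in one piece. A minimal sketch of the new call pattern, assembled from the diff below (illustration only, not part of the commit; `log` stands in for the EventLog actor address used in server.rs):

// Sketch: open a scope, time a block of work, nest a child scope, then ship the lot.
let mut audit = AuditScope::new("server_start");
audit_segment!(audit, || {
    let mut audit_be = AuditScope::new("backend_new");
    // ... backend work recorded against &mut audit_be ...
    audit_log!(audit_be, "Starting DB workers ...");
    audit.append_scope(audit_be);
});
log.do_send(audit);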

.dockerignore Normal file

@ -0,0 +1,5 @@
target
.git
.gitignore
test.db


@ -22,6 +22,8 @@ bytes = "0.4"
env_logger = "0.5"
reqwest = "0.9"
chrono = "0.4"
tokio = "0.1"
futures = "0.1"

Dockerfile Normal file

@ -0,0 +1,24 @@
FROM opensuse/tumbleweed:latest
MAINTAINER william@blackhats.net.au
# /usr/bin/docker run --restart always --name lifx registry.blackhats.net.au/lifx
RUN echo HTTP_PROXY="http://proxy-bne1.net.blackhats.net.au:3128" > /etc/sysconfig/proxy
COPY . /home/rsidm/
WORKDIR /home/rsidm/
RUN zypper install -y timezone cargo rust rust-std gcc && \
RUSTC_BOOTSTRAP=1 cargo build --release && \
zypper rm -u -y cargo rust rust-std gcc && \
zypper clean
RUN cd /etc && \
ln -sf ../usr/share/zoneinfo/Australia/Brisbane localtime
RUN useradd -m -r rsidm
USER rsidm
ENV RUST_BACKTRACE 1
CMD ["/home/rsidm/target/release/rsidm"]


@ -1,6 +1,10 @@
use actix::prelude::*;
use std::time::Duration;
use std::time::SystemTime;
use chrono::offset::Utc;
use chrono::DateTime;
#[macro_export]
macro_rules! audit_log {
($audit:expr, $($arg:tt)*) => ({
@ -9,7 +13,7 @@ macro_rules! audit_log {
print!("DEBUG AUDIT -> ");
println!($($arg)*)
}
$audit.raw_event(
$audit.log_event(
fmt::format(
format_args!($($arg)*)
)
@ -17,81 +21,114 @@ macro_rules! audit_log {
})
}
/*
* This should be used as:
* audit_segment(|au| {
* // au is the inner audit
* do your work
* audit_log!(au, ...?)
* nested_caller(&mut au, ...)
* })
*/
macro_rules! audit_segment {
($au:expr, $fun:expr) => {{
use std::time::Instant;
let start = Instant::now();
// start timer.
// run fun with our derived audit event.
let r = $fun();
// end timer, and diff
let end = Instant::now();
let diff = end.duration_since(start);
println!("diff: {:?}", diff);
// Return the result. Hope this works!
return r;
}};
}
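Following the usage comment above, a hedged sketch of how the macro is invoked (placeholder names; this mirrors the call sites that appear later in this diff):

// Sketch only: `au` is an AuditScope, the closure is the unit of work being timed.
let mut au = AuditScope::new("example");
audit_segment!(au, || {
    audit_log!(au, "doing some work");
    // nested_caller(&mut au, ...)
});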
#[derive(Debug, Serialize, Deserialize)]
struct AuditInner {
enum AuditEvent {
log(AuditLog),
scope(AuditScope),
}
#[derive(Debug, Serialize, Deserialize)]
struct AuditLog {
time: String,
name: String,
time: SystemTime,
}
// This structure tracks an event lifecycle, and is eventually
// sent to the logging system where it's structured and written
// out to the current logging BE.
#[derive(Debug, Serialize, Deserialize)]
pub struct AuditEvent {
pub struct AuditScope {
// vec of start/end points of various parts of the event?
// We probably need some functions for this. Is there a way in rust
// to automatically annotate line numbers of code?
events: Vec<AuditInner>,
time: String,
name: String,
duration: Option<Duration>,
events: Vec<AuditEvent>,
}
// Allow us to be sent to the log subsystem
impl Message for AuditEvent {
impl Message for AuditScope {
type Result = ();
}
impl AuditEvent {
pub fn new() -> Self {
AuditEvent { events: Vec::new() }
}
impl AuditScope {
pub fn new(name: &str) -> Self {
let t_now = SystemTime::now();
let datetime: DateTime<Utc> = t_now.into();
pub fn start_event(&mut self, name: &str) {
self.events.push(AuditInner {
AuditScope {
time: datetime.to_rfc3339(),
name: String::from(name),
time: SystemTime::now(),
})
duration: None,
events: Vec::new(),
}
}
pub fn raw_event(&mut self, data: String) {
self.events.push(AuditInner {
// Given a new audit event, append it in.
pub fn append_scope(&mut self, scope: AuditScope) {
self.events.push(AuditEvent::scope(scope))
}
pub fn log_event(&mut self, data: String) {
let t_now = SystemTime::now();
let datetime: DateTime<Utc> = t_now.into();
self.events.push(AuditEvent::log(AuditLog {
time: datetime.to_rfc3339(),
name: data,
time: SystemTime::now(),
})
}
pub fn end_event(&mut self, name: &str) {
self.events.push(AuditInner {
name: String::from(name),
time: SystemTime::now(),
})
}))
}
}
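A short sketch (not part of the diff) of composing nested scopes with the methods above; serde_json is assumed to be available, as in the tests below:

let mut outer = AuditScope::new("outer");
let mut inner = AuditScope::new("inner");
inner.log_event(String::from("inner work done"));
outer.append_scope(inner);
// The whole tree serialises to JSON for the logging backend.
println!("{}", serde_json::to_string_pretty(&outer).unwrap());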
#[cfg(test)]
mod tests {
use super::AuditEvent;
use super::AuditScope;
// Create and remove. Perhaps add some core details?
#[test]
fn test_audit_simple() {
let mut au = AuditEvent::new();
au.start_event("test");
au.end_event("test");
let mut au = AuditScope::new("au");
let d = serde_json::to_string_pretty(&au).unwrap();
println!("{}", d);
}
fn test_audit_nested_inner(au: &mut AuditEvent) {
au.start_event("inner");
au.end_event("inner");
}
fn test_audit_nested_inner(au: &mut AuditScope) {}
// Test calling nested functions and getting the details added correctly?
#[test]
fn test_audit_nested() {
let mut au = AuditEvent::new();
au.start_event("test");
let mut au = AuditScope::new("au");
test_audit_nested_inner(&mut au);
au.end_event("test");
let d = serde_json::to_string_pretty(&au).unwrap();
println!("{}", d);
}
@ -99,9 +136,7 @@ mod tests {
// Test failing to close an event
#[test]
fn test_audit_no_close() {
let mut au = AuditEvent::new();
au.start_event("test");
au.start_event("inner");
let mut au = AuditScope::new("au");
let d = serde_json::to_string_pretty(&au).unwrap();
println!("{}", d);
}


@ -7,7 +7,7 @@ use rusqlite::NO_PARAMS;
use serde_json;
// use uuid;
use super::audit::AuditEvent;
use super::audit::AuditScope;
use super::entry::Entry;
use super::filter::Filter;
@ -18,14 +18,14 @@ mod sqlite_be;
// This contacts the needed backend and starts it up
#[derive(Debug, PartialEq)]
pub struct BackendAuditEvent {
pub struct BackendAuditScope {
time_start: (),
time_end: (),
}
impl BackendAuditEvent {
impl BackendAuditScope {
pub fn new() -> Self {
BackendAuditEvent {
BackendAuditScope {
time_start: (),
time_end: (),
}
@ -58,103 +58,100 @@ pub struct Backend {
// In the future this will do the routing between the chosen backends etc.
impl Backend {
pub fn new(audit: &mut AuditEvent, path: &str) -> Self {
pub fn new(audit: &mut AuditScope, path: &str) -> Self {
// this has a ::memory() type, but will path == "" work?
audit.start_event("backend_new");
let manager = SqliteConnectionManager::file(path);
let builder1 = Pool::builder();
let builder2 = if path == "" {
builder1.max_size(1)
} else {
// FIXME: Make this configurable
builder1.max_size(8)
};
// Look at max_size and thread_pool here for perf later
let pool = builder2.build(manager).expect("Failed to create pool");
audit_segment!(audit, || {
let manager = SqliteConnectionManager::file(path);
let builder1 = Pool::builder();
let builder2 = if path == "" {
builder1.max_size(1)
} else {
// FIXME: Make this configurable
builder1.max_size(8)
};
// Look at max_size and thread_pool here for perf later
let pool = builder2.build(manager).expect("Failed to create pool");
{
let conn = pool.get().unwrap();
// Perform any migrations as required?
// I think we only need the core table here, indexing will do its own
// thing later
// conn.execute("PRAGMA journal_mode=WAL;", NO_PARAMS).unwrap();
conn.execute(
"CREATE TABLE IF NOT EXISTS id2entry (
id INTEGER PRIMARY KEY ASC,
data TEXT NOT NULL
{
let conn = pool.get().unwrap();
// Perform any migrations as required?
// I think we only need the core table here, indexing will do its own
// thing later
// conn.execute("PRAGMA journal_mode=WAL;", NO_PARAMS).unwrap();
conn.execute(
"CREATE TABLE IF NOT EXISTS id2entry (
id INTEGER PRIMARY KEY ASC,
data TEXT NOT NULL
)
",
NO_PARAMS,
)
",
NO_PARAMS,
)
.unwrap();
.unwrap();
// Create a version table for migration indication
// Create a version table for migration indication
// Create the core db
}
// Create the core db
}
audit_log!(audit, "Starting DB workers ...");
audit.end_event("backend_new");
Backend { pool: pool }
Backend { pool: pool }
})
}
pub fn create(
&mut self,
au: &mut AuditEvent,
au: &mut AuditScope,
entries: &Vec<Entry>,
) -> Result<BackendAuditEvent, BackendError> {
au.start_event("be_create");
) -> Result<BackendAuditScope, BackendError> {
audit_segment!(au, || {
let be_audit = BackendAuditScope::new();
// Start be audit timer
let be_audit = BackendAuditEvent::new();
// Start be audit timer
if entries.is_empty() {
// TODO: Better error
// End the timer
return Err(BackendError::EmptyRequest);
}
// Turn all the entries into relevant json/cbor types
// we do this outside the txn to avoid blocking needlessly.
// However, it could be pointless due to the extra string allocs ...
let ser_entries: Vec<String> = entries
.iter()
.map(|val| {
// TODO: Should we do better than unwrap?
serde_json::to_string(&val).unwrap()
})
.collect();
audit_log!(au, "serialising: {:?}", ser_entries);
// THIS IS PROBABLY THE BIT WHERE YOU NEED DB ABSTRACTION
{
let conn = self.pool.get().unwrap();
// Start a txn
conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap();
// write them all
for ser_entry in ser_entries {
conn.execute(
"INSERT INTO id2entry (data) VALUES (?1)",
&[&ser_entry as &ToSql],
)
.unwrap();
if entries.is_empty() {
// TODO: Better error
// End the timer
return Err(BackendError::EmptyRequest);
}
// TODO: update indexes (as needed)
// Commit the txn
conn.execute("COMMIT TRANSACTION", NO_PARAMS).unwrap();
}
// Turn all the entries into relevant json/cbor types
// we do this outside the txn to avoid blocking needlessly.
// However, it could be pointless due to the extra string allocs ...
au.end_event("be_create");
// End the timer?
Ok(be_audit)
let ser_entries: Vec<String> = entries
.iter()
.map(|val| {
// TODO: Should we do better than unwrap?
serde_json::to_string(&val).unwrap()
})
.collect();
audit_log!(au, "serialising: {:?}", ser_entries);
// THIS IS PROBABLY THE BIT WHERE YOU NEED DB ABSTRACTION
{
let conn = self.pool.get().unwrap();
// Start a txn
conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap();
// write them all
for ser_entry in ser_entries {
conn.execute(
"INSERT INTO id2entry (data) VALUES (?1)",
&[&ser_entry as &ToSql],
)
.unwrap();
}
// TODO: update indexes (as needed)
// Commit the txn
conn.execute("COMMIT TRANSACTION", NO_PARAMS).unwrap();
}
Ok(be_audit)
})
}
// Take filter, and AuditEvent ref?
pub fn search(&self, au: &mut AuditEvent, filt: &Filter) -> Result<Vec<Entry>, BackendError> {
// Take filter, and AuditScope ref?
pub fn search(&self, au: &mut AuditScope, filt: &Filter) -> Result<Vec<Entry>, BackendError> {
// Do things
// Alloc a vec for the entries.
// FIXME: Make this actually a good size for the result set ...
@ -164,48 +161,47 @@ impl Backend {
// possible) to create the candidate set.
// Unlike DS, even if we don't get the index back, we can just pass
// to the in-memory filter test and be done.
au.start_event("be_search");
audit_segment!(au, || {
let mut raw_entries: Vec<String> = Vec::new();
{
// Actually do a search now!
let conn = self.pool.get().unwrap();
// Start a txn
conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap();
let mut raw_entries: Vec<String> = Vec::new();
{
// Actually do a search now!
let conn = self.pool.get().unwrap();
// Start a txn
conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap();
// read them all
let mut stmt = conn.prepare("SELECT id, data FROM id2entry").unwrap();
let id2entry_iter = stmt
.query_map(NO_PARAMS, |row| IdEntry {
id: row.get(0),
data: row.get(1),
})
.unwrap();
for row in id2entry_iter {
audit_log!(au, "raw entry: {:?}", row);
// FIXME: Handle this properly.
raw_entries.push(row.unwrap().data);
}
// Rollback, we should have done nothing.
conn.execute("ROLLBACK TRANSACTION", NO_PARAMS).unwrap();
}
// Do other things
// Now, de-serialise the raw_entries back to entries
let entries: Vec<Entry> = raw_entries
.iter()
.filter_map(|val| {
// TODO: Should we do better than unwrap?
let e: Entry = serde_json::from_str(val.as_str()).unwrap();
if filt.entry_match_no_index(&e) {
Some(e)
} else {
None
// read them all
let mut stmt = conn.prepare("SELECT id, data FROM id2entry").unwrap();
let id2entry_iter = stmt
.query_map(NO_PARAMS, |row| IdEntry {
id: row.get(0),
data: row.get(1),
})
.unwrap();
for row in id2entry_iter {
audit_log!(au, "raw entry: {:?}", row);
// FIXME: Handle this properly.
raw_entries.push(row.unwrap().data);
}
})
.collect();
// Rollback, we should have done nothing.
conn.execute("ROLLBACK TRANSACTION", NO_PARAMS).unwrap();
}
// Do other things
// Now, de-serialise the raw_entries back to entries
let entries: Vec<Entry> = raw_entries
.iter()
.filter_map(|val| {
// TODO: Should we do better than unwrap?
let e: Entry = serde_json::from_str(val.as_str()).unwrap();
if filt.entry_match_no_index(&e) {
Some(e)
} else {
None
}
})
.collect();
au.end_event("be_search");
Ok(entries)
Ok(entries)
})
}
pub fn modify() {}
@ -235,7 +231,7 @@ mod tests {
extern crate tokio;
use super::super::audit::AuditEvent;
use super::super::audit::AuditScope;
use super::super::entry::Entry;
use super::super::filter::Filter;
use super::super::log;
@ -244,7 +240,7 @@ mod tests {
macro_rules! run_test {
($test_fn:expr) => {{
System::run(|| {
let mut audit = AuditEvent::new();
let mut audit = AuditScope::new("run_test");
let test_log = log::start();
@ -266,7 +262,7 @@ mod tests {
#[test]
fn test_simple_create() {
run_test!(|audit: &mut AuditEvent, mut be: Backend| {
run_test!(|audit: &mut AuditScope, mut be: Backend| {
audit_log!(audit, "Simple Create");
let empty_result = be.create(audit, &Vec::new());
@ -296,7 +292,7 @@ mod tests {
#[test]
fn test_simple_search() {
run_test!(|audit: &mut AuditEvent, be| {
run_test!(|audit: &mut AuditScope, be| {
audit_log!(audit, "Simple Search");
future::ok(())
});
@ -304,7 +300,7 @@ mod tests {
#[test]
fn test_simple_modify() {
run_test!(|audit: &mut AuditEvent, be| {
run_test!(|audit: &mut AuditScope, be| {
audit_log!(audit, "Simple Modify");
future::ok(())
});
@ -312,7 +308,7 @@ mod tests {
#[test]
fn test_simple_delete() {
run_test!(|audit: &mut AuditEvent, be| {
run_test!(|audit: &mut AuditScope, be| {
audit_log!(audit, "Simple Delete");
future::ok(())
});


@ -11,7 +11,7 @@ use super::config::Configuration;
use super::event::{CreateEvent, SearchEvent, SearchResult};
use super::filter::Filter;
use super::log;
use super::proto_v1::{CreateRequest, SearchRequest, Response, SearchResponse};
use super::proto_v1::{CreateRequest, Response, SearchRequest, SearchResponse};
use super::server;
struct AppState {
@ -62,9 +62,9 @@ macro_rules! json_event_decode {
)
.from_err()
.and_then(|res| match res {
Ok(event_result) => Ok(HttpResponse::Ok().json(
event_result.response()
)),
Ok(event_result) => {
Ok(HttpResponse::Ok().json(event_result.response()))
}
Err(e) => Ok(HttpResponse::InternalServerError().json(e)),
});
@ -110,7 +110,7 @@ fn class_list((_name, state): (Path<String>, State<AppState>)) -> FutureResponse
.from_err()
.and_then(|res| match res {
// What type is entry?
Ok(search_result) => Ok(HttpResponse::Ok().json( search_result.response() )),
Ok(search_result) => Ok(HttpResponse::Ok().json(search_result.response())),
// Ok(_) => Ok(HttpResponse::Ok().into()),
// Can we properly report this?
Err(_) => Ok(HttpResponse::InternalServerError().into()),


@ -1,6 +1,6 @@
use super::filter::Filter;
use super::proto_v1::Entry as ProtoEntry;
use super::proto_v1::{CreateRequest, SearchRequest, SearchResponse, Response};
use super::proto_v1::{CreateRequest, Response, SearchRequest, SearchResponse};
use actix::prelude::*;
use entry::Entry;
use error::OperationError;
@ -10,12 +10,11 @@ use error::OperationError;
// have its own result type!
#[derive(Debug)]
pub struct OpResult {
}
pub struct OpResult {}
impl OpResult {
pub fn response(self) -> Response {
Response{}
Response {}
}
}
@ -28,19 +27,21 @@ impl SearchResult {
pub fn new(entries: Vec<Entry>) -> Self {
SearchResult {
// FIXME: Can we consume this iter?
entries: entries.iter().map(|e| {
// FIXME: The issue here is this probably is applying transforms
// like access control ... May need to change.
e.into()
}).collect()
entries: entries
.iter()
.map(|e| {
// FIXME: The issue here is this probably is applying transforms
// like access control ... May need to change.
e.into()
})
.collect(),
}
}
// Consume self into a search response
pub fn response(self) -> SearchResponse {
SearchResponse {
entries: self.entries
entries: self.entries,
}
}
}
@ -88,15 +89,11 @@ impl CreateEvent {
// From ProtoEntry -> Entry
// What is the correct consuming iterator here? Can we
// even do that?
entries: request.entries.iter().map(|e|
Entry::from(e)
).collect(),
entries: request.entries.iter().map(|e| Entry::from(e)).collect(),
}
}
pub fn from_vec(entries: Vec<Entry>) -> Self {
CreateEvent {
entries: entries
}
CreateEvent { entries: entries }
}
}


@ -13,6 +13,7 @@ extern crate rusqlite;
extern crate uuid;
extern crate bytes;
extern crate chrono;
extern crate env_logger;
// use actix::prelude::*;


@ -1,7 +1,7 @@
use actix::prelude::*;
use serde_json;
use super::audit::AuditEvent;
use super::audit::AuditScope;
// Helper for internal logging.
#[macro_export]
@ -62,10 +62,10 @@ impl Handler<LogEvent> for EventLog {
}
}
impl Handler<AuditEvent> for EventLog {
impl Handler<AuditScope> for EventLog {
type Result = ();
fn handle(&mut self, event: AuditEvent, _: &mut SyncContext<Self>) -> Self::Result {
fn handle(&mut self, event: AuditScope, _: &mut SyncContext<Self>) -> Self::Result {
let d = serde_json::to_string_pretty(&event).unwrap();
println!("AUDIT: {}", d);
}
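A sketch of handing a completed scope to this handler (not from the diff; `log` is assumed to be the actix::Addr<EventLog> returned by log::start()):

let mut audit = AuditScope::new("request");
audit.log_event(String::from("handled request"));
log.do_send(audit); // serialised and printed by the handler above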


@ -26,7 +26,7 @@ pub struct Response {
impl Response {
pub fn new(_: ()) -> Self {
Response{}
Response {}
}
}
@ -48,9 +48,7 @@ pub struct SearchResponse {
impl SearchResponse {
pub fn new(entries: Vec<Entry>) -> Self {
SearchResponse {
entries: entries
}
SearchResponse { entries: entries }
}
}


@ -1,30 +1,36 @@
use actix::prelude::*;
use audit::AuditEvent;
use audit::AuditScope;
use be::{Backend, BackendError};
use entry::Entry;
use error::OperationError;
use event::{CreateEvent, SearchEvent, SearchResult, OpResult};
use event::{CreateEvent, OpResult, SearchEvent, SearchResult};
use log::EventLog;
use schema::Schema;
pub fn start(log: actix::Addr<EventLog>, path: &str, threads: usize) -> actix::Addr<QueryServer> {
let mut audit = AuditEvent::new();
audit.start_event("server_new");
// Create the BE connection
// probably need a config type soon ....
let be = Backend::new(&mut audit, path);
let mut schema = Schema::new();
schema.bootstrap_core();
// now we clone it out in the startup I think
// Should the be need a log clone ref? or pass it around?
// it probably needs it ...
audit.end_event("server_new");
let mut audit = AuditScope::new("server_start");
audit_segment!(audit, || {
// Create the BE connection
// probably need a config type soon ....
// Create a new backend audit scope
let mut audit_be = AuditScope::new("backend_new");
let be = Backend::new(&mut audit_be, path);
audit.append_scope(audit_be);
let mut schema = Schema::new();
schema.bootstrap_core();
// now we clone it out in the startup I think
// Should the be need a log clone ref? or pass it around?
// it probably needs it ...
// audit.end_event("server_new");
SyncArbiter::start(threads, move || {
QueryServer::new(log.clone(), be.clone(), schema.clone())
})
});
log.do_send(audit);
SyncArbiter::start(threads, move || {
QueryServer::new(log.clone(), be.clone(), schema.clone())
})
}
// This is the core of the server. It implements all
@ -60,22 +66,25 @@ impl QueryServer {
// applies all parts required in order and more.
pub fn search(
&mut self,
au: &mut AuditEvent,
au: &mut AuditScope,
se: &SearchEvent,
) -> Result<Vec<Entry>, OperationError> {
let mut audit_be = AuditScope::new("backend_search");
let res = self
.be
.search(au, &se.filter)
.search(&mut audit_be, &se.filter)
.map(|r| r)
.map_err(|_| OperationError::Backend);
// We'll add ACI later
au.append_scope(audit_be);
// TODO: We'll add ACI later
res
}
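The child-scope pattern used above, sketched generically with hypothetical names (parent_audit and do_sub_operation are not from this diff):

// Sketch: give the sub-operation its own scope, then attach it to the caller's scope.
let mut audit_child = AuditScope::new("sub_operation");
let result = do_sub_operation(&mut audit_child);
parent_audit.append_scope(audit_child);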
// What should this take?
// This should probably take raw encoded entries? Or should they
// be handled by fe?
pub fn create(&mut self, au: &mut AuditEvent, ce: &CreateEvent) -> Result<(), OperationError> {
pub fn create(&mut self, au: &mut AuditScope, ce: &CreateEvent) -> Result<(), OperationError> {
// Start a txn
// Run any pre checks
// FIXME: Normalise all entries incoming
@ -93,15 +102,17 @@ impl QueryServer {
return r;
}
let mut audit_be = AuditScope::new("backend_create");
// We may change from ce.entries later to something else?
let res = self
.be
.create(au, &ce.entries)
.create(&mut audit_be, &ce.entries)
.map(|_| ())
.map_err(|e| match e {
BackendError::EmptyRequest => OperationError::EmptyRequest,
_ => OperationError::Backend,
});
au.append_scope(audit_be);
// Run and post checks
// Commit/Abort the txn
@ -128,23 +139,24 @@ impl Handler<SearchEvent> for QueryServer {
type Result = Result<SearchResult, OperationError>;
fn handle(&mut self, msg: SearchEvent, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditEvent::new();
audit.start_event("search");
audit_log!(audit, "Begin event {:?}", msg);
let mut audit = AuditScope::new("search");
let res = audit_segment!(&mut audit, || {
audit_log!(audit, "Begin event {:?}", msg);
// Parse what we need from the event?
// What kind of event is it?
// Parse what we need from the event?
// What kind of event is it?
// In the future we'll likely change search event ...
// In the future we'll likely change search event ...
// was this ok?
let res = match self.search(&mut audit, &msg) {
Ok(entries) => Ok(SearchResult::new(entries)),
Err(e) => Err(e),
};
// was this ok?
match self.search(&mut audit, &msg) {
Ok(entries) => Ok(SearchResult::new(entries)),
Err(e) => Err(e),
}
audit_log!(audit, "End event {:?}", msg);
audit.end_event("search");
// audit_log!(audit, "End event {:?}", msg);
// audit.end_event("search");
});
// At the end of the event we send it for logging.
self.log.do_send(audit);
res
@ -155,17 +167,15 @@ impl Handler<CreateEvent> for QueryServer {
type Result = Result<OpResult, OperationError>;
fn handle(&mut self, msg: CreateEvent, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditEvent::new();
audit.start_event("create");
audit_log!(audit, "Begin create event {:?}", msg);
let mut audit = AuditScope::new("create");
let res = audit_segment!(&mut audit, || {
audit_log!(audit, "Begin create event {:?}", msg);
let res = match self.create(&mut audit, &msg) {
Ok(()) => Ok(OpResult{}),
Err(e) => Err(e),
};
audit_log!(audit, "End create event {:?} -> {:?}", msg, res);
audit.end_event("create");
match self.create(&mut audit, &msg) {
Ok(()) => Ok(OpResult {}),
Err(e) => Err(e),
}
});
// At the end of the event we send it for logging.
self.log.do_send(audit);
// At the end of the event we send it for logging.
@ -186,21 +196,21 @@ mod tests {
extern crate tokio;
use super::super::audit::AuditEvent;
use super::super::audit::AuditScope;
use super::super::be::Backend;
use super::super::entry::Entry;
use super::super::event::{CreateEvent, SearchEvent};
use super::super::filter::Filter;
use super::super::log;
use super::super::proto_v1::{CreateRequest, SearchRequest};
use super::super::proto_v1::Entry as ProtoEntry;
use super::super::proto_v1::{CreateRequest, SearchRequest};
use super::super::schema::Schema;
use super::super::server::QueryServer;
macro_rules! run_test {
($test_fn:expr) => {{
System::run(|| {
let mut audit = AuditEvent::new();
let mut audit = AuditScope::new("run_test");
let test_log = log::start();
let be = Backend::new(&mut audit, "");
@ -224,7 +234,7 @@ mod tests {
#[test]
fn test_be_create_user() {
run_test!(|_log, mut server: QueryServer, audit: &mut AuditEvent| {
run_test!(|_log, mut server: QueryServer, audit: &mut AuditScope| {
let filt = Filter::Pres(String::from("name"));
let se1 = SearchEvent::from_request(SearchRequest::new(filt.clone()));
@ -264,5 +274,5 @@ mod tests {
// Test Create Empty
//
//
}


@ -4,7 +4,7 @@ use actix::prelude::*;
extern crate rsidm;
use rsidm::config::Configuration;
use rsidm::core::create_server_core;
use rsidm::proto_v1::{CreateRequest, SearchRequest, SearchResponse, Response, Entry};
use rsidm::proto_v1::{CreateRequest, Entry, Response, SearchRequest, SearchResponse};
extern crate reqwest;
@ -68,9 +68,7 @@ fn test_server_proto() {
)
.unwrap();
let c = CreateRequest {
entries: vec![e]
};
let c = CreateRequest { entries: vec![e] };
let mut response = client
.post("http://127.0.0.1:8080/v1/create")