diff --git a/src/lib/audit.rs b/src/lib/audit.rs
index 9ff870089..dba52b7e7 100644
--- a/src/lib/audit.rs
+++ b/src/lib/audit.rs
@@ -85,7 +85,7 @@ impl Message for AuditScope {
 impl fmt::Display for AuditScope {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        let mut depth = 0;
+        let mut _depth = 0;
         // write!(f, "{}: begin -> {}", self.time, self.name);
         let d = serde_json::to_string_pretty(self).unwrap();
         write!(f, "{}", d)
diff --git a/src/lib/be/mod.rs b/src/lib/be/mod.rs
index 9843f9c6d..133fbed32 100644
--- a/src/lib/be/mod.rs
+++ b/src/lib/be/mod.rs
@@ -10,6 +10,7 @@ use serde_json;
 use audit::AuditScope;
 use entry::{Entry, EntryCommitted, EntryNew, EntryValid};
 use filter::{Filter, FilterValid};
+use error::OperationError;
 
 mod idl;
 mod mem_be;
@@ -392,7 +393,7 @@ impl BackendWriteTransaction {
         unimplemented!()
     }
 
-    pub fn commit(mut self) -> Result<(), ()> {
+    pub fn commit(mut self) -> Result<(), OperationError> {
         println!("Commiting txn");
         assert!(!self.committed);
         self.committed = true;
@@ -401,7 +402,7 @@ impl BackendWriteTransaction {
             .map(|_| ())
             .map_err(|e| {
                 println!("{:?}", e);
-                ()
+                OperationError::BackendEngine
             })
     }
@@ -490,7 +491,7 @@ impl BackendWriteTransaction {
 // In the future this will do the routing between the chosen backends etc.
 impl Backend {
-    pub fn new(audit: &mut AuditScope, path: &str) -> Result<Self, ()> {
+    pub fn new(audit: &mut AuditScope, path: &str) -> Result<Self, OperationError> {
         // this has a ::memory() type, but will path == "" work?
         audit_segment!(audit, || {
             let manager = SqliteConnectionManager::file(path);
@@ -755,4 +756,12 @@ mod tests {
             assert!(be.delete(audit, &vec![r2.clone(), r3.clone()]).is_ok());
         });
     }
+
+    #[test]
+    fn test_backup_restore() {
+        run_test!(|audit: &mut AuditScope, be: &BackendWriteTransaction| {
+            be.restore();
+            be.backup();
+        });
+    }
 }
diff --git a/src/lib/core.rs b/src/lib/core.rs
index 6b9a21a38..881745147 100644
--- a/src/lib/core.rs
+++ b/src/lib/core.rs
@@ -2,8 +2,8 @@ use actix::Actor;
 use actix_web::middleware::session::{self, RequestSession};
 use actix_web::{
-    error, http, middleware, App, AsyncResponder, Error, FutureResponse, HttpMessage, HttpRequest,
-    HttpResponse, Path, Result, State,
+    error, http, middleware, App, Error, HttpMessage, HttpRequest,
+    HttpResponse, Result, State,
 };
 use bytes::BytesMut;
@@ -13,11 +13,10 @@ use super::config::Configuration;
 
 // SearchResult
 use super::event::{AuthEvent, CreateEvent, DeleteEvent, ModifyEvent, SearchEvent};
-use super::filter::Filter;
 use super::interval::IntervalActor;
 use super::log;
 use super::proto_v1::{
-    AuthRequest, AuthResponse, CreateRequest, DeleteRequest, ModifyRequest, SearchRequest,
+    AuthRequest, CreateRequest, DeleteRequest, ModifyRequest, SearchRequest,
 };
 use super::server;
diff --git a/src/lib/entry.rs b/src/lib/entry.rs
index 16d7fe68f..5c04e5f07 100644
--- a/src/lib/entry.rs
+++ b/src/lib/entry.rs
@@ -140,6 +140,7 @@ pub struct Entry {
 }
 
 impl Entry {
+    /*
     pub fn new() -> Self {
         Entry {
             // This means NEVER COMMITED
@@ -149,6 +150,7 @@ impl Entry {
             attrs: BTreeMap::new(),
         }
     }
+    */
 
     // FIXME: Can we consume protoentry?
     pub fn from(e: &ProtoEntry) -> Self {
@@ -182,7 +184,7 @@ impl Entry {
         // We need to clone before we start, as well be mutating content.
         // We destructure:
         let Entry {
-            valid,
+            valid: _,
             state,
             id,
             attrs,
@@ -356,6 +358,7 @@ impl Entry {
         }
     }
 
+    #[cfg(test)]
     pub unsafe fn to_valid_committed(self) -> Entry {
         Entry {
             valid: EntryValid,
@@ -514,6 +517,7 @@ impl Entry {
                         mods.push_mod(Modify::Purged(k.clone()));
                     }
                 }
+                // TODO: Do something with this error properly.
                 Err(e) => return Err(()),
             }
             for v in vs {
diff --git a/src/lib/error.rs b/src/lib/error.rs
index 3e496021c..3c1fd0d88 100644
--- a/src/lib/error.rs
+++ b/src/lib/error.rs
@@ -1,4 +1,6 @@
-#[derive(Debug, PartialEq)]
+// use rusqlite::Error as RusqliteError;
+
+#[derive(Serialize, Deserialize, Debug, PartialEq)]
 pub enum SchemaError {
     NotImplemented,
     InvalidClass,
@@ -16,10 +18,11 @@ pub enum OperationError {
     EmptyRequest,
     Backend,
     NoMatchingEntries,
-    SchemaViolation,
+    SchemaViolation(SchemaError),
     Plugin,
     FilterGeneration,
     InvalidDBState,
     InvalidRequestState,
     InvalidState,
+    BackendEngine,
 }
diff --git a/src/lib/filter.rs b/src/lib/filter.rs
index 83ac227bf..f6cf624e5 100644
--- a/src/lib/filter.rs
+++ b/src/lib/filter.rs
@@ -4,8 +4,7 @@ use error::SchemaError;
 use proto_v1::Filter as ProtoFilter;
-use regex::Regex;
-use schema::{SchemaAttribute, SchemaClass, SchemaReadTransaction};
+use schema::{SchemaReadTransaction};
 use std::cmp::{Ordering, PartialOrd};
 use std::marker::PhantomData;
@@ -177,7 +176,7 @@ impl Filter {
             ProtoFilter::Pres(a) => Filter::Pres(a.clone()),
             ProtoFilter::Or(l) => Filter::Or(l.iter().map(|f| Self::from(f)).collect()),
             ProtoFilter::And(l) => Filter::And(l.iter().map(|f| Self::from(f)).collect()),
-            ProtoFilter::AndNot(l) => Filter::AndNot(Box::new(Self::from(f))),
+            ProtoFilter::AndNot(l) => Filter::AndNot(Box::new(Self::from(l))),
         }
     }
 }
diff --git a/src/lib/plugins/base.rs b/src/lib/plugins/base.rs
index a76a5de40..bdce1fdc3 100644
--- a/src/lib/plugins/base.rs
+++ b/src/lib/plugins/base.rs
@@ -2,12 +2,12 @@ use plugins::Plugin;
 use uuid::Uuid;
 
 use audit::AuditScope;
-use be::{BackendReadTransaction, BackendTransaction, BackendWriteTransaction};
+use be::{BackendReadTransaction, BackendWriteTransaction};
 use entry::{Entry, EntryInvalid, EntryNew};
 use error::OperationError;
 use event::CreateEvent;
 use filter::Filter;
-use schema::{SchemaTransaction, SchemaWriteTransaction};
+use schema::{SchemaWriteTransaction};
 
 // TO FINISH
 /*
diff --git a/src/lib/plugins/mod.rs b/src/lib/plugins/mod.rs
index 6d3433f6a..ae4080707 100644
--- a/src/lib/plugins/mod.rs
+++ b/src/lib/plugins/mod.rs
@@ -1,9 +1,9 @@
 use audit::AuditScope;
-use be::{BackendTransaction, BackendWriteTransaction};
+use be::{BackendWriteTransaction};
 use entry::{Entry, EntryInvalid, EntryNew};
 use error::OperationError;
 use event::CreateEvent;
-use schema::{SchemaTransaction, SchemaWriteTransaction};
+use schema::{SchemaWriteTransaction};
 
 mod base;
 mod protected;
diff --git a/src/lib/schema.rs b/src/lib/schema.rs
index 32dee9af4..71723f738 100644
--- a/src/lib/schema.rs
+++ b/src/lib/schema.rs
@@ -1,7 +1,7 @@
 use super::audit::AuditScope;
 use super::constants::*;
 // use super::entry::Entry;
-use super::error::SchemaError;
+use super::error::{SchemaError, OperationError};
 // use super::filter::Filter;
 use std::collections::HashMap;
 // Apparently this is nightly only?
@@ -289,7 +289,7 @@ pub struct SchemaInner {
 pub trait SchemaReadTransaction {
     fn get_inner(&self) -> &SchemaInner;
 
-    fn validate(&self, audit: &mut AuditScope) -> Result<(), ()> {
+    fn validate(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
         self.get_inner().validate(audit)
     }
@@ -314,7 +314,7 @@ pub trait SchemaReadTransaction {
 }
 
 impl SchemaInner {
-    pub fn new(audit: &mut AuditScope) -> Result<Self, ()> {
+    pub fn new(audit: &mut AuditScope) -> Result<Self, OperationError> {
         let mut au = AuditScope::new("schema_new");
         let r = audit_segment!(au, || {
             //
@@ -643,7 +643,7 @@ impl SchemaInner {
     }
 
     // This shouldn't fail?
-    pub fn bootstrap_core(&mut self, audit: &mut AuditScope) -> Result<(), ()> {
+    pub fn bootstrap_core(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
         // This will create a set of sane, system core schema that we can use
         // main types are users, groups
         let mut au = AuditScope::new("schema_bootstrap_core");
@@ -845,9 +845,7 @@ impl SchemaInner {
         r
     }
 
-    pub fn validate(&self, audit: &mut AuditScope) -> Result<(), ()> {
-        // FIXME: How can we make this return a proper result?
-        //
+    pub fn validate(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
         // TODO: Does this need to validate anything further at all? The UUID
         // will be checked as part of the schema migration on startup, so I think
         // just that all the content is sane is fine.
@@ -862,14 +860,14 @@ impl SchemaInner {
                     a
                 );
                 if !self.attributes.contains_key(a) {
-                    return Err(());
+                    return Err(OperationError::SchemaViolation(SchemaError::Corrupted));
                 }
             }
             for a in &class.may {
                 // report the attribute.
                 audit_log!(audit, "validate may class:attr -> {}:{}", class.name, a);
                 if !self.attributes.contains_key(a) {
-                    return Err(());
+                    return Err(OperationError::SchemaViolation(SchemaError::Corrupted));
                 }
             }
             for a in &class.systemmust {
@@ -881,14 +879,14 @@ impl SchemaInner {
                     a
                 );
                 if !self.attributes.contains_key(a) {
-                    return Err(());
+                    return Err(OperationError::SchemaViolation(SchemaError::Corrupted));
                 }
             }
             for a in &class.must {
                 // report the attribute.
                 audit_log!(audit, "validate must class:attr -> {}:{}", class.name, a);
                 if !self.attributes.contains_key(a) {
-                    return Err(());
+                    return Err(OperationError::SchemaViolation(SchemaError::Corrupted));
                 }
             }
         }
@@ -924,7 +922,7 @@ pub struct SchemaWriteTransaction<'a> {
 }
 
 impl<'a> SchemaWriteTransaction<'a> {
-    pub fn bootstrap_core(&mut self, audit: &mut AuditScope) -> Result<(), ()> {
+    pub fn bootstrap_core(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
         self.inner.bootstrap_core(audit)
     }
@@ -933,8 +931,9 @@ impl<'a> SchemaWriteTransaction<'a> {
     // first, then schema to ensure that the be content matches our schema. Saying this, if your
     // schema commit fails we need to roll back still .... How great are transactions.
     // At the least, this is what validation is for!
-    pub fn commit(self) {
+    pub fn commit(self) -> Result<(), OperationError> {
         self.inner.commit();
+        Ok(())
     }
 }
@@ -957,7 +956,7 @@ impl SchemaReadTransaction for SchemaTransaction {
 }
 
 impl Schema {
-    pub fn new(audit: &mut AuditScope) -> Result<Self, ()> {
+    pub fn new(audit: &mut AuditScope) -> Result<Self, OperationError> {
         SchemaInner::new(audit).map(|si| Schema {
             inner: CowCell::new(si),
         })
diff --git a/src/lib/server.rs b/src/lib/server.rs
index bfaeca3fa..343426a6f 100644
--- a/src/lib/server.rs
+++ b/src/lib/server.rs
@@ -110,7 +110,8 @@ pub trait QueryServerReadTransaction {
         // TODO: Validate the filter
         let vf = match se.filter.validate(self.get_schema()) {
             Ok(f) => f,
-            Err(e) => return Err(OperationError::SchemaViolation),
+            // TODO: Do something with this error
+            Err(e) => return Err(OperationError::SchemaViolation(e)),
         };
 
         // TODO: Assert access control allows the filter and requested attrs.
@@ -139,7 +140,8 @@ pub trait QueryServerReadTransaction {
         // How to get schema?
         let vf = match ee.filter.validate(self.get_schema()) {
             Ok(f) => f,
-            Err(e) => return Err(OperationError::SchemaViolation),
+            // TODO: Do something with this error
+            Err(e) => return Err(OperationError::SchemaViolation(e)),
         };
 
         let res = self
@@ -316,6 +318,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
             return plug_pre_res;
         }
 
+        // TODO: Rework this to be better.
         let (norm_cand, invalid_cand): (
             Vec, _>>,
             Vec>,
         ) = candidates
@@ -328,8 +331,8 @@ impl<'a> QueryServerWriteTransaction<'a> {
             audit_log!(au, "Schema Violation: {:?}", err);
         }
 
-        if invalid_cand.len() > 0 {
-            return Err(OperationError::SchemaViolation);
+        for err in invalid_cand.iter() {
+            return Err(OperationError::SchemaViolation(err.unwrap_err()));
         }
 
         let norm_cand: Vec> = norm_cand
@@ -399,7 +402,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
             String::from("recycled"),
         )]);
 
-        let mut candidates: Vec> = pre_candidates
+        let candidates: Vec> = pre_candidates
             .into_iter()
             .map(|er| {
                 // TODO: Deal with this properly william
@@ -426,8 +429,9 @@ impl<'a> QueryServerWriteTransaction<'a> {
             audit_log!(au, "Schema Violation: {:?}", err);
         }
 
-        if invalid_cand.len() > 0 {
-            return Err(OperationError::SchemaViolation);
+        // TODO: Make this better
+        for err in invalid_cand.iter() {
+            return Err(OperationError::SchemaViolation(err.unwrap_err()));
         }
 
         let del_cand: Vec> = norm_cand
@@ -602,7 +606,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         // Clone a set of writeables.
         // Apply the modlist -> Remember, we have a set of origs
         // and the new modified ents.
-        let mut candidates: Vec> = pre_candidates
+        let candidates: Vec> = pre_candidates
             .into_iter()
             .map(|er| {
                 // TODO: Deal with this properly william
@@ -630,8 +634,9 @@ impl<'a> QueryServerWriteTransaction<'a> {
             audit_log!(au, "Schema Violation: {:?}", err);
         }
 
-        if invalid_cand.len() > 0 {
-            return Err(OperationError::SchemaViolation);
+        // TODO: Make this better
+        for err in invalid_cand.iter() {
+            return Err(OperationError::SchemaViolation(err.unwrap_err()));
         }
 
         let norm_cand: Vec> = norm_cand
@@ -865,7 +870,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         Ok(())
     }
 
-    pub fn commit(self, audit: &mut AuditScope) -> Result<(), ()> {
+    pub fn commit(self, audit: &mut AuditScope) -> Result<(), OperationError> {
         let QueryServerWriteTransaction {
             committed,
             be_txn,
@@ -880,14 +885,14 @@ impl<'a> QueryServerWriteTransaction<'a> {
         // to perform a reload BEFORE we commit.
         // Alternate, we attempt to reload during batch ops, but this seems
         // costly.
-            .map(|_| {
+            .and_then(|_| {
                 // Backend Commit
                 be_txn.commit()
-            })
-            .map(|_| {
-                // Schema commit: Since validate passed and be is good, this
-                // must now also be good.
-                schema.commit()
+                    .and_then(|_| {
+                        // Schema commit: Since validate passed and be is good, this
+                        // must now also be good.
+                        schema.commit()
+                    })
             })
         // Audit done
     }
@@ -947,13 +952,13 @@ impl Handler for QueryServer {
 
             let qs_write = self.write();
 
-            match qs_write.create(&mut audit, &msg) {
-                Ok(()) => {
-                    qs_write.commit(&mut audit);
-                    Ok(OpResult {})
-                }
-                Err(e) => Err(e),
-            }
+            qs_write.create(&mut audit, &msg)
+                .and_then(|_| {
+                    qs_write.commit(&mut audit)
+                        .map(|_| {
+                            OpResult {}
+                        })
+                })
         });
         // At the end of the event we send it for logging.
         self.log.do_send(audit);
@@ -971,13 +976,13 @@ impl Handler for QueryServer {
 
             let qs_write = self.write();
 
-            match qs_write.modify(&mut audit, &msg) {
-                Ok(()) => {
-                    qs_write.commit(&mut audit);
-                    Ok(OpResult {})
-                }
-                Err(e) => Err(e),
-            }
+            qs_write.modify(&mut audit, &msg)
+                .and_then(|_| {
+                    qs_write.commit(&mut audit)
+                        .map(|_| {
+                            OpResult {}
+                        })
+                })
         });
         self.log.do_send(audit);
         res
@@ -994,13 +999,13 @@ impl Handler for QueryServer {
 
             let qs_write = self.write();
 
-            match qs_write.delete(&mut audit, &msg) {
-                Ok(()) => {
-                    qs_write.commit(&mut audit);
-                    Ok(OpResult {})
-                }
-                Err(e) => Err(e),
-            }
+            qs_write.delete(&mut audit, &msg)
+                .and_then(|_| {
+                    qs_write.commit(&mut audit)
+                        .map(|_| {
+                            OpResult {}
+                        })
+                })
         });
         self.log.do_send(audit);
         res
@@ -1031,17 +1036,19 @@ impl Handler for QueryServer {
             audit_log!(audit, "Begin purge tombstone event {:?}", msg);
             let qs_write = self.write();
 
-            let res = match qs_write.purge_tombstones(&mut audit) {
-                Ok(()) => {
-                    qs_write.commit(&mut audit);
-                    Ok(OpResult {})
-                }
-                Err(e) => Err(e),
-            };
+            let res = qs_write.purge_tombstones(&mut audit)
+                .map(|_| {
+                    qs_write.commit(&mut audit)
+                        .map(|_| {
+                            OpResult {}
+                        })
+                });
             audit_log!(audit, "Purge tombstones result: {:?}", res);
+            res.expect("Invalid Server State");
         });
         // At the end of the event we send it for logging.
         self.log.do_send(audit);
+        res
     }
 }
@@ -1668,8 +1675,6 @@ mod tests {
             String::from("testperson1"),
         ));
         assert!(server_txn.delete(audit, &de_sin).is_ok());
-        // After a delete -> recycle, create duplicate name etc.
-        // Can in be seen by special search? (external recycle search)
 
         let filt_rc = ProtoFilter::Eq(String::from("class"), String::from("recycled"));
         let sre_rc = SearchEvent::from_rec_request(SearchRecycledRequest::new(filt_rc.clone()));
@@ -1677,6 +1682,7 @@ mod tests {
         assert!(r2.len() == 1);
 
         // Create dup uuid (rej)
+        // After a delete -> recycle, create duplicate name etc.
        let cr = server_txn.create(audit, &ce);
        assert!(cr.is_err());
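The recurring change across be/mod.rs, schema.rs and server.rs above is a single pattern: operations that previously returned `Result<_, ()>` now return a typed `OperationError`, and the backend and schema commits are sequenced with `and_then` so the first failure is propagated instead of being silently discarded. Below is a minimal, self-contained sketch of that pattern; the types and method bodies are illustrative stand-ins, not the project's actual `BackendWriteTransaction`/`SchemaWriteTransaction` implementations.

```rust
// Sketch only: stand-in types to illustrate chaining fallible commits with
// and_then. The real transaction types live in src/lib/be/mod.rs and
// src/lib/schema.rs.
#[derive(Debug, PartialEq)]
enum OperationError {
    BackendEngine,
    SchemaViolation(String),
}

struct BackendWriteTxn;
struct SchemaWriteTxn;

impl BackendWriteTxn {
    fn commit(self) -> Result<(), OperationError> {
        // A real backend would flush to sqlite here and map the driver
        // error into OperationError::BackendEngine.
        Ok(())
    }
}

impl SchemaWriteTxn {
    fn commit(self) -> Result<(), OperationError> {
        Ok(())
    }
}

// The schema commit only runs if the backend commit succeeded; the first
// error short-circuits and is returned to the caller unchanged.
fn commit_all(be_txn: BackendWriteTxn, schema: SchemaWriteTxn) -> Result<(), OperationError> {
    be_txn.commit().and_then(|_| schema.commit())
}

fn main() {
    assert_eq!(commit_all(BackendWriteTxn, SchemaWriteTxn), Ok(()));
}
```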