From 3ad0f0ca2852601624ba5d0efca9610577bcaa25 Mon Sep 17 00:00:00 2001 From: William Brown Date: Sun, 30 Dec 2018 12:17:09 +1000 Subject: [PATCH] Finished major transaction refactor --- Cargo.toml | 4 +- designs/access_profiles_and_security.rst | 7 + designs/auth.rst | 15 + designs/configuration.rst | 6 + designs/operation_batching.rst | 7 + designs/uid_gid_generation.rst | 7 + src/clients/whoami.rs | 2 +- src/lib/audit.rs | 16 +- src/lib/be/mod.rs | 463 ++++++---- src/lib/constants.rs | 39 + src/lib/core.rs | 7 +- src/lib/entry.rs | 47 +- src/lib/filter.rs | 6 +- src/lib/lib.rs | 3 + src/lib/log.rs | 1 - src/lib/plugins/base.rs | 431 +++++++++ src/lib/plugins/mod.rs | 28 +- src/lib/plugins/protected.rs | 2 + src/lib/proto_v1.rs | 13 + src/lib/schema.rs | 1031 ++++++++++++++-------- src/lib/server.rs | 375 ++++++-- src/server/main.rs | 1 - 22 files changed, 1854 insertions(+), 657 deletions(-) create mode 100644 designs/access_profiles_and_security.rst create mode 100644 designs/auth.rst create mode 100644 designs/configuration.rst create mode 100644 designs/operation_batching.rst create mode 100644 designs/uid_gid_generation.rst create mode 100644 src/lib/constants.rs create mode 100644 src/lib/plugins/base.rs create mode 100644 src/lib/plugins/protected.rs diff --git a/Cargo.toml b/Cargo.toml index 82c9e3ea7..e7f19d7bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,8 +42,10 @@ serde = "1.0" serde_json = "1.0" serde_derive = "1.0" -rusqlite = "0.15" +rusqlite = { version = "0.15", features = ["backup"] } r2d2 = "0.8" r2d2_sqlite = "0.7" +concread = "0.1" + diff --git a/designs/access_profiles_and_security.rst b/designs/access_profiles_and_security.rst new file mode 100644 index 000000000..7f991fa86 --- /dev/null +++ b/designs/access_profiles_and_security.rst @@ -0,0 +1,7 @@ + +* Filters are security checked for access +* attribute request lists are checked for access + +* profiles work on filters +* + diff --git a/designs/auth.rst b/designs/auth.rst new file mode 
100644 index 000000000..1de5a928f --- /dev/null +++ b/designs/auth.rst @@ -0,0 +1,15 @@ + +* auth is a stepped protocol (similar to SASL) +* we offer possible authentications +* these proceed until a deny or allow is hit. + +* we provide a cookie that is valid on all server instances (except read-onlies +that have unique cookie keys to prevent forgery of writable master cookies) + +* cookies can request tokens, tokens are signed cbor that contains the set +of group uuids + names derferenced so that a client can make all authorisation +decisions from a single datapoint + +* each token can be unique based on the type of auth (ie 2fa needed to get access +to admin groups) + diff --git a/designs/configuration.rst b/designs/configuration.rst new file mode 100644 index 000000000..984386c7b --- /dev/null +++ b/designs/configuration.rst @@ -0,0 +1,6 @@ + +* configuration is static and read at start up +** performance and simplicity +** configuration defines if a plugin is enabled on not +** no dynamic plugins + diff --git a/designs/operation_batching.rst b/designs/operation_batching.rst new file mode 100644 index 000000000..35d41889b --- /dev/null +++ b/designs/operation_batching.rst @@ -0,0 +1,7 @@ + +* create, delete, modify all take multiple objects to work on so that changes can be consistent. 
+ +* in theory, there should be one interface, "modify" that specifies create, delete, modify, so that all changes are possible in a single operation +* This however presents some schema verification changes, but they are not insurmountable and it would make the main server core simpler + + diff --git a/designs/uid_gid_generation.rst b/designs/uid_gid_generation.rst new file mode 100644 index 000000000..1c1e5c565 --- /dev/null +++ b/designs/uid_gid_generation.rst @@ -0,0 +1,7 @@ + +* user private group is implied +* uidnumber and gidnumber are stored on the entry +* if not set, derive from uuid +* if set, we respect the values + + diff --git a/src/clients/whoami.rs b/src/clients/whoami.rs index b4c376c0e..4ebd660ea 100644 --- a/src/clients/whoami.rs +++ b/src/clients/whoami.rs @@ -1,6 +1,6 @@ extern crate rsidm; -use rsidm::proto_v1; +// use rsidm::proto_v1; fn main() { println!("Hello whoami"); diff --git a/src/lib/audit.rs b/src/lib/audit.rs index 6e03e0502..9ff870089 100644 --- a/src/lib/audit.rs +++ b/src/lib/audit.rs @@ -45,6 +45,8 @@ macro_rules! audit_segment { let end = Instant::now(); let diff = end.duration_since(start); + $au.set_duration(diff); + // Return the result. Hope this works! r }}; @@ -52,8 +54,8 @@ macro_rules! audit_segment { #[derive(Serialize, Deserialize)] enum AuditEvent { - log(AuditLog), - scope(AuditScope), + Log(AuditLog), + Scope(AuditScope), } #[derive(Debug, Serialize, Deserialize)] @@ -83,6 +85,8 @@ impl Message for AuditScope { impl fmt::Display for AuditScope { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut depth = 0; + // write!(f, "{}: begin -> {}", self.time, self.name); let d = serde_json::to_string_pretty(self).unwrap(); write!(f, "{}", d) } @@ -105,16 +109,20 @@ impl AuditScope { self.name.as_str() } + pub fn set_duration(&mut self, diff: Duration) { + self.duration = Some(diff); + } + // Given a new audit event, append it in. 
pub fn append_scope(&mut self, scope: AuditScope) { - self.events.push(AuditEvent::scope(scope)) + self.events.push(AuditEvent::Scope(scope)) } pub fn log_event(&mut self, data: String) { let t_now = SystemTime::now(); let datetime: DateTime = t_now.into(); - self.events.push(AuditEvent::log(AuditLog { + self.events.push(AuditEvent::Log(AuditLog { time: datetime.to_rfc3339(), name: data, })) diff --git a/src/lib/be/mod.rs b/src/lib/be/mod.rs index b6aaac590..ed9419f1b 100644 --- a/src/lib/be/mod.rs +++ b/src/lib/be/mod.rs @@ -7,31 +7,14 @@ use rusqlite::NO_PARAMS; use serde_json; // use uuid; -use super::audit::AuditScope; -use super::entry::Entry; -use super::filter::Filter; +use audit::AuditScope; +use entry::Entry; +use filter::Filter; mod idl; mod mem_be; mod sqlite_be; -// This contacts the needed backend and starts it up - -#[derive(Debug, PartialEq)] -pub struct BackendAuditScope { - time_start: (), - time_end: (), -} - -impl BackendAuditScope { - pub fn new() -> Self { - BackendAuditScope { - time_start: (), - time_end: (), - } - } -} - #[derive(Debug)] struct IdEntry { // FIXME: This should be u64, but sqlite uses i32 ... @@ -56,54 +39,144 @@ pub struct Backend { pool: Pool, } -// In the future this will do the routing between the chosen backends etc. -impl Backend { - pub fn new(audit: &mut AuditScope, path: &str) -> Self { - // this has a ::memory() type, but will path == "" work? 
- audit_segment!(audit, || { - let manager = SqliteConnectionManager::file(path); - let builder1 = Pool::builder(); - let builder2 = if path == "" { - builder1.max_size(1) - } else { - // FIXME: Make this configurable - builder1.max_size(8) - }; - // Look at max_size and thread_pool here for perf later - let pool = builder2.build(manager).expect("Failed to create pool"); +pub struct BackendTransaction { + committed: bool, + conn: r2d2::PooledConnection, +} +pub struct BackendWriteTransaction { + committed: bool, + conn: r2d2::PooledConnection, +} + +pub trait BackendReadTransaction { + fn get_conn(&self) -> &r2d2::PooledConnection; + + // Take filter, and AuditScope ref? + fn search(&self, au: &mut AuditScope, filt: &Filter) -> Result, BackendError> { + // Do things + // Alloc a vec for the entries. + // FIXME: Make this actually a good size for the result set ... + // FIXME: Actually compute indexes here. + // So to make this use indexes, we can use the filter type and + // destructure it to work out what we need to actually search (if + // possible) to create the candidate set. + // Unlike DS, even if we don't get the index back, we can just pass + // to the in-memory filter test and be done. + audit_segment!(au, || { + let mut raw_entries: Vec = Vec::new(); { - let conn = pool.get().unwrap(); - // Perform any migrations as required? - // I think we only need the core table here, indexing will do it's own - // thing later - // conn.execute("PRAGMA journal_mode=WAL;", NO_PARAMS).unwrap(); - conn.execute( - "CREATE TABLE IF NOT EXISTS id2entry ( - id INTEGER PRIMARY KEY ASC, - data TEXT NOT NULL - ) - ", - NO_PARAMS, - ) - .unwrap(); + // Actually do a search now! 
+ // let conn = self.pool.get().unwrap(); + // Start a txn + // conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap(); - // Create a version table for migration indication - - // Create the core db + // read them all + let mut stmt = self.get_conn().prepare("SELECT id, data FROM id2entry").unwrap(); + let id2entry_iter = stmt + .query_map(NO_PARAMS, |row| IdEntry { + id: row.get(0), + data: row.get(1), + }) + .unwrap(); + for row in id2entry_iter { + audit_log!(au, "raw entry: {:?}", row); + // FIXME: Handle this properly. + raw_entries.push(row.unwrap().data); + } + // Rollback, we should have done nothing. + // conn.execute("ROLLBACK TRANSACTION", NO_PARAMS).unwrap(); } + // Do other things + // Now, de-serialise the raw_entries back to entries + let entries: Vec = raw_entries + .iter() + .filter_map(|val| { + // TODO: Should we do better than unwrap? + let e: Entry = serde_json::from_str(val.as_str()).unwrap(); + if filt.entry_match_no_index(&e) { + Some(e) + } else { + None + } + }) + .collect(); - Backend { pool: pool } + Ok(entries) }) } - pub fn create( - &mut self, - au: &mut AuditScope, - entries: &Vec, - ) -> Result { +} + + +impl Drop for BackendTransaction { + // Abort + // TODO: Is this correct for RO txn? + fn drop(self: &mut Self) { + if !self.committed { + println!("Aborting txn"); + self.conn + .execute("ROLLBACK TRANSACTION", NO_PARAMS) + .unwrap(); + } + } +} + +impl BackendTransaction { + pub fn new(conn: r2d2::PooledConnection) -> Self { + // Start the transaction + println!("Starting txn ..."); + // TODO: Way to flag that this will be a read? 
+ conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap(); + BackendTransaction { + committed: false, + conn: conn, + } + } +} + +impl BackendReadTransaction for BackendTransaction { + fn get_conn(&self) -> &r2d2::PooledConnection { + &self.conn + } +} + +static DBV_ID2ENTRY: &'static str = "id2entry"; +static DBV_INDEX: &'static str = "index"; + + +impl Drop for BackendWriteTransaction { + // Abort + fn drop(self: &mut Self) { + if !self.committed { + println!("Aborting txn"); + self.conn + .execute("ROLLBACK TRANSACTION", NO_PARAMS) + .unwrap(); + } + } +} + +impl BackendReadTransaction for BackendWriteTransaction { + fn get_conn(&self) -> &r2d2::PooledConnection { + &self.conn + } +} + +impl BackendWriteTransaction { + pub fn new(conn: r2d2::PooledConnection) -> Self { + // Start the transaction + println!("Starting txn ..."); + // TODO: Way to flag that this will be a write? + conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap(); + BackendWriteTransaction { + committed: false, + conn: conn, + } + } + + pub fn create(&self, au: &mut AuditScope, entries: &Vec) -> Result<(), BackendError> { audit_segment!(au, || { - let be_audit = BackendAuditScope::new(); // Start be audit timer if entries.is_empty() { @@ -128,79 +201,25 @@ impl Backend { // THIS IS PROBABLY THE BIT WHERE YOU NEED DB ABSTRACTION { - let conn = self.pool.get().unwrap(); // Start a txn - conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap(); + // self.conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap(); // write them all for ser_entry in ser_entries { - conn.execute( - "INSERT INTO id2entry (data) VALUES (?1)", - &[&ser_entry as &ToSql], - ) - .unwrap(); + self.conn + .execute( + "INSERT INTO id2entry (data) VALUES (?1)", + &[&ser_entry as &ToSql], + ) + .unwrap(); } // TODO: update indexes (as needed) // Commit the txn - conn.execute("COMMIT TRANSACTION", NO_PARAMS).unwrap(); + // conn.execute("COMMIT TRANSACTION", NO_PARAMS).unwrap(); } - Ok(be_audit) - }) - } - - // Take filter, and AuditScope 
ref? - pub fn search(&self, au: &mut AuditScope, filt: &Filter) -> Result, BackendError> { - // Do things - // Alloc a vec for the entries. - // FIXME: Make this actually a good size for the result set ... - // FIXME: Actually compute indexes here. - // So to make this use indexes, we can use the filter type and - // destructure it to work out what we need to actually search (if - // possible) to create the candidate set. - // Unlike DS, even if we don't get the index back, we can just pass - // to the in-memory filter test and be done. - audit_segment!(au, || { - let mut raw_entries: Vec = Vec::new(); - { - // Actually do a search now! - let conn = self.pool.get().unwrap(); - // Start a txn - conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap(); - - // read them all - let mut stmt = conn.prepare("SELECT id, data FROM id2entry").unwrap(); - let id2entry_iter = stmt - .query_map(NO_PARAMS, |row| IdEntry { - id: row.get(0), - data: row.get(1), - }) - .unwrap(); - for row in id2entry_iter { - audit_log!(au, "raw entry: {:?}", row); - // FIXME: Handle this properly. - raw_entries.push(row.unwrap().data); - } - // Rollback, we should have done nothing. - conn.execute("ROLLBACK TRANSACTION", NO_PARAMS).unwrap(); - } - // Do other things - // Now, de-serialise the raw_entries back to entries - let entries: Vec = raw_entries - .iter() - .filter_map(|val| { - // TODO: Should we do better than unwrap? - let e: Entry = serde_json::from_str(val.as_str()).unwrap(); - if filt.entry_match_no_index(&e) { - Some(e) - } else { - None - } - }) - .collect(); - - Ok(entries) + Ok(()) }) } @@ -227,9 +246,162 @@ impl Backend { } } - pub fn modify() {} + pub fn modify() { + unimplemented!() + } - pub fn delete() {} + pub fn delete() { + unimplemented!() + } + + pub fn backup() { + unimplemented!() + } + + // Should this be offline only? 
+ pub fn restore() { + unimplemented!() + } + + pub fn commit(mut self) -> Result<(), ()> { + println!("Commiting txn"); + self.committed = true; + self.conn + .execute("COMMIT TRANSACTION", NO_PARAMS) + .map(|_| ()) + .map_err(|e| { + println!("{:?}", e); + () + }) + } + + // ===== inner helpers ===== + // Some of these are not self due to use in new() + fn get_db_version_key(&self, key: &str) -> i32 { + match self.conn.query_row_named( + "SELECT version FROM db_version WHERE id = :id", + &[(":id", &key)], + |row| row.get(0), + ) { + Ok(e) => e, + Err(_) => { + // The value is missing, default to 0. + 0 + } + } + } + + pub fn setup(&self, audit: &mut AuditScope) -> Result<(), ()> { + { + // self.conn.execute("BEGIN TRANSACTION", NO_PARAMS).unwrap(); + + // conn.execute("PRAGMA journal_mode=WAL;", NO_PARAMS).unwrap(); + // + // This stores versions of components. For example: + // ---------------------- + // | id | version | + // | id2entry | 1 | + // | index | 1 | + // | schema | 1 | + // ---------------------- + // + // This allows each component to initialise on it's own, be + // rolled back individually, by upgraded in isolation, and more + // + // NEVER CHANGE THIS DEFINITION. + self.conn + .execute( + "CREATE TABLE IF NOT EXISTS db_version ( + id TEXT PRIMARY KEY, + version INTEGER + ) + ", + NO_PARAMS, + ) + .unwrap(); + + // If the table is empty, populate the versions as 0. + let mut dbv_id2entry = self.get_db_version_key(DBV_ID2ENTRY); + audit_log!(audit, "dbv_id2entry initial == {}", dbv_id2entry); + + // Check db_version here. + // * if 0 -> create v1. + if dbv_id2entry == 0 { + self.conn + .execute( + "CREATE TABLE IF NOT EXISTS id2entry ( + id INTEGER PRIMARY KEY ASC, + data TEXT NOT NULL + ) + ", + NO_PARAMS, + ) + .unwrap(); + dbv_id2entry = 1; + audit_log!(audit, "dbv_id2entry migrated -> {}", dbv_id2entry); + } + // * if v1 -> complete. 
+ + self.conn + .execute_named( + "INSERT OR REPLACE INTO db_version (id, version) VALUES(:id, :dbv_id2entry)", + &[(":id", &DBV_ID2ENTRY), (":dbv_id2entry", &dbv_id2entry)], + ) + .unwrap(); + + // NOTE: Indexing is configured in a different step! + // Indexing uses a db version flag to represent the version + // of the indexes representation on disk in case we change + // it. + Ok(()) + } + } +} + +// In the future this will do the routing between the chosen backends etc. +impl Backend { + pub fn new(audit: &mut AuditScope, path: &str) -> Result { + // this has a ::memory() type, but will path == "" work? + audit_segment!(audit, || { + let manager = SqliteConnectionManager::file(path); + let builder1 = Pool::builder(); + let builder2 = if path == "" { + // We are in a debug mode, with in memory. We MUST have only + // a single DB thread, else we cause consistency issues. + builder1.max_size(1) + } else { + // FIXME: Make this configurable + builder1.max_size(8) + }; + // Look at max_size and thread_pool here for perf later + let pool = builder2.build(manager).expect("Failed to create pool"); + let be = Backend { pool: pool }; + + // Now complete our setup with a txn + let r = { + let be_txn = be.write(); + be_txn.setup(audit); + be_txn.commit() + }; + + audit_log!(audit, "be new setup: {:?}", r); + + match r { + Ok(_) => Ok(be), + Err(e) => Err(e), + } + }) + } + + pub fn read(&self) -> BackendTransaction { + let conn = self.pool.get().unwrap(); + BackendTransaction::new(conn) + } + + pub fn write(&self) -> BackendWriteTransaction { + let conn = self.pool.get().unwrap(); + BackendWriteTransaction::new(conn) + } } impl Clone for Backend { @@ -257,35 +429,27 @@ mod tests { use super::super::audit::AuditScope; use super::super::entry::Entry; use super::super::filter::Filter; - use super::super::log; - use super::{Backend, BackendError}; + use super::{Backend, BackendError, BackendTransaction, BackendWriteTransaction, BackendReadTransaction}; macro_rules! 
run_test { ($test_fn:expr) => {{ - System::run(|| { - let mut audit = AuditScope::new("run_test"); + let mut audit = AuditScope::new("run_test"); - let test_log = log::start(); + let be = Backend::new(&mut audit, "").unwrap(); + let mut be_txn = be.write(); - let be = Backend::new(&mut audit, ""); - - // Could wrap another future here for the future::ok bit... - let fut = $test_fn(&mut audit, be); - let comp_fut = fut.map_err(|()| ()).and_then(move |_r| { - test_log.do_send(audit); - println!("Stopping actix ..."); - actix::System::current().stop(); - future::result(Ok(())) - }); - - tokio::spawn(comp_fut); - }); + // Could wrap another future here for the future::ok bit... + let r = $test_fn(&mut audit, &be_txn); + // Commit, to guarantee it worked. + assert!(be_txn.commit().is_ok()); + println!("{}", audit); + r }}; } #[test] fn test_simple_create() { - run_test!(|audit: &mut AuditScope, mut be: Backend| { + run_test!(|audit: &mut AuditScope, be: &BackendWriteTransaction| { audit_log!(audit, "Simple Create"); let empty_result = be.create(audit, &Vec::new()); @@ -306,34 +470,27 @@ mod tests { // There should only be one entry so is this enough? assert!(entries.first().is_some()); // Later we could check consistency of the entry saved ... 
- - // Check it's there - - future::ok(()) }); } #[test] fn test_simple_search() { - run_test!(|audit: &mut AuditScope, be| { + run_test!(|audit: &mut AuditScope, be: &BackendWriteTransaction| { audit_log!(audit, "Simple Search"); - future::ok(()) }); } #[test] fn test_simple_modify() { - run_test!(|audit: &mut AuditScope, be| { + run_test!(|audit: &mut AuditScope, be: &BackendWriteTransaction| { audit_log!(audit, "Simple Modify"); - future::ok(()) }); } #[test] fn test_simple_delete() { - run_test!(|audit: &mut AuditScope, be| { + run_test!(|audit: &mut AuditScope, be: &BackendWriteTransaction| { audit_log!(audit, "Simple Delete"); - future::ok(()) }); } } diff --git a/src/lib/constants.rs b/src/lib/constants.rs new file mode 100644 index 000000000..b5b69ef82 --- /dev/null +++ b/src/lib/constants.rs @@ -0,0 +1,39 @@ +pub static UUID_ADMIN: &'static str = "00000000-0000-0000-0000-000000000000"; +pub static UUID_ANONYMOUS: &'static str = "00000000-0000-0000-0000-ffffffffffff"; + +// Core +pub static UUID_SCHEMA_ATTR_CLASS: &'static str = "aa0f193f-3010-4783-9c9e-f97edb14d8c2"; +pub static UUID_SCHEMA_ATTR_UUID: &'static str = "642a893b-fe1a-4fe1-805d-fb78e7f83ee7"; +pub static UUID_SCHEMA_ATTR_NAME: &'static str = "27be9127-5ba1-4c06-bce9-7250f2c7f630"; +pub static UUID_SCHEMA_ATTR_PRINCIPAL_NAME: &'static str = "64dda3ac-12cb-4000-9b30-97a92767ccab"; +pub static UUID_SCHEMA_ATTR_DESCRIPTION: &'static str = "a4da35a2-c5fb-4f8f-a341-72cd39ec9eee"; +pub static UUID_SCHEMA_ATTR_SYSTEM: &'static str = "ee28df1e-cf02-49ca-80b5-8310fb619377"; +pub static UUID_SCHEMA_ATTR_SECRET: &'static str = "0231c61a-0a43-4987-9293-8732ed9459fa"; +pub static UUID_SCHEMA_ATTR_MULTIVALUE: &'static str = "8a6a8bf3-7053-42e2-8cda-15af7a197513"; +pub static UUID_SCHEMA_ATTR_INDEX: &'static str = "2c5ff455-0709-4f67-a37c-35ff7e67bfff"; +pub static UUID_SCHEMA_ATTR_SYNTAX: &'static str = "85e8c2c7-3852-48dd-bfc9-d0982a50e2ef"; +pub static UUID_SCHEMA_ATTR_SYSTEMMAY: &'static str = 
"f3842165-90ad-4465-ad71-1de63f8c98a1"; +pub static UUID_SCHEMA_ATTR_MAY: &'static str = "7adb7e2d-af8f-492e-8f1c-c5d9b7c47b5f"; +pub static UUID_SCHEMA_ATTR_SYSTEMMUST: &'static str = "e2e4abc4-7083-41ea-a663-43d904d949ce"; +pub static UUID_SCHEMA_ATTR_MUST: &'static str = "40e88ca8-06d7-4a51-b538-1125e51c02e0"; + +pub static UUID_SCHEMA_CLASS_ATTRIBUTETYPE: &'static str = "ed65a356-a4d9-45a8-b4b9-5d40d9acdb7e"; +pub static UUID_SCHEMA_CLASS_CLASSTYPE: &'static str = "ec1964f6-0c72-4373-954f-f3a603c5f8bb"; +pub static UUID_SCHEMA_CLASS_OBJECT: &'static str = "579bb16d-1d85-4f8e-bb3b-6fc55af582fe"; +pub static UUID_SCHEMA_CLASS_EXTENSIBLEOBJECT: &'static str = + "0fb2171d-372b-4d0d-9194-9a4d6846c324"; + +// system supplementary +pub static UUID_SCHEMA_ATTR_DISPLAYNAME: &'static str = "201bc966-954b-48f5-bf25-99ffed759861"; +pub static UUID_SCHEMA_ATTR_MAIL: &'static str = "fae94676-720b-461b-9438-bfe8cfd7e6cd"; +pub static UUID_SCHEMA_ATTR_MEMBEROF: &'static str = "2ff1abc8-2f64-4f41-9e3d-33d44616a112"; +pub static UUID_SCHEMA_ATTR_SSH_PUBLICKEY: &'static str = "52f2f13f-d35c-4cca-9f43-90a12c968f72"; +pub static UUID_SCHEMA_ATTR_PASSWORD: &'static str = "a5121082-be54-4624-a307-383839b0366b"; +pub static UUID_SCHEMA_ATTR_MEMBER: &'static str = "cbb7cb55-1d48-4b89-8da7-8d570e755b47"; +pub static UUID_SCHEMA_ATTR_VERSION: &'static str = "896d5095-b3ae-451e-a91f-4314165b5395"; +pub static UUID_SCHEMA_ATTR_DOMAIN: &'static str = "c9926716-eaaa-4c83-a1ab-1ed4372a7491"; + +pub static UUID_SCHEMA_CLASS_PERSON: &'static str = "86c4d9e8-3820-45d7-8a8c-d3c522287010"; +pub static UUID_SCHEMA_CLASS_GROUP: &'static str = "c0e4e58c-1a2e-4bc3-ad56-5950ef810ea7"; +pub static UUID_SCHEMA_CLASS_ACCOUNT: &'static str = "8bbff87c-1731-455e-a0e7-bf1d0908e983"; +pub static UUID_SCHEMA_CLASS_SYSTEM_INFO: &'static str = "510b2a38-0577-4680-b0ad-836ca3415e6c"; diff --git a/src/lib/core.rs b/src/lib/core.rs index c5d3265c9..147e1479d 100644 --- a/src/lib/core.rs +++ b/src/lib/core.rs @@ 
-14,7 +14,7 @@ use super::config::Configuration; use super::event::{CreateEvent, SearchEvent}; use super::filter::Filter; use super::log; -use super::proto_v1::{CreateRequest, Response, SearchRequest, SearchResponse}; +use super::proto_v1::{CreateRequest, SearchRequest}; use super::server; struct AppState { @@ -157,9 +157,14 @@ pub fn create_server_core(config: Configuration) { // Until this point, we probably want to write to stderr // Start up the logging system: for now it just maps to stderr + + // The log server is started on it's own thread let log_addr = log::start(); log_event!(log_addr, "Starting rsidm with configuration: {:?}", config); + // Similar, create a stats thread which aggregates statistics from the + // server as they come in. + // Start the query server with the given be path: future config let server_addr = server::start(log_addr.clone(), config.db_path.as_str(), config.threads); // Copy the max size diff --git a/src/lib/entry.rs b/src/lib/entry.rs index 80b340cc4..25cdea7ef 100644 --- a/src/lib/entry.rs +++ b/src/lib/entry.rs @@ -1,5 +1,6 @@ // use serde_json::{Error, Value}; use super::proto_v1::Entry as ProtoEntry; +use filter::Filter; use std::collections::btree_map::{Iter as BTreeIter, IterMut as BTreeIterMut}; use std::collections::BTreeMap; use std::slice::Iter as SliceIter; @@ -159,7 +160,7 @@ impl Entry { }) } - pub fn attribute_substring(&self, attr: &str, subvalue: &str) -> bool { + pub fn attribute_substring(&self, _attr: &str, _subvalue: &str) -> bool { unimplemented!(); } @@ -183,6 +184,10 @@ impl Entry { } } + pub fn filter_from_attrs(&self, attrs: Vec<&str>) -> Filter { + unimplemented!() + } + // FIXME: Can we consume protoentry? pub fn from(e: &ProtoEntry) -> Self { // Why not the trait? 
In the future we may want to extend @@ -278,51 +283,11 @@ struct User { credentials: Vec, } -impl User { - pub fn new(username: &str, displayname: &str) -> Self { - // Build a blank value - User { - username: String::from(username), - class: Vec::new(), - displayname: String::from(displayname), - legalname: None, - email: Vec::new(), - memberof: Vec::new(), - sshpublickey: Vec::new(), - credentials: Vec::new(), - } - } - - // We need a way to "diff" two User objects - // as on a modification we want to track the set of changes - // that is occuring -- needed for indexing to function. - - // Basically we just need to check if it changed, remove - // the "former" and add the "newer" value. - - // We have to sort vecs ... - - // Is there a way to call this on serialise? - fn validate(&self) -> Result<(), ()> { - // Given a schema, validate our object is sane. - - Ok(()) - } -} - #[cfg(test)] mod tests { use super::{Entry, User}; use serde_json; - #[test] - fn test_user_basic() { - let u: User = User::new("william", "William Brown"); - let d = serde_json::to_string_pretty(&u).unwrap(); - - let _u2: User = serde_json::from_str(d.as_str()).unwrap(); - } - #[test] fn test_entry_basic() { let mut e: Entry = Entry::new(); diff --git a/src/lib/filter.rs b/src/lib/filter.rs index fbe172df6..fa0059a84 100644 --- a/src/lib/filter.rs +++ b/src/lib/filter.rs @@ -35,14 +35,16 @@ impl Filter { // If an or/not/and condition has no items, remove it // // If its the root item? - self.clone() + // self.clone() + unimplemented!() } // This is probably not safe, so it's for internal test cases // only because I'm familiar with the syntax ... you have been warned. fn from_ldap_string(_ldap_string: String) -> Result { + unimplemented!() // For now return an empty filters - Ok(Filter::And(Vec::new())) + // Ok(Filter::And(Vec::new())) } // What other parse types do we need? 
diff --git a/src/lib/lib.rs b/src/lib/lib.rs index f8b178ab4..d78d308e1 100644 --- a/src/lib/lib.rs +++ b/src/lib/lib.rs @@ -20,6 +20,8 @@ extern crate regex; #[macro_use] extern crate lazy_static; +extern crate concread; + // use actix::prelude::*; // use actix_web::{ // http, middleware, App, AsyncResponder, FutureResponse, HttpRequest, HttpResponse, Path, State, @@ -33,6 +35,7 @@ mod log; #[macro_use] mod audit; mod be; +mod constants; mod entry; mod event; mod identity; diff --git a/src/lib/log.rs b/src/lib/log.rs index 8b7016c78..33b6b380a 100644 --- a/src/lib/log.rs +++ b/src/lib/log.rs @@ -1,5 +1,4 @@ use actix::prelude::*; -use serde_json; use super::audit::AuditScope; diff --git a/src/lib/plugins/base.rs b/src/lib/plugins/base.rs new file mode 100644 index 000000000..7b08372d7 --- /dev/null +++ b/src/lib/plugins/base.rs @@ -0,0 +1,431 @@ +use plugins::Plugin; +use uuid::Uuid; + +use audit::AuditScope; +use be::{BackendTransaction, BackendReadTransaction, BackendWriteTransaction}; +use entry::Entry; +use error::OperationError; +use event::CreateEvent; +use filter::Filter; +use schema::{SchemaTransaction, SchemaWriteTransaction}; + +// TO FINISH +/* +Add normalisation step +Add filter normaliser to search. 
+Add principal name generation +*/ + +pub struct Base {} + +impl Plugin for Base { + fn id() -> &'static str { + "Base" + } + // Need to be given the backend(for testing ease) + // audit + // the mut set of entries to create + // the create event itself (immutable, for checking originals) + // contains who is creating them + // the schema of the running instance + + fn pre_create( + be: &BackendWriteTransaction, + au: &mut AuditScope, + cand: &mut Vec, + _ce: &CreateEvent, + _schema: &SchemaWriteTransaction, + ) -> Result<(), OperationError> { + // For each candidate + for entry in cand.iter_mut() { + let name_uuid = String::from("uuid"); + + audit_log!(au, "Base check on entry: {:?}", entry); + + // First, ensure we have the 'object', class in the class set. + entry.add_ava(String::from("class"), String::from("object")); + + audit_log!(au, "Object should now be in entry: {:?}", entry); + + // If they have a name, but no principal name, derive it. + + // if they don't have uuid, create it. + // TODO: get_ava should have a str version for effeciency? + let mut c_uuid = match entry.get_ava(&name_uuid) { + Some(u) => { + // Actually check we have a value, could be empty array ... + // TODO: Should this be left to schema to assert the value? + if u.len() > 1 { + audit_log!(au, "Entry defines uuid attr, but multiple values."); + return Err(OperationError::Plugin); + }; + + let v = match u.first() { + Some(v) => v, + None => { + // TODO: Should this be forgiving and just generate the UUID? + audit_log!(au, "Entry defines uuid attr, but no value."); + return Err(OperationError::Plugin); + } + }; + + // This could actually fail, so we probably need to handle + // this better .... + // TODO: Make this a SCHEMA check, not a manual one. + // + match Uuid::parse_str(v.as_str()) { + Ok(up) => up, + Err(_) => { + audit_log!( + au, + "Entry contains invalid Base content, rejecting out of principle." 
+ ); + return Err(OperationError::Plugin); + } + } + } + None => Uuid::new_v4(), + }; + + // Make it a string, so we can filter. + let str_uuid = format!("{}", c_uuid); + + let mut au_be = AuditScope::new("be_exist"); + + // We need to clone to the filter because it owns the content + let filt = Filter::Eq(name_uuid.clone(), str_uuid.clone()); + + let r = be.exists(&mut au_be, &filt); + + au.append_scope(au_be); + // end the scope for the be operation. + + match r { + Ok(b) => { + if b == true { + audit_log!(au, "Base already exists, rejecting."); + return Err(OperationError::Plugin); + } + } + Err(e) => { + audit_log!(au, "Backend error occured checking Base existance. {:?}", e); + return Err(OperationError::Plugin); + } + } + + let str_uuid = format!("{}", c_uuid); + audit_log!(au, "Setting UUID {} to entry", str_uuid); + let ava_uuid: Vec = vec![str_uuid]; + + entry.set_avas(name_uuid, ava_uuid); + audit_log!(au, "Final entry state: {:?}", entry); + } + // done! + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::super::Plugin; + use super::Base; + + use audit::AuditScope; + use be::{Backend, BackendWriteTransaction}; + use entry::Entry; + use event::CreateEvent; + use schema::{Schema, SchemaWriteTransaction}; + + macro_rules! run_pre_create_test { + ( + $preload_entries:ident, + $create_entries:ident, + $ident:ident, + $internal:ident, + $test_fn:expr + ) => {{ + let mut au = AuditScope::new("run_pre_create_test"); + audit_segment!(au, || { + // Create an in memory BE + let be = Backend::new(&mut au, "").unwrap(); + let be_txn = be.write(); + + // TODO: Preload entries here! 
+ if !$preload_entries.is_empty() { + assert!(be_txn.create(&mut au, &$preload_entries).is_ok()); + }; + + let ce = CreateEvent::from_vec($create_entries.clone()); + let mut schema_be = Schema::new(&mut au).unwrap(); + let mut schema = schema_be.write(); + schema.bootstrap_core(&mut au).unwrap(); + + let mut au_test = AuditScope::new("pre_create_test"); + audit_segment!(au_test, || $test_fn( + &be_txn, + &mut au_test, + &mut $create_entries, + &ce, + &schema, + )); + + + schema.commit(); + be_txn.commit(); + + + au.append_scope(au_test); + }); + // Dump the raw audit log. + println!("{}", au); + }}; + } + + // Check empty create + #[test] + fn test_pre_create_empty() { + let preload: Vec = Vec::new(); + let mut create: Vec = Vec::new(); + run_pre_create_test!( + preload, + create, + false, + false, + |be: &BackendWriteTransaction, + au: &mut AuditScope, + cand: &mut Vec, + ce: &CreateEvent, + schema: &SchemaWriteTransaction| { + let r = Base::pre_create(be, au, cand, ce, schema); + + assert!(r.is_ok()); + // Nothing should have changed. + assert!(cand.len() == 0); + } + ); + } + + // check create where no uuid + #[test] + fn test_pre_create_no_uuid() { + let preload: Vec = Vec::new(); + + let e: Entry = serde_json::from_str( + r#"{ + "attrs": { + "class": ["person"], + "name": ["testperson"], + "description": ["testperson"], + "displayname": ["testperson"] + } + }"#, + ) + .unwrap(); + + let mut create = vec![e]; + + run_pre_create_test!( + preload, + create, + false, + false, + |be: &BackendWriteTransaction, + au: &mut AuditScope, + cand: &mut Vec, + ce: &CreateEvent, + schema: &SchemaWriteTransaction| { + let r = Base::pre_create(be, au, cand, ce, schema); + assert!(r.is_ok()); + // Assert that the entry contains the attr "uuid" now. 
+ let ue = cand.first().unwrap(); + assert!(ue.attribute_pres("uuid")); + } + ); + } + + // check unparseable uuid + #[test] + fn test_pre_create_uuid_invalid() { + let preload: Vec = Vec::new(); + + let e: Entry = serde_json::from_str( + r#"{ + "attrs": { + "class": ["person"], + "name": ["testperson"], + "description": ["testperson"], + "displayname": ["testperson"], + "uuid": ["xxxxxx"] + } + }"#, + ) + .unwrap(); + + let mut create = vec![e.clone()]; + + run_pre_create_test!( + preload, + create, + false, + false, + |be: &BackendWriteTransaction, + au: &mut AuditScope, + cand: &mut Vec, + ce: &CreateEvent, + schema: &SchemaWriteTransaction| { + let r = Base::pre_create(be, au, cand, ce, schema); + assert!(r.is_err()); + } + ); + } + + // check entry where uuid is empty list + #[test] + fn test_pre_create_uuid_empty() { + let preload: Vec = Vec::new(); + + let e: Entry = serde_json::from_str( + r#"{ + "attrs": { + "class": ["person"], + "name": ["testperson"], + "description": ["testperson"], + "displayname": ["testperson"], + "uuid": [] + } + }"#, + ) + .unwrap(); + + let mut create = vec![e.clone()]; + + run_pre_create_test!( + preload, + create, + false, + false, + |be: &BackendWriteTransaction, + au: &mut AuditScope, + cand: &mut Vec, + ce: &CreateEvent, + schema: &SchemaWriteTransaction| { + let r = Base::pre_create(be, au, cand, ce, schema); + assert!(r.is_err()); + } + ); + } + + // check create where provided uuid is valid. It should be unchanged. 
+ #[test] + fn test_pre_create_uuid_valid() { + let preload: Vec = Vec::new(); + + let e: Entry = serde_json::from_str( + r#"{ + "attrs": { + "class": ["person"], + "name": ["testperson"], + "description": ["testperson"], + "displayname": ["testperson"], + "uuid": ["79724141-3603-4060-b6bb-35c72772611d"] + } + }"#, + ) + .unwrap(); + + let mut create = vec![e.clone()]; + + run_pre_create_test!( + preload, + create, + false, + false, + |be: &BackendWriteTransaction, + au: &mut AuditScope, + cand: &mut Vec, + ce: &CreateEvent, + schema: &SchemaWriteTransaction| { + let r = Base::pre_create(be, au, cand, ce, schema); + assert!(r.is_ok()); + let ue = cand.first().unwrap(); + assert!(ue.attribute_equality("uuid", "79724141-3603-4060-b6bb-35c72772611d")); + } + ); + } + + #[test] + fn test_pre_create_uuid_valid_multi() { + let preload: Vec = Vec::new(); + + let e: Entry = serde_json::from_str( + r#"{ + "attrs": { + "class": ["person"], + "name": ["testperson"], + "description": ["testperson"], + "displayname": ["testperson"], + "uuid": ["79724141-3603-4060-b6bb-35c72772611d", "79724141-3603-4060-b6bb-35c72772611d"] + } + }"#, + ) + .unwrap(); + + let mut create = vec![e.clone()]; + + run_pre_create_test!( + preload, + create, + false, + false, + |be: &BackendWriteTransaction, + au: &mut AuditScope, + cand: &mut Vec, + ce: &CreateEvent, + schema: &SchemaWriteTransaction| { + let r = Base::pre_create(be, au, cand, ce, schema); + assert!(r.is_err()); + } + ); + } + + // check create where uuid already exists. 
+ #[test] + fn test_pre_create_uuid_exist() { + let e: Entry = serde_json::from_str( + r#"{ + "attrs": { + "class": ["person"], + "name": ["testperson"], + "description": ["testperson"], + "displayname": ["testperson"], + "uuid": ["79724141-3603-4060-b6bb-35c72772611d"] + } + }"#, + ) + .unwrap(); + + let mut create = vec![e.clone()]; + let preload = vec![e]; + + run_pre_create_test!( + preload, + create, + false, + false, + |be: &BackendWriteTransaction, + au: &mut AuditScope, + cand: &mut Vec, + ce: &CreateEvent, + schema: &SchemaWriteTransaction| { + let r = Base::pre_create(be, au, cand, ce, schema); + assert!(r.is_err()); + } + ); + } + + // check create where uuid is a well-known + // WARNING: This actually requires me to implement backend migrations and + // creation of default objects in the DB on new() if they don't exist, and + // to potentially support migrations of said objects. +} diff --git a/src/lib/plugins/mod.rs b/src/lib/plugins/mod.rs index 63073e388..72e551a1a 100644 --- a/src/lib/plugins/mod.rs +++ b/src/lib/plugins/mod.rs @@ -1,21 +1,23 @@ use audit::AuditScope; -use be::Backend; +use be::{BackendTransaction, BackendWriteTransaction}; use entry::Entry; use error::OperationError; use event::CreateEvent; -use schema::Schema; +use schema::{SchemaTransaction, SchemaWriteTransaction}; mod base; +mod protected; trait Plugin { fn id() -> &'static str; fn pre_create( - be: &mut Backend, - au: &mut AuditScope, - cand: &mut Vec, - ce: &CreateEvent, - schema: &Schema, + // TODO: I think this is wrong, it should be a query server + _be: &BackendWriteTransaction, + _au: &mut AuditScope, + _cand: &mut Vec, + _ce: &CreateEvent, + _schema: &SchemaWriteTransaction, ) -> Result<(), OperationError> { Ok(()) } @@ -53,7 +55,7 @@ pub struct Plugins {} macro_rules! run_pre_create_plugin { ( - $be:ident, + $be_txn:ident, $au:ident, $cand:ident, $ce:ident, @@ -62,7 +64,7 @@ macro_rules! 
run_pre_create_plugin { ) => {{ let mut audit_scope = AuditScope::new(<($target_plugin)>::id()); let r = audit_segment!(audit_scope, || <($target_plugin)>::pre_create( - $be, + $be_txn, &mut audit_scope, $cand, $ce, @@ -75,15 +77,15 @@ macro_rules! run_pre_create_plugin { impl Plugins { pub fn run_pre_create( - be: &mut Backend, + be_txn: &BackendWriteTransaction, au: &mut AuditScope, cand: &mut Vec, ce: &CreateEvent, - schema: &Schema, + schema: &SchemaWriteTransaction, ) -> Result<(), OperationError> { - audit_segment!(audit_plugin_pre, || { + audit_segment!(au, || { // map chain? - let base_res = run_pre_create_plugin!(be, au, cand, ce, schema, base::Base); + let base_res = run_pre_create_plugin!(be_txn, au, cand, ce, schema, base::Base); // TODO, actually return the right thing ... base_res diff --git a/src/lib/plugins/protected.rs b/src/lib/plugins/protected.rs new file mode 100644 index 000000000..02d8fbdbd --- /dev/null +++ b/src/lib/plugins/protected.rs @@ -0,0 +1,2 @@ +// Objects matching some filter condition should +// be protected from modification / deletion diff --git a/src/lib/proto_v1.rs b/src/lib/proto_v1.rs index 5b4186b9e..cb4d32ae5 100644 --- a/src/lib/proto_v1.rs +++ b/src/lib/proto_v1.rs @@ -62,3 +62,16 @@ impl CreateRequest { CreateRequest { entries: entries } } } + +// Login is a multi-step process potentially. First the client says who they +// want to request +// +// we respond with a set of possible authentications that can proceed, and perhaps +// we indicate which options must/may? +// +// The client can then step and negotiate each. +// +// This continues until a LoginSuccess, or LoginFailure is returned. +// +// On loginSuccess, we send a cookie, and that allows the token to be +// generated. The cookie can be shared between servers. 
diff --git a/src/lib/schema.rs b/src/lib/schema.rs index 24770acd9..df8465deb 100644 --- a/src/lib/schema.rs +++ b/src/lib/schema.rs @@ -1,3 +1,5 @@ +use super::audit::AuditScope; +use super::constants::*; use super::entry::Entry; use super::error::SchemaError; use super::filter::Filter; @@ -8,6 +10,8 @@ use std::convert::TryFrom; use std::str::FromStr; use uuid::Uuid; +use concread::cowcell::{CowCell, CowCellReadTxn, CowCellWriteTxn}; + // representations of schema that confines object types, classes // and attributes. This ties in deeply with "Entry". // This only defines the types, and how they are represented. For @@ -16,6 +20,21 @@ use uuid::Uuid; // In the future this will parse/read it's schema from the db // but we have to bootstrap with some core types. +// TODO: Schema should be copy-on-write + +// TODO: Account should be a login-bind-able object +// needs account lock, timeout, policy? + +// TODO: system_info metadata object schema + +// TODO: system class to indicate the type is a system object? +// just a class? Does the class imply protections? +// probably just protection from delete and modify, except systemmay/systemmust/index? + +// TODO: Schema types -> Entry conversion + +// TODO: prefix on all schema types that are system? + #[derive(Debug, PartialEq)] enum Ternary { Empty, @@ -90,6 +109,7 @@ pub struct SchemaAttribute { // Is this ... used? // class: Vec, name: String, + uuid: Uuid, // Perhaps later add aliases? description: String, system: bool, @@ -233,7 +253,7 @@ impl SchemaAttribute { v.to_lowercase() } - pub fn normalise_uuid(&self, v:&String) -> String { + pub fn normalise_uuid(&self, v: &String) -> String { // We unwrap here as we should already have been validated ... let c_uuid = Uuid::parse_str(v.as_str()).unwrap(); c_uuid.to_hyphenated().to_string() @@ -257,6 +277,7 @@ pub struct SchemaClass { // Is this used? 
// class: Vec, name: String, + uuid: Uuid, description: String, // This allows modification of system types to be extended in custom ways systemmay: Vec, @@ -267,67 +288,73 @@ pub struct SchemaClass { impl SchemaClass { // Implement Validation and Normalisation against entries - pub fn validate_entry(&self, entry: &Entry) -> Result<(), ()> { - Err(()) + pub fn validate_entry(&self, _entry: &Entry) -> Result<(), ()> { + unimplemented!() } } #[derive(Debug, Clone)] -pub struct Schema { +pub struct SchemaInner { // We contain sets of classes and attributes. classes: HashMap, attributes: HashMap, } -impl Schema { - pub fn new() -> Self { - // - let mut s = Schema { - classes: HashMap::new(), - attributes: HashMap::new(), - }; - // Bootstrap in definitions of our own schema types - // First, add all the needed core attributes for schema parsing - s.attributes.insert( - String::from("class"), - SchemaAttribute { - name: String::from("class"), - description: String::from("The set of classes defining an object"), - system: true, - secret: false, - multivalue: true, - index: vec![IndexType::EQUALITY], - syntax: SyntaxType::UTF8STRING_INSENSITIVE, - }, - ); - s.attributes.insert( - String::from("uuid"), - SchemaAttribute { - name: String::from("uuid"), - description: String::from("The universal unique id of the object"), - system: true, - secret: false, - multivalue: false, - index: vec![IndexType::EQUALITY], - syntax: SyntaxType::UUID, - }, - ); - s.attributes.insert( - String::from("name"), - SchemaAttribute { - name: String::from("name"), - description: String::from("The shortform name of an object"), - system: true, - secret: false, - multivalue: false, - index: vec![IndexType::EQUALITY], - syntax: SyntaxType::UTF8STRING_INSENSITIVE, - }, - ); - s.attributes.insert( +impl SchemaInner { + pub fn new(audit: &mut AuditScope) -> Result { + let mut au = AuditScope::new("schema_new"); + let r = audit_segment!(au, || { + // + let mut s = SchemaInner { + classes: HashMap::new(), 
+ attributes: HashMap::new(), + }; + // Bootstrap in definitions of our own schema types + // First, add all the needed core attributes for schema parsing + s.attributes.insert( + String::from("class"), + SchemaAttribute { + name: String::from("class"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_CLASS).unwrap(), + description: String::from("The set of classes defining an object"), + system: true, + secret: false, + multivalue: true, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::UTF8STRING_INSENSITIVE, + }, + ); + s.attributes.insert( + String::from("uuid"), + SchemaAttribute { + name: String::from("uuid"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_UUID).unwrap(), + description: String::from("The universal unique id of the object"), + system: true, + secret: false, + multivalue: false, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::UUID, + }, + ); + s.attributes.insert( + String::from("name"), + SchemaAttribute { + name: String::from("name"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_NAME).unwrap(), + description: String::from("The shortform name of an object"), + system: true, + secret: false, + multivalue: false, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::UTF8STRING_INSENSITIVE, + }, + ); + s.attributes.insert( String::from("principal_name"), SchemaAttribute { name: String::from("principal_name"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_PRINCIPAL_NAME).unwrap(), description: String::from("The longform name of an object, derived from name and domain. 
Example: alice@project.org"), system: true, secret: false, @@ -336,34 +363,41 @@ impl Schema { syntax: SyntaxType::UTF8STRING_PRINCIPAL, }, ); - s.attributes.insert( - String::from("description"), - SchemaAttribute { - name: String::from("description"), - description: String::from("A description of an attribute, object or class"), - system: true, - secret: false, - multivalue: false, - index: vec![], - syntax: SyntaxType::UTF8STRING, - }, - ); - s.attributes.insert( - String::from("system"), - SchemaAttribute { - name: String::from("system"), - description: String::from( - "Is this object or attribute provided from the core system?", - ), - system: true, - secret: false, - multivalue: false, - index: vec![], - syntax: SyntaxType::BOOLEAN, - }, - ); - s.attributes.insert(String::from("secret"), SchemaAttribute { + s.attributes.insert( + String::from("description"), + SchemaAttribute { + name: String::from("description"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_DESCRIPTION).unwrap(), + description: String::from("A description of an attribute, object or class"), + system: true, + secret: false, + multivalue: false, + index: vec![], + syntax: SyntaxType::UTF8STRING, + }, + ); + s.attributes.insert( + // FIXME: Rename to system_provided? Or should we eschew this in favour of class? + // system_provided attr seems easier to provide access controls on, and can be + // part of object ... + String::from("system"), + SchemaAttribute { + name: String::from("system"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_SYSTEM).unwrap(), + description: String::from( + "Is this object or attribute provided from the core system?", + ), + system: true, + secret: false, + multivalue: false, + index: vec![], + syntax: SyntaxType::BOOLEAN, + }, + ); + s.attributes.insert(String::from("secret"), SchemaAttribute { + // FIXME: Rename from system to schema_private? system_private? attr_private? private_attr? 
name: String::from("secret"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_SECRET).unwrap(), description: String::from("If true, this value is always hidden internally to the server, even beyond access controls."), system: true, secret: false, @@ -371,8 +405,9 @@ impl Schema { index: vec![], syntax: SyntaxType::BOOLEAN, }); - s.attributes.insert(String::from("multivalue"), SchemaAttribute { + s.attributes.insert(String::from("multivalue"), SchemaAttribute { name: String::from("multivalue"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_MULTIVALUE).unwrap(), description: String::from("If true, this attribute is able to store multiple values rather than just a single value."), system: true, secret: false, @@ -380,306 +415,438 @@ impl Schema { index: vec![], syntax: SyntaxType::BOOLEAN, }); - s.attributes.insert( - String::from("index"), - SchemaAttribute { - name: String::from("index"), - description: String::from( - "Describe the indexes to apply to instances of this attribute.", - ), - system: true, - secret: false, - multivalue: false, - index: vec![], - syntax: SyntaxType::INDEX_ID, - }, - ); - s.attributes.insert( - String::from("syntax"), - SchemaAttribute { - name: String::from("syntax"), - description: String::from( - "Describe the syntax of this attribute. 
This affects indexing and sorting.", - ), - system: true, - secret: false, - multivalue: false, - index: vec![IndexType::EQUALITY], - syntax: SyntaxType::SYNTAX_ID, - }, - ); - s.attributes.insert( - String::from("systemmay"), - SchemaAttribute { - name: String::from("systemmay"), - description: String::from( - "A list of system provided optional attributes this class can store.", - ), - system: true, - secret: false, - multivalue: true, - index: vec![], - syntax: SyntaxType::UTF8STRING_INSENSITIVE, - }, - ); - s.attributes.insert( - String::from("may"), - SchemaAttribute { - name: String::from("may"), - description: String::from( - "A user modifiable list of optional attributes this class can store.", - ), - system: true, - secret: false, - multivalue: false, - index: vec![], - syntax: SyntaxType::UTF8STRING_INSENSITIVE, - }, - ); - s.attributes.insert( - String::from("systemmust"), - SchemaAttribute { - name: String::from("systemmust"), - description: String::from( - "A list of system provided required attributes this class must store.", - ), - system: true, - secret: false, - multivalue: false, - index: vec![], - syntax: SyntaxType::UTF8STRING_INSENSITIVE, - }, - ); - s.attributes.insert( - String::from("must"), - SchemaAttribute { - name: String::from("must"), - description: String::from( - "A user modifiable list of required attributes this class must store.", - ), - system: true, - secret: false, - multivalue: false, - index: vec![], - syntax: SyntaxType::UTF8STRING_INSENSITIVE, - }, - ); + s.attributes.insert( + // FIXME: Rename to index_attribute? attr_index? + String::from("index"), + SchemaAttribute { + name: String::from("index"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_INDEX).unwrap(), + description: String::from( + "Describe the indexes to apply to instances of this attribute.", + ), + system: true, + secret: false, + multivalue: false, + index: vec![], + syntax: SyntaxType::INDEX_ID, + }, + ); + s.attributes.insert( + // FIXME: Rename to attr_syntax? 
+ String::from("syntax"), + SchemaAttribute { + name: String::from("syntax"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_SYNTAX).unwrap(), + description: String::from( + "Describe the syntax of this attribute. This affects indexing and sorting.", + ), + system: true, + secret: false, + multivalue: false, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::SYNTAX_ID, + }, + ); + s.attributes.insert( + // FIXME: Rename to attribute_systemmay? + String::from("systemmay"), + SchemaAttribute { + name: String::from("systemmay"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_SYSTEMMAY).unwrap(), + description: String::from( + "A list of system provided optional attributes this class can store.", + ), + system: true, + secret: false, + multivalue: true, + index: vec![], + syntax: SyntaxType::UTF8STRING_INSENSITIVE, + }, + ); + s.attributes.insert( + // FIXME: Rename to attribute_may? schema_may? + String::from("may"), + SchemaAttribute { + name: String::from("may"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_MAY).unwrap(), + description: String::from( + "A user modifiable list of optional attributes this class can store.", + ), + system: true, + secret: false, + multivalue: false, + index: vec![], + syntax: SyntaxType::UTF8STRING_INSENSITIVE, + }, + ); + s.attributes.insert( + // FIXME: Rename to attribute_systemmust? schema_systemmust? + String::from("systemmust"), + SchemaAttribute { + name: String::from("systemmust"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_SYSTEMMUST).unwrap(), + description: String::from( + "A list of system provided required attributes this class must store.", + ), + system: true, + secret: false, + multivalue: false, + index: vec![], + syntax: SyntaxType::UTF8STRING_INSENSITIVE, + }, + ); + s.attributes.insert( + // FIXME: Rename to attribute_must? schema_must? 
+ String::from("must"), + SchemaAttribute { + name: String::from("must"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_MUST).unwrap(), + description: String::from( + "A user modifiable list of required attributes this class must store.", + ), + system: true, + secret: false, + multivalue: false, + index: vec![], + syntax: SyntaxType::UTF8STRING_INSENSITIVE, + }, + ); - s.classes.insert( - String::from("attributetype"), - SchemaClass { - name: String::from("attributetype"), - description: String::from("Definition of a schema attribute"), - systemmay: vec![String::from("index")], - may: vec![], - systemmust: vec![ - String::from("class"), - String::from("name"), - String::from("system"), - String::from("secret"), - String::from("multivalue"), - String::from("syntax"), - String::from("description"), - ], - must: vec![], - }, - ); - s.classes.insert( - String::from("classtype"), - SchemaClass { - name: String::from("classtype"), - description: String::from("Definition of a schema classtype"), - systemmay: vec![ - String::from("systemmay"), - String::from("may"), - String::from("systemmust"), - String::from("must"), - ], - may: vec![], - systemmust: vec![ - String::from("class"), - String::from("name"), - String::from("description"), - ], - must: vec![], - }, - ); - s.classes.insert( - String::from("object"), - SchemaClass { - name: String::from("object"), - description: String::from("A system created class that all objects must contain"), - systemmay: vec![ - String::from("principal_name"), - ], - may: vec![], - systemmust: vec![String::from("uuid")], - must: vec![], - }, - ); - s.classes.insert( - String::from("extensibleobject"), - SchemaClass { - name: String::from("extensibleobject"), - description: String::from("A class type that turns off all rules ..."), - systemmay: vec![], - may: vec![], - systemmust: vec![], - must: vec![], - }, - ); + s.classes.insert( + String::from("attributetype"), + SchemaClass { + name: String::from("attributetype"), + uuid: 
Uuid::parse_str(UUID_SCHEMA_CLASS_ATTRIBUTETYPE).unwrap(), + description: String::from("Definition of a schema attribute"), + systemmay: vec![String::from("index")], + may: vec![], + systemmust: vec![ + String::from("class"), + String::from("name"), + String::from("system"), + String::from("secret"), + String::from("multivalue"), + String::from("syntax"), + String::from("description"), + ], + must: vec![], + }, + ); + s.classes.insert( + String::from("classtype"), + SchemaClass { + name: String::from("classtype"), + uuid: Uuid::parse_str(UUID_SCHEMA_CLASS_CLASSTYPE).unwrap(), + description: String::from("Definition of a schema classtype"), + systemmay: vec![ + String::from("systemmay"), + String::from("may"), + String::from("systemmust"), + String::from("must"), + ], + may: vec![], + systemmust: vec![ + String::from("class"), + String::from("name"), + String::from("description"), + ], + must: vec![], + }, + ); + s.classes.insert( + String::from("object"), + SchemaClass { + name: String::from("object"), + uuid: Uuid::parse_str(UUID_SCHEMA_CLASS_OBJECT).unwrap(), + description: String::from( + "A system created class that all objects must contain", + ), + systemmay: vec![ + // FIXME: Owner? Responsible? Contact? + String::from("description"), + String::from("principal_name"), + ], + may: vec![], + systemmust: vec![ + String::from("class"), + String::from("name"), + String::from("uuid"), + ], + must: vec![], + }, + ); + s.classes.insert( + String::from("extensibleobject"), + SchemaClass { + name: String::from("extensibleobject"), + uuid: Uuid::parse_str(UUID_SCHEMA_CLASS_EXTENSIBLEOBJECT).unwrap(), + description: String::from("A class type that turns off all rules ..."), + systemmay: vec![], + may: vec![], + systemmust: vec![], + must: vec![], + }, + ); - s + match s.validate(&mut au) { + Ok(_) => Ok(s), + Err(e) => Err(e), + } + }); + + audit.append_scope(au); + + r } // This shouldn't fail? 
- pub fn bootstrap_core(&mut self) { + pub fn bootstrap_core(&mut self, audit: &mut AuditScope) -> Result<(), ()> { // This will create a set of sane, system core schema that we can use // main types are users, groups + let mut au = AuditScope::new("schema_bootstrap_core"); + let r = audit_segment!(au, || { + // Create attributes + // displayname // single + self.attributes.insert( + String::from("displayname"), + SchemaAttribute { + name: String::from("displayname"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_DISPLAYNAME).unwrap(), + description: String::from("The publicly visible display name of this person"), + system: true, + secret: false, + multivalue: false, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::UTF8STRING, + }, + ); + // name // single + // mail // multi + self.attributes.insert( + String::from("mail"), + SchemaAttribute { + name: String::from("mail"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_MAIL).unwrap(), + description: String::from("mail addresses of the object"), + system: true, + secret: false, + multivalue: true, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::UTF8STRING, + }, + ); + // memberof // multi + self.attributes.insert( + String::from("memberof"), + SchemaAttribute { + name: String::from("memberof"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_MEMBEROF).unwrap(), + description: String::from("reverse group membership of the object"), + system: true, + secret: false, + multivalue: true, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::UTF8STRING_INSENSITIVE, + }, + ); + // ssh_publickey // multi + self.attributes.insert( + String::from("ssh_publickey"), + SchemaAttribute { + name: String::from("ssh_publickey"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_SSH_PUBLICKEY).unwrap(), + description: String::from("SSH public keys of the object"), + system: true, + secret: false, + multivalue: true, + index: vec![], + syntax: SyntaxType::UTF8STRING, + }, + ); + // password // secret, multi + self.attributes.insert( + 
String::from("password"), + SchemaAttribute { + name: String::from("password"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_PASSWORD).unwrap(), + description: String::from( + "password hash material of the object for authentication", + ), + system: true, + secret: true, + multivalue: true, + index: vec![], + syntax: SyntaxType::UTF8STRING, + }, + ); + // + // member + self.attributes.insert( + String::from("member"), + SchemaAttribute { + name: String::from("member"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_MEMBER).unwrap(), + description: String::from("List of members of the group"), + system: true, + secret: false, + multivalue: true, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::UTF8STRING_INSENSITIVE, + }, + ); - // Create attributes - // displayname // single - self.attributes.insert( - String::from("displayname"), - SchemaAttribute { - name: String::from("displayname"), - description: String::from("The publicly visible display name of this person"), - system: true, - secret: false, - multivalue: false, - index: vec![IndexType::EQUALITY], - syntax: SyntaxType::UTF8STRING, - }, - ); - // name // single - // mail // multi - self.attributes.insert( - String::from("mail"), - SchemaAttribute { - name: String::from("mail"), - description: String::from("mail addresses of the object"), - system: true, - secret: false, - multivalue: true, - index: vec![IndexType::EQUALITY], - syntax: SyntaxType::UTF8STRING, - }, - ); - // memberof // multi - self.attributes.insert( - String::from("memberof"), - SchemaAttribute { - name: String::from("memberof"), - description: String::from("reverse group membership of the object"), - system: true, - secret: false, - multivalue: true, - index: vec![IndexType::EQUALITY], - syntax: SyntaxType::UTF8STRING_INSENSITIVE, - }, - ); - // ssh_publickey // multi - self.attributes.insert( - String::from("ssh_publickey"), - SchemaAttribute { - name: String::from("ssh_publickey"), - description: String::from("SSH public keys of the 
object"), - system: true, - secret: false, - multivalue: true, - index: vec![], - syntax: SyntaxType::UTF8STRING, - }, - ); - // password // secret, multi - self.attributes.insert( - String::from("password"), - SchemaAttribute { - name: String::from("password"), - description: String::from( - "password hash material of the object for authentication", - ), - system: true, - secret: true, - multivalue: true, - index: vec![], - syntax: SyntaxType::UTF8STRING, - }, - ); - // - // member - self.attributes.insert( - String::from("member"), - SchemaAttribute { - name: String::from("member"), - description: String::from("List of members of the group"), - system: true, - secret: false, - multivalue: true, - index: vec![IndexType::EQUALITY], - syntax: SyntaxType::UTF8STRING_INSENSITIVE, - }, - ); + self.attributes.insert( + String::from("version"), + SchemaAttribute { + name: String::from("version"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_VERSION).unwrap(), + description: String::from( + "The systems internal migration version for provided objects", + ), + system: true, + secret: true, + multivalue: false, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::UTF8STRING_INSENSITIVE, + }, + ); - // Create the classes that use it - // person - self.classes.insert( - String::from("person"), - SchemaClass { - name: String::from("person"), - description: String::from("Object representation of a person"), - systemmay: vec![ - String::from("description"), - String::from("mail"), - String::from("ssh_publickey"), - String::from("memberof"), - String::from("password"), - ], - may: vec![], - systemmust: vec![ - String::from("class"), - String::from("name"), - String::from("displayname"), - ], - must: vec![], - }, - ); - // group - self.classes.insert( - String::from("group"), - SchemaClass { - name: String::from("group"), - description: String::from("Object representation of a group"), - systemmay: vec![String::from("description"), String::from("member")], - may: vec![], - 
systemmust: vec![ - String::from("class"), - String::from("name"), - ], - must: vec![], - }, - ); + self.attributes.insert( + String::from("domain"), + SchemaAttribute { + name: String::from("domain"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_DOMAIN).unwrap(), + description: String::from("A DNS Domain name entry."), + system: true, + secret: false, + multivalue: true, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::UTF8STRING_INSENSITIVE, + }, + ); + // Create the classes that use it + // FIXME: Add account lock + self.classes.insert( + String::from("account"), + SchemaClass { + name: String::from("account"), + uuid: Uuid::parse_str(UUID_SCHEMA_CLASS_ACCOUNT).unwrap(), + description: String::from("Object representation of a person"), + systemmay: vec![ + String::from("password"), + String::from("ssh_publickey"), + String::from("memberof"), + // String::from("uidnumber"), + // String::from("gidnumber"), + ], + may: vec![], + systemmust: vec![String::from("displayname")], + must: vec![], + }, + ); + self.classes.insert( + String::from("person"), + SchemaClass { + name: String::from("person"), + uuid: Uuid::parse_str(UUID_SCHEMA_CLASS_PERSON).unwrap(), + description: String::from("Object representation of a person"), + systemmay: vec![ + String::from("mail"), + String::from("memberof"), + // String::from("password"), + ], + may: vec![], + systemmust: vec![String::from("displayname")], + must: vec![], + }, + ); + self.classes.insert( + String::from("group"), + SchemaClass { + name: String::from("group"), + uuid: Uuid::parse_str(UUID_SCHEMA_CLASS_GROUP).unwrap(), + description: String::from("Object representation of a group"), + systemmay: vec![ + String::from("member"), + // String::from("gidnumber"), + ], + may: vec![], + systemmust: vec![], + must: vec![], + }, + ); + self.classes.insert( + String::from("system_info"), + SchemaClass { + name: String::from("system_info"), + uuid: Uuid::parse_str(UUID_SCHEMA_CLASS_SYSTEM_INFO).unwrap(), + description: 
String::from("System metadata object class"), + systemmay: vec![], + may: vec![], + systemmust: vec![ + String::from("version"), + // Needed when we implement principalnames? + String::from("domain"), + // String::from("hostname"), + ], + must: vec![], + }, + ); + + // Finally, validate our content is sane. + self.validate(&mut au) + }); + + audit.append_scope(au); + + r } - pub fn validate(&self) -> Result<(), ()> { + pub fn validate(&self, audit: &mut AuditScope) -> Result<(), ()> { // FIXME: How can we make this return a proper result? // - // Do we need some functional bullshit? - // Validate our schema content is sane - // For now we only have a few basic methods for this, such as - // checking all our classes must/may are correct. + // TODO: Does this need to validate anything further at all? The UUID + // will be checked as part of the schema migration on startup, so I think + // just that all the content is sane is fine. for class in self.classes.values() { + // report the class we are checking for a in &class.systemmay { - assert!(self.attributes.contains_key(a)); + // report the attribute. + audit_log!( + audit, + "validate systemmay class:attr -> {}:{}", + class.name, + a + ); + if !self.attributes.contains_key(a) { + return Err(()); + } } for a in &class.may { - assert!(self.attributes.contains_key(a)); + // report the attribute. + audit_log!(audit, "validate may class:attr -> {}:{}", class.name, a); + if !self.attributes.contains_key(a) { + return Err(()); + } } for a in &class.systemmust { - assert!(self.attributes.contains_key(a)); + // report the attribute. + audit_log!( + audit, + "validate systemmust class:attr -> {}:{}", + class.name, + a + ); + if !self.attributes.contains_key(a) { + return Err(()); + } } for a in &class.must { - assert!(self.attributes.contains_key(a)); + // report the attribute. 
+ audit_log!(audit, "validate must class:attr -> {}:{}", class.name, a); + if !self.attributes.contains_key(a) { + return Err(()); + } } } @@ -772,7 +939,8 @@ impl Schema { Ok(()) } - pub fn normalise_entry(&mut self, entry: &Entry) -> Entry { + // pub fn normalise_entry(&mut self, entry: &mut Entry) -> Result<(), SchemaError> { + pub fn normalise_entry(&self, entry: &Entry) -> Entry { // We duplicate the entry here, because we can't // modify what we got on the protocol level. It also // lets us extend and change things. @@ -878,14 +1046,101 @@ impl Schema { } } + +// type Schema = CowCell; + +pub struct Schema { + inner: CowCell +} + +pub struct SchemaWriteTransaction<'a> { + inner: CowCellWriteTxn<'a, SchemaInner> +} + +impl<'a> SchemaWriteTransaction<'a> { + pub fn bootstrap_core(&mut self, audit: &mut AuditScope) -> Result<(), ()> { + self.inner.bootstrap_core(audit) + } + + pub fn validate_entry(&self, entry: &Entry) -> Result<(), SchemaError> { + self.inner.validate_entry(entry) + } + + pub fn normalise_entry(&self, entry: &Entry) -> Entry { + self.inner.normalise_entry(entry) + } + + pub fn commit(mut self) -> Result<(), ()> { + unimplemented!(); + } + + pub fn validate_filter(&self, filt: &Filter) -> Result<(), SchemaError> { + self.inner.validate_filter(filt) + } +} + +impl<'a> Drop for SchemaWriteTransaction<'a> { + fn drop(&mut self) { + // If committed != true, what do? abort? + // Is it valid to commit the schema, but not the be? + // This sounds like a problem for the query server. + // TODO: William of the future, don't be shit. 
+ unimplemented!(); + } +} + +pub struct SchemaTransaction { + inner: CowCellReadTxn +} + +impl SchemaTransaction { + pub fn validate(&self, audit: &mut AuditScope) -> Result<(), ()> { + self.inner.validate(audit) + } + + pub fn validate_entry(&self, entry: &Entry) -> Result<(), SchemaError> { + self.inner.validate_entry(entry) + } + + pub fn validate_filter(&self, filt: &Filter) -> Result<(), SchemaError> { + self.inner.validate_filter(filt) + } +} + +impl Schema { + pub fn new(audit: &mut AuditScope) -> Result { + SchemaInner::new(audit) + .map(|si| { + Schema { + inner: CowCell::new(si) + } + }) + } + + pub fn read(&self) -> SchemaTransaction { + SchemaTransaction { + inner: self.inner.read() + } + } + + pub fn write(&self) -> SchemaWriteTransaction { + SchemaWriteTransaction { + inner: self.inner.write() + } + } +} + #[cfg(test)] mod tests { + use super::super::audit::AuditScope; + use super::super::constants::*; use super::super::entry::Entry; use super::super::error::SchemaError; use super::super::filter::Filter; use super::{IndexType, Schema, SchemaAttribute, SchemaClass, SyntaxType}; use serde_json; use std::convert::TryFrom; + use uuid::Uuid; #[test] fn test_schema_index_tryfrom() { @@ -927,6 +1182,7 @@ mod tests { fn test_schema_syntax_principal() { let sa = SchemaAttribute { name: String::from("principal_name"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_PRINCIPAL_NAME).unwrap(), description: String::from("The longform name of an object, derived from name and domain. 
Example: alice@project.org"), system: true, secret: false, @@ -954,19 +1210,19 @@ mod tests { #[test] fn test_schema_normalise_uuid() { let sa = SchemaAttribute { - name: String::from("uuid"), - description: String::from("The universal unique id of the object"), - system: true, - secret: false, - multivalue: false, - index: vec![IndexType::EQUALITY], - syntax: SyntaxType::UUID, - }; + name: String::from("uuid"), + uuid: Uuid::parse_str(UUID_SCHEMA_ATTR_UUID).unwrap(), + description: String::from("The universal unique id of the object"), + system: true, + secret: false, + multivalue: false, + index: vec![IndexType::EQUALITY], + syntax: SyntaxType::UUID, + }; let u1 = String::from("936DA01F9ABD4d9d80C702AF85C822A8"); let un1 = sa.normalise_value(&u1); assert_eq!(un1, "936da01f-9abd-4d9d-80c7-02af85c822a8"); - } #[test] @@ -977,6 +1233,7 @@ mod tests { let single_value_string = SchemaAttribute { // class: vec![String::from("attributetype")], name: String::from("single_value"), + uuid: Uuid::new_v4(), description: String::from(""), system: true, secret: false, @@ -997,6 +1254,7 @@ mod tests { let multi_value_string = SchemaAttribute { // class: vec![String::from("attributetype")], name: String::from("mv_string"), + uuid: Uuid::new_v4(), description: String::from(""), system: true, secret: false, @@ -1012,6 +1270,7 @@ mod tests { let multi_value_boolean = SchemaAttribute { // class: vec![String::from("attributetype")], name: String::from("mv_bool"), + uuid: Uuid::new_v4(), description: String::from(""), system: true, secret: false, @@ -1032,6 +1291,7 @@ mod tests { let single_value_syntax = SchemaAttribute { // class: vec![String::from("attributetype")], name: String::from("sv_syntax"), + uuid: Uuid::new_v4(), description: String::from(""), system: true, secret: false, @@ -1049,6 +1309,7 @@ mod tests { let single_value_index = SchemaAttribute { // class: vec![String::from("attributetype")], name: String::from("sv_index"), + uuid: Uuid::new_v4(), description: 
String::from(""), system: true, secret: false, @@ -1072,8 +1333,11 @@ mod tests { #[test] fn test_schema_simple() { - let schema = Schema::new(); - assert!(schema.validate().is_ok()); + let mut audit = AuditScope::new("test_schema_simple"); + let schema = Schema::new(&mut audit).unwrap(); + let schema_ro = schema.read(); + assert!(schema_ro.validate(&mut audit).is_ok()); + println!("{}", audit); } #[test] @@ -1086,7 +1350,9 @@ mod tests { fn test_schema_entries() { // Given an entry, assert it's schema is valid // We do - let schema = Schema::new(); + let mut audit = AuditScope::new("test_schema_entries"); + let schema_outer = Schema::new(&mut audit).unwrap(); + let schema = schema_outer.read(); let e_no_class: Entry = serde_json::from_str( r#"{ "attrs": {} @@ -1182,13 +1448,16 @@ mod tests { ) .unwrap(); assert_eq!(schema.validate_entry(&e_ok), Ok(())); + println!("{}", audit); } #[test] fn test_schema_entry_normalise() { // Check that entries can be normalised sanely - let mut schema = Schema::new(); - schema.bootstrap_core(); + let mut audit = AuditScope::new("test_schema_entry_normalise"); + let mut schema_outer = Schema::new(&mut audit).unwrap(); + let mut schema = schema_outer.write(); + schema.bootstrap_core(&mut audit).unwrap(); // Check syntax to upper // check index to upper @@ -1229,11 +1498,14 @@ mod tests { assert_eq!(schema.validate_entry(&e_normalised), Ok(())); assert_eq!(e_expect, e_normalised); + println!("{}", audit); } #[test] fn test_schema_extensible() { - let schema = Schema::new(); + let mut audit = AuditScope::new("test_schema_extensible"); + let schema_outer = Schema::new(&mut audit).unwrap(); + let schema = schema_outer.read(); // Just because you are extensible, doesn't mean you can be lazy let e_extensible_bad: Entry = serde_json::from_str( @@ -1263,6 +1535,7 @@ mod tests { /* Is okay because extensible! 
*/ assert_eq!(schema.validate_entry(&e_extensible), Ok(())); + println!("{}", audit); } #[test] @@ -1272,8 +1545,10 @@ mod tests { #[test] fn test_schema_bootstrap() { - let mut schema = Schema::new(); - schema.bootstrap_core(); + let mut audit = AuditScope::new("test_schema_bootstrap"); + let mut schema_outer = Schema::new(&mut audit).unwrap(); + let mut schema = schema_outer.write(); + schema.bootstrap_core(&mut audit).unwrap(); // now test some entries let e_person: Entry = serde_json::from_str( @@ -1302,11 +1577,14 @@ mod tests { ) .unwrap(); assert_eq!(schema.validate_entry(&e_group), Ok(())); + println!("{}", audit); } #[test] fn test_schema_filter_validation() { - let schema = Schema::new(); + let mut audit = AuditScope::new("test_schema_filter_validation"); + let schema_outer = Schema::new(&mut audit).unwrap(); + let schema = schema_outer.read(); // Test mixed case attr name let f_mixed: Filter = serde_json::from_str( r#"{ @@ -1392,6 +1670,7 @@ mod tests { ) .unwrap(); assert_eq!(schema.validate_filter(&f_or_ok), Ok(())); + println!("{}", audit); } #[test] diff --git a/src/lib/server.rs b/src/lib/server.rs index 3e421b54f..f28304ff6 100644 --- a/src/lib/server.rs +++ b/src/lib/server.rs @@ -1,34 +1,86 @@ use actix::prelude::*; +// This is really only used for long lived, high level types that need clone +// that otherwise can't be cloned. Think Mutex. 
+use std::sync::Arc; + use audit::AuditScope; -use be::{Backend, BackendError}; +use be::{Backend, BackendError, BackendReadTransaction, BackendTransaction, BackendWriteTransaction}; use entry::Entry; use error::OperationError; use event::{CreateEvent, OpResult, SearchEvent, SearchResult}; +use filter::Filter; use log::EventLog; use plugins::Plugins; -use schema::Schema; +use schema::{Schema, SchemaTransaction, SchemaWriteTransaction}; pub fn start(log: actix::Addr, path: &str, threads: usize) -> actix::Addr { let mut audit = AuditScope::new("server_start"); let log_inner = log.clone(); let qs_addr = audit_segment!(audit, || { - // Create the BE connection - // probably need a config type soon .... - - // Create a new backend audit scope + // Create "just enough" schema for us to be able to load from + // disk ... Schema loading is one time where we validate the + // entries as we read them, so we need this here. + // FIXME: Handle results in start correctly + let schema = Arc::new(Schema::new(&mut audit).unwrap()); let mut audit_be = AuditScope::new("backend_new"); - let be = Backend::new(&mut audit_be, path); - audit.append_scope(audit_be); + let be = Backend::new(&mut audit_be, path).unwrap(); + { + // Create a new backend audit scope + let mut be_txn = be.write(); + let mut schema_write = schema.write(); + audit.append_scope(audit_be); + + // Now, we have the initial schema in memory. Use this to trigger + // an index of the be for the core schema. + + // Now search for the schema itself, and validate that the system + // in memory matches the BE on disk, and that it's syntactically correct. + // Write it out if changes are needed. + + // Now load the remaining backend schema into memory. + // TODO: Schema elements should be versioned individually. + schema_write.bootstrap_core(&mut audit).unwrap(); + + // TODO: Backend setup indexes as needed from schema, for the core + // system schema. + // TODO: Trigger an index? This could be costly ... 
+ // Perhaps a config option to say if we index on startup or not. + schema_write.commit(); + be_txn.commit(); + } + + // Create a temporary query_server implementation + let query_server = QueryServer::new(log_inner.clone(), be.clone(), schema.clone()); + // Start the qs txn + let query_server_write = query_server.write(); + + // TODO: Create required system objects if they are missing + + // These will each manage their own transaction per operation, so the + // we don't need to maintain the be_txn again. + + // First, check the system_info object. This stores some server information + // and details. It's a pretty static thing. + let mut audit_si = AuditScope::new("start_system_info"); + audit_segment!(audit_si, || start_system_info(&mut audit_si, &query_server_write)); + audit.append_scope(audit_si); + + // Check the anonymous object exists (migrations). + let mut audit_an = AuditScope::new("start_anonymous"); + audit_segment!(audit_an, || start_anonymous(&mut audit_an, &query_server_write)); + audit.append_scope(audit_an); + + // Check the admin object exists (migrations). + + // Load access profiles and configure them. + + // We are good to go! Finally commit and consume the txn. + + query_server_write.commit(); - let mut schema = Schema::new(); - schema.bootstrap_core(); - // now we clone it out in the startup I think - // Should the be need a log clone ref? or pass it around? - // it probably needs it ... 
- // audit.end_event("server_new"); SyncArbiter::start(threads, move || { QueryServer::new(log_inner.clone(), be.clone(), schema.clone()) }) @@ -37,6 +89,55 @@ pub fn start(log: actix::Addr, path: &str, threads: usize) -> actix::A qs_addr } +fn start_system_info(audit: &mut AuditScope, qs: &QueryServerWriteTransaction) { + // FIXME: Get the domain from the config + let e: Entry = serde_json::from_str( + r#"{ + "attrs": { + "class": ["object", "system_info"], + "name": ["system_info"], + "uuid": [], + "description": ["System info and metadata object."], + "version": ["1"], + "domain": ["example.com"] + } + }"#, + ) + .unwrap(); + + // Does it exist? + // if yes, load + // if no, create + // TODO: internal_create function to allow plugin + schema checks + // check it's version + // migrate + + qs.internal_assert_or_create(e); +} + +fn start_anonymous(audit: &mut AuditScope, qs: &QueryServerWriteTransaction) { + // Does it exist? + let e: Entry = serde_json::from_str( + r#"{ + "attrs": { + "class": ["object", "account"], + "name": ["anonymous"], + "uuid": [], + "description": ["Anonymous access account."], + "version": ["1"] + + } + }"#, + ) + .unwrap(); + + // if yes, load + // if no, create + // check it's version + // migrate + qs.internal_migrate_or_create(e); +} + // This is the core of the server. It implements all // the search and modify actions, applies access controls // and get's everything ready to push back to the fe code @@ -44,32 +145,13 @@ pub fn start(log: actix::Addr, path: &str, threads: usize) -> actix::A // This is it's own actor, so we can have a write addr and a read addr, // and it allows serialisation that way rather than relying on // the backend +pub trait QueryServerReadTransaction { + type BackendTransactionType: BackendReadTransaction; -pub struct QueryServer { - log: actix::Addr, - // be: actix::Addr, - // This probably needs to be Arc, or a ref. How do we want to manage this? - // I think the BE is build, configured and cloned? 
Maybe Backend - // is a wrapper type to Arc or something. - be: Backend, - schema: Schema, -} + fn get_be_txn(&self) -> &Self::BackendTransactionType; -impl QueryServer { - pub fn new(log: actix::Addr, be: Backend, schema: Schema) -> Self { - log_event!(log, "Starting query worker ..."); - QueryServer { - log: log, - be: be, - schema: schema, - } - } - - // Actually conduct a search request - // This is the core of the server, as it processes the entire event - // applies all parts required in order and more. - pub fn search( - &mut self, + fn search( + &self, au: &mut AuditScope, se: &SearchEvent, ) -> Result, OperationError> { @@ -81,11 +163,12 @@ impl QueryServer { // TODO: Normalise the filter + // TODO: Assert access control allows the filter and requested attrs. + // TODO: Pre-search plugins let mut audit_be = AuditScope::new("backend_search"); - let res = self - .be + let res = self.get_be_txn() .search(&mut audit_be, &se.filter) .map(|r| r) .map_err(|_| OperationError::Backend); @@ -99,6 +182,83 @@ impl QueryServer { res } + // Specialisation of search for exists or not + fn internal_exists(&self, filter: Filter) -> Result { + unimplemented!() + } + + fn internal_search(&self, filter: Filter) -> Result<(), ()> { + unimplemented!() + } +} + +pub struct QueryServerTransaction { + be_txn: BackendTransaction, + // Anything else? In the future, we'll need to have a schema transaction + // type, maybe others? + schema: SchemaTransaction, +} + + +// Actually conduct a search request +// This is the core of the server, as it processes the entire event +// applies all parts required in order and more. 
+impl QueryServerReadTransaction for QueryServerTransaction { + type BackendTransactionType = BackendTransaction; + + fn get_be_txn(&self) -> &BackendTransaction { + &self.be_txn + } +} + +pub struct QueryServerWriteTransaction<'a> { + committed: bool, + // be_write_txn: BackendWriteTransaction, + // schema_write: SchemaWriteTransaction, + // read: QueryServerTransaction, + + be_txn: BackendWriteTransaction, + schema: SchemaWriteTransaction<'a>, +} + +impl<'a> QueryServerReadTransaction for QueryServerWriteTransaction<'a> { + type BackendTransactionType = BackendWriteTransaction; + + fn get_be_txn(&self) -> &BackendWriteTransaction { + &self.be_txn + } +} + +pub struct QueryServer { + log: actix::Addr, + // be: actix::Addr, + // This probably needs to be Arc, or a ref. How do we want to manage this? + // I think the BE is build, configured and cloned? Maybe Backend + // is a wrapper type to Arc or something. + be: Backend, + schema: Arc, +} + +impl QueryServer { + pub fn new(log: actix::Addr, be: Backend, schema: Arc) -> Self { + log_event!(log, "Starting query worker ..."); + QueryServer { + log: log, + be: be, + schema: schema, + } + } + + pub fn read(&self) -> QueryServerTransaction { + unimplemented!() + } + + pub fn write(&self) -> QueryServerWriteTransaction { + unimplemented!() + } +} + +impl<'a> QueryServerWriteTransaction<'a> { pub fn create(&mut self, au: &mut AuditScope, ce: &CreateEvent) -> Result<(), OperationError> { // The create event is a raw, read only representation of the request // that was made to us, including information about the identity @@ -112,15 +272,13 @@ impl QueryServer { // Copy the entries to a writeable form. let mut candidates: Vec = ce.entries.iter().map(|er| er.clone()).collect(); - // Start a txn - // run any pre plugins, giving them the list of mutable candidates. // pre-plugins are defined here in their correct order of calling! // I have no intent to make these dynamic or configurable. 
let mut audit_plugin_pre = AuditScope::new("plugin_pre_create"); let plug_pre_res = Plugins::run_pre_create( - &mut self.be, + &self.be_txn, &mut audit_plugin_pre, &mut candidates, ce, @@ -147,13 +305,18 @@ impl QueryServer { return r; } - // FIXME: Normalise all entries now. + // Normalise all the data now it's validated. + // FIXME: This normalisation COPIES everything, which may be + // slow. + let norm_cand: Vec = candidates + .iter() + .map(|e| self.schema.normalise_entry(&e)) + .collect(); let mut audit_be = AuditScope::new("backend_create"); // We may change from ce.entries later to something else? - let res = self - .be - .create(&mut audit_be, &candidates) + let res = self.be_txn + .create(&mut audit_be, &norm_cand) .map(|_| ()) .map_err(|e| match e { BackendError::EmptyRequest => OperationError::EmptyRequest, @@ -162,18 +325,91 @@ impl QueryServer { au.append_scope(audit_be); if res.is_err() { + // be_txn is dropped, ie aborted here. audit_log!(au, "Create operation failed (backend), {:?}", r); return res; } // Run any post plugins // Commit the txn + // let commit, commit! + // be_txn.commit(); // We are complete, finalise logging and return audit_log!(au, "Create operation success"); res } + + // internal server operation types. + // These just wrap the fn create/search etc, but they allow + // creating the needed create event with the correct internal flags + // and markers. They act as though they have the highest level privilege + // IE there are no access control checks. + + pub fn internal_exists_or_create(&self, e: Entry) -> Result<(), ()> { + // If the thing exists, stop. + // if not, create from Entry. + unimplemented!() + } + + pub fn internal_migrate_or_create(&self, e: Entry) -> Result<(), ()> { + // if the thing exists, ensure the set of attributes on + // Entry A match and are present (but don't delete multivalue, or extended + // attributes in the situation. 
+ // If not exist, create from Entry B + // + // WARNING: this requires schema awareness for multivalue types! + // We need to either do a schema aware merge, or we just overwrite those + // few attributes. + // + // This will leave extra classes and attributes alone! + unimplemented!() + } + + // Should this take a be_txn? + pub fn internal_assert_or_create(&self, e: Entry) -> Result<(), ()> { + // If exists, ensure the object is exactly as provided + // else, if not exists, create it. IE no extra or excess + // attributes and classes. + + // Create a filter from the entry for assertion. + let filt = e.filter_from_attrs(vec!["name"]); + + // Does it exist? + match self.internal_exists(filt) { + Ok(true) => { + // it exists. We need to ensure the content now. + unimplemented!() + } + Ok(false) => { + // It does not exist. Create it. + unimplemented!() + } + Err(e) => { + // An error occurred. pass it back up. + Err(()) + } + } + // If exist, check. + // if not the same, delete, then create + + // If not exist, create. + } + + // These are where searches and other actions are actually implemented. This + // is the "internal" version, where we define the event as being internal + // only, allowing certain plugin bypasses etc. + + pub fn internal_create(qs: &QueryServer) -> Result<(), ()> { + // This will call qs.create(), after we generate a createEvent with internal + // types etc. + unimplemented!() + } + + pub fn commit(self) -> Result<(), ()> { + unimplemented!() + } } impl Actor for QueryServer { @@ -198,20 +434,21 @@ impl Handler for QueryServer { let mut audit = AuditScope::new("search"); let res = audit_segment!(&mut audit, || { audit_log!(audit, "Begin event {:?}", msg); + // Begin a read + let qs_read = self.read(); // Parse what we need from the event? // What kind of event is it? // In the future we'll likely change search event ... + // End the read + // was this ok? 
- match self.search(&mut audit, &msg) { + match qs_read.search(&mut audit, &msg) { Ok(entries) => Ok(SearchResult::new(entries)), Err(e) => Err(e), } - - // audit_log!(audit, "End event {:?}", msg); - // audit.end_event("search"); }); // At the end of the event we send it for logging. self.log.do_send(audit); @@ -227,14 +464,18 @@ impl Handler for QueryServer { let res = audit_segment!(&mut audit, || { audit_log!(audit, "Begin create event {:?}", msg); - match self.create(&mut audit, &msg) { - Ok(()) => Ok(OpResult {}), + let mut qs_write = self.write(); + + match qs_write.create(&mut audit, &msg) { + Ok(()) => { + qs_write.commit(); + Ok(OpResult {}) + } Err(e) => Err(e), } }); // At the end of the event we send it for logging. self.log.do_send(audit); - // At the end of the event we send it for logging. res } } @@ -249,11 +490,12 @@ mod tests { extern crate futures; use futures::future; use futures::future::Future; + use std::sync::Arc; extern crate tokio; use super::super::audit::AuditScope; - use super::super::be::Backend; + use super::super::be::{Backend, BackendTransaction}; use super::super::entry::Entry; use super::super::event::{CreateEvent, SearchEvent}; use super::super::filter::Filter; @@ -261,7 +503,7 @@ mod tests { use super::super::proto_v1::Entry as ProtoEntry; use super::super::proto_v1::{CreateRequest, SearchRequest}; use super::super::schema::Schema; - use super::super::server::QueryServer; + use super::super::server::{QueryServer, QueryServerWriteTransaction, QueryServerReadTransaction}; macro_rules! 
run_test { ($test_fn:expr) => {{ @@ -269,10 +511,14 @@ mod tests { let mut audit = AuditScope::new("run_test"); let test_log = log::start(); - let be = Backend::new(&mut audit, ""); - let mut schema = Schema::new(); - schema.bootstrap_core(); - let test_server = QueryServer::new(test_log.clone(), be, schema); + let be = Backend::new(&mut audit, "").unwrap(); + let mut schema_outer = Schema::new(&mut audit).unwrap(); + { + let mut schema = schema_outer.write(); + schema.bootstrap_core(&mut audit).unwrap(); + schema.commit(); + } + let test_server = QueryServer::new(test_log.clone(), be, Arc::new(schema_outer)); // Could wrap another future here for the future::ok bit... let fut = $test_fn(test_log.clone(), test_server, &mut audit); @@ -291,6 +537,7 @@ mod tests { #[test] fn test_be_create_user() { run_test!(|_log, mut server: QueryServer, audit: &mut AuditScope| { + let mut server_txn = server.write(); let filt = Filter::Pres(String::from("name")); let se1 = SearchEvent::from_request(SearchRequest::new(filt.clone())); @@ -313,18 +560,20 @@ mod tests { let ce = CreateEvent::from_vec(expected.clone()); - let r1 = server.search(audit, &se1).unwrap(); + let r1 = server_txn.search(audit, &se1).unwrap(); assert!(r1.len() == 0); - let cr = server.create(audit, &ce); + let cr = server_txn.create(audit, &ce); assert!(cr.is_ok()); - let r2 = server.search(audit, &se2).unwrap(); + let r2 = server_txn.search(audit, &se2).unwrap(); println!("--> {:?}", r2); assert!(r2.len() == 1); assert_eq!(r2, expected); + assert!(server_txn.commit().is_ok()); + future::ok(()) }); } diff --git a/src/server/main.rs b/src/server/main.rs index 972301120..67b609c60 100644 --- a/src/server/main.rs +++ b/src/server/main.rs @@ -1,4 +1,3 @@ -#[macro_use] extern crate actix; extern crate rsidm;