Add DBVersioning for entries (#47)
This commit is contained in:
parent 3bbe9943c0
commit ff828e4f4a

@@ -296,3 +296,34 @@ system yet), method two probably is the best, but you need token constraint
to make sure you can't replay to another host.

Brain Dump Internal Details
===========================

Credentials should be a real struct on the entry that is serialised to a str for the dbentry.
This allows repl to still work, while letting us keep detailed structures for credential types
in the DB. When we send to the proto entry we could probably keep it as a real struct on the
protoentry too, but then again we could eliminate all private types from transmission.
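
A rough sketch of that idea (hypothetical names throughout; none of this is in the commit):
the credential lives on the entry as a typed value, and is only flattened to a str at the
DbEntry boundary, so the BTreeMap<String, Vec<String>> attrs and replication keep working.

use serde::{Deserialize, Serialize};

// Hypothetical credential type; the variants are illustrative only.
#[derive(Serialize, Deserialize, Debug)]
pub enum Credential {
    Password { hash: String },
    Totp { secret: Vec<u8>, step: u64 },
}

// Flatten to a String for storage in the dbentry attrs.
pub fn credential_to_db_str(c: &Credential) -> Result<String, serde_json::Error> {
    serde_json::to_string(c)
}

// And recover the detailed structure on read.
pub fn credential_from_db_str(s: &str) -> Result<Credential, serde_json::Error> {
    serde_json::from_str(s)
}
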
When we login, we need to know what groups/roles are relevant to that authentication. To
achieve this we can have each group contain a policy of auth types (the credentials above all
provide an auth type). The login then has a known auth type of "how" they logged in, so when
we go to generate the user's "token" for that session, we can correlate these, and only attach
groups that satisfy the authentication type requirements.

I.e. the session associates the method you used to log in with your token and a cookie.
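
A minimal sketch of that correlation, again with hypothetical names:

// Hypothetical auth types; each credential above would map to one.
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum AuthType {
    Anonymous,
    Password,
    PasswordMfa,
}

pub struct Group {
    pub name: String,
    // Policy: auth types that are sufficient for this group to be attached.
    pub allowed: Vec<AuthType>,
}

// When generating the session "token", only attach groups whose policy
// is satisfied by how the user actually logged in.
pub fn groups_for_session<'a>(login: AuthType, groups: &'a [Group]) -> Vec<&'a Group> {
    groups.iter().filter(|g| g.allowed.contains(&login)).collect()
}
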
If you require extra groups, then we should support a token refresh where, given the prior
auth plus extra factors, we can re-issue the token to include the extra groups as presented.
We may also want some auth types to NOT allow refresh at all.
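
A self-contained sketch of that last point, gating refresh on the session's auth type
(hypothetical enum and policy):

#[derive(Clone, Copy, PartialEq, Debug)]
enum SessionAuth {
    Anonymous,
    Password,
}

// Hypothetical policy: an anonymous session must never gain groups via refresh.
fn refresh_allowed(auth: SessionAuth) -> bool {
    auth != SessionAuth::Anonymous
}
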
We may want groups to support expiry, where they are not valid past some time stamp. This may
require tagging or other details.
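
A possible shape for that check (hypothetical; the tagging details are left open above):

use std::time::SystemTime;

// A group carries an optional expiry time stamp.
pub struct GroupValidity {
    pub expires_at: Option<SystemTime>,
}

// A group with no expiry is always valid; otherwise it is valid strictly
// before its time stamp.
pub fn group_currently_valid(v: &GroupValidity, now: SystemTime) -> bool {
    match v.expires_at {
        Some(t) => now < t,
        None => true,
    }
}
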
How do we ensure integrity of the token? Do we have to? Is it the client's job to trust the
token given the TLS tunnel?

20 src/lib/be/dbentry.rs Normal file

@@ -0,0 +1,20 @@
use std::collections::BTreeMap;

#[derive(Serialize, Deserialize, Debug)]
pub struct DbEntryV1 {
    pub attrs: BTreeMap<String, Vec<String>>,
}

// REMEMBER: If you add a new version here, you MUST
// update entry.rs into_dbentry to export to the latest
// type always!!
#[derive(Serialize, Deserialize, Debug)]
pub enum DbEntryVers {
    V1(DbEntryV1),
}

// This is actually what we store into the DB.
#[derive(Serialize, Deserialize, Debug)]
pub struct DbEntry {
    pub ent: DbEntryVers,
}
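
Because DbEntryVers is a plain serde enum, the version tag is written into the stored JSON,
which is what makes later migrations possible. A sketch of the round trip (the JSON shape
follows from serde's default externally-tagged enum representation; the V2 upgrade arm is
hypothetical):

use std::collections::BTreeMap;

fn dbentry_roundtrip() -> Result<(), serde_json::Error> {
    let mut attrs = BTreeMap::new();
    attrs.insert("userid".to_string(), vec!["claire".to_string()]);

    let db_e = DbEntry {
        ent: DbEntryVers::V1(DbEntryV1 { attrs }),
    };

    // Serialises to: {"ent":{"V1":{"attrs":{"userid":["claire"]}}}}
    let s = serde_json::to_string(&db_e)?;

    // On read, the "V1" tag selects the version, so a future reader can
    // match on DbEntryVers and upgrade older forms in one place:
    //     match db_e.ent {
    //         DbEntryVers::V1(v1) => upgrade_v1(v1), // hypothetical
    //         // DbEntryVers::V2(v2) => v2,          // hypothetical future version
    //     }
    let _back: DbEntry = serde_json::from_str(&s)?;
    Ok(())
}
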
@@ -5,38 +5,28 @@ use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::types::ToSql;
use rusqlite::NO_PARAMS;
use serde_json;
use std::convert::TryFrom;
use std::fs;
// use uuid;

use crate::audit::AuditScope;
use crate::be::dbentry::DbEntry;
use crate::entry::{Entry, EntryCommitted, EntryNew, EntryValid};
use crate::error::{ConsistencyError, OperationError};
use crate::filter::{Filter, FilterValid};

pub mod dbentry;
mod idl;
mod mem_be;
mod sqlite_be;

#[derive(Debug)]
struct IdEntry {
    // FIXME: This should be u64, but sqlite uses i64 ...
    // TODO: for now this is i64 to make sqlite work, but entry is u64 for indexing reasons!
    id: i64,
    data: String,
}

/*
pub enum BackendType {
    Memory, // isn't memory just sqlite with file :memory: ?
    SQLite,
}
*/

#[derive(Debug, PartialEq)]
pub enum BackendError {
    EmptyRequest,
    EntryMissingId,
}

pub struct Backend {
    pool: Pool<SqliteConnectionManager>,
}

@@ -59,7 +49,7 @@ pub trait BackendReadTransaction {
        &self,
        au: &mut AuditScope,
        filt: &Filter<FilterValid>,
    ) -> Result<Vec<Entry<EntryValid, EntryCommitted>>, BackendError> {
    ) -> Result<Vec<Entry<EntryValid, EntryCommitted>>, OperationError> {
        // Do things
        // Alloc a vec for the entries.
        // FIXME: Make this actually a good size for the result set ...

@@ -99,13 +89,17 @@ pub trait BackendReadTransaction {
        }
        // Do other things
        // Now, de-serialise the raw_entries back to entries, and populate their ID's

        let entries: Vec<Entry<EntryValid, EntryCommitted>> = raw_entries
            .iter()
            .filter_map(|id_ent| {
                // TODO: Should we do better than unwrap?
                let mut e: Entry<EntryValid, EntryCommitted> =
                    serde_json::from_str(id_ent.data.as_str()).unwrap();
                e.id = Some(id_ent.id);
            .map(|id_ent| {
                // TODO: Should we do better than unwrap? And if so, how?
                // we need to map this error, so probably need to collect to Result<Vec<_>, _>
                // and then to try_audit! on that.
                let db_e = serde_json::from_str(id_ent.data.as_str()).unwrap();
                Entry::from_dbentry(db_e, u64::try_from(id_ent.id).unwrap())
            })
            .filter_map(|e| {
                if e.entry_match_no_index(&filt) {
                    Some(e)
                } else {
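
The TODO above already names the intended fix; a sketch of that shape, reusing the error
variants this commit introduces elsewhere in the diff (SerdeJsonError, InvalidEntryID) and
the two-argument try_audit! pattern:

// Sketch only: collect into Result so one bad row aborts the search with a
// real error instead of panicking in unwrap().
let entries: Result<Vec<Entry<EntryValid, EntryCommitted>>, OperationError> = raw_entries
    .iter()
    .map(|id_ent| {
        let db_e: DbEntry = serde_json::from_str(id_ent.data.as_str())
            .map_err(|_| OperationError::SerdeJsonError)?;
        let id = u64::try_from(id_ent.id).map_err(|_| OperationError::InvalidEntryID)?;
        Ok(Entry::from_dbentry(db_e, id))
    })
    .collect();
let entries = try_audit!(au, entries);
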
@@ -126,7 +120,7 @@ pub trait BackendReadTransaction {
        &self,
        au: &mut AuditScope,
        filt: &Filter<FilterValid>,
    ) -> Result<bool, BackendError> {
    ) -> Result<bool, OperationError> {
        // Do a final optimise of the filter
        // At the moment, technically search will do this, but it won't always be the
        // case once this becomes a standalone function.

@@ -236,14 +230,65 @@ impl BackendWriteTransaction {
        }
    }

    fn internal_create<T: serde::Serialize>(
    fn internal_create(
        &self,
        au: &mut AuditScope,
        entries: &Vec<Entry<EntryValid, T>>,
        dbentries: &Vec<DbEntry>,
    ) -> Result<(), OperationError> {
        audit_segment!(au, || {
            // Start be audit timer
            // Get the max id from the db. We store this ourselves to avoid max() calls.
            let mut id_max = self.get_id2entry_max_id();

            let ser_entries: Vec<IdEntry> = dbentries
                .iter()
                .map(|ser_db_e| {
                    id_max = id_max + 1;

                    IdEntry {
                        id: id_max,
                        // TODO: Should we do better than unwrap?
                        data: serde_json::to_string(&ser_db_e).unwrap(),
                    }
                })
                .collect();

            audit_log!(au, "serialising: {:?}", ser_entries);

            // THIS IS PROBABLY THE BIT WHERE YOU NEED DB ABSTRACTION
            {
                let mut stmt = try_audit!(
                    au,
                    self.conn
                        .prepare("INSERT INTO id2entry (id, data) VALUES (:id, :data)"),
                    "rusqlite error {:?}",
                    OperationError::SQLiteError
                );

                // write them all
                for ser_entry in ser_entries {
                    // TODO: Prepared statement.
                    try_audit!(
                        au,
                        stmt.execute_named(&[
                            (":id", &ser_entry.id as &ToSql),
                            (":data", &ser_entry.data as &ToSql)
                        ]),
                        "rusqlite error {:?}",
                        OperationError::SQLiteError
                    );
                }
            }

            Ok(())
    }

    pub fn create(
        &self,
        au: &mut AuditScope,
        entries: &Vec<Entry<EntryValid, EntryNew>>,
    ) -> Result<(), OperationError> {
        // figured we would want a audit_segment to wrap internal_create so when doing profiling we can
        // tell which function is calling it. either this one or restore.
        audit_segment!(au, || {
            if entries.is_empty() {
                // TODO: Better error
                // End the timer

@@ -254,111 +299,80 @@ impl BackendWriteTransaction {
            // we do this outside the txn to avoid blocking needlessly.
            // However, it could be pointless due to the extra string allocs ...

            // Get the max id from the db. We store this ourselves to avoid max().
            let mut id_max = self.get_id2entry_max_id();
            let dbentries: Vec<_> = entries.iter().map(|e| e.into_dbentry()).collect();

            let ser_entries: Vec<IdEntry> = entries
                .iter()
                .map(|val| {
                    // TODO: Should we do better than unwrap?
                    id_max = id_max + 1;
                    IdEntry {
                        id: id_max,
                        data: serde_json::to_string(&val).unwrap(),
                    }
                })
                .collect();
            self.internal_create(au, &dbentries)

            audit_log!(au, "serialising: {:?}", ser_entries);

            // THIS IS PROBABLY THE BIT WHERE YOU NEED DB ABSTRACTION
            {
                let mut stmt = try_audit!(
                    au,
                    self.conn
                        .prepare("INSERT INTO id2entry (id, data) VALUES (:id, :data)"),
                    "rusqlite error {:?}",
                    OperationError::SQLiteError
                );

                // write them all
                for ser_entry in ser_entries {
                    // TODO: Prepared statement.
                    try_audit!(
                        au,
                        stmt.execute_named(&[
                            (":id", &ser_entry.id as &ToSql),
                            (":data", &ser_entry.data as &ToSql)
                        ]),
                        "rusqlite error {:?}",
                        OperationError::SQLiteError
                    );
                }

                // TODO: update indexes (as needed)
            }

            Ok(())
            // TODO: update indexes (as needed)
        })
    }

    pub fn create(
        &self,
        au: &mut AuditScope,
        entries: &Vec<Entry<EntryValid, EntryNew>>,
    ) -> Result<(), OperationError> {
        // figured we would want a audit_segment to wrap internal_create so when doing profiling we can
        // tell which function is calling it. either this one or restore.
        audit_segment!(au, || self.internal_create(au, entries))
    }

    pub fn modify(
        &self,
        au: &mut AuditScope,
        entries: &Vec<Entry<EntryValid, EntryCommitted>>,
    ) -> Result<(), BackendError> {
    ) -> Result<(), OperationError> {
        if entries.is_empty() {
            // TODO: Better error
            return Err(BackendError::EmptyRequest);
            return Err(OperationError::EmptyRequest);
        }

        // Assert the Id's exist on the entry, and serialise them.
        let ser_entries: Vec<IdEntry> = entries
        // Now, that means the ID must be > 0!!!
        let ser_entries: Result<Vec<IdEntry>, _> = entries
            .iter()
            .filter_map(|e| {
                match e.id {
                    Some(id) => {
                        Some(IdEntry {
                            id: id,
                            // TODO: Should we do better than unwrap?
                            data: serde_json::to_string(&e).unwrap(),
                        })
                    }
                    None => None,
                }
            .map(|e| {
                let db_e = e.into_dbentry();

                let id = i64::try_from(e.get_id())
                    .map_err(|_| OperationError::InvalidEntryID)
                    .and_then(|id| {
                        if id == 0 {
                            Err(OperationError::InvalidEntryID)
                        } else {
                            Ok(id)
                        }
                    })?;

                let data =
                    serde_json::to_string(&db_e).map_err(|_| OperationError::SerdeJsonError)?;

                Ok(IdEntry {
                    // TODO: Instead of getting these from the entry, we could lookup
                    // uuid -> id in the index.
                    id: id,
                    data: data,
                })
            })
            .collect();

        let ser_entries = try_audit!(au, ser_entries);

        audit_log!(au, "serialising: {:?}", ser_entries);

        // Simple: If the list of id's is not the same as the input list, we are missing id's
        // TODO: This check won't be needed once I rebuild the entry state types.
        if entries.len() != ser_entries.len() {
            return Err(BackendError::EntryMissingId);
            return Err(OperationError::InvalidEntryState);
        }

        // Now, given the list of id's, update them
        {
            // TODO: ACTUALLY HANDLE THIS ERROR WILLIAM YOU LAZY SHIT.
            let mut stmt = self
                .conn
                .prepare("UPDATE id2entry SET data = :data WHERE id = :id")
                .unwrap();
            let mut stmt = try_audit!(
                au,
                self.conn
                    .prepare("UPDATE id2entry SET data = :data WHERE id = :id"),
                "RusqliteError: {:?}",
                OperationError::SQLiteError
            );

            ser_entries.iter().for_each(|ser_ent| {
                stmt.execute_named(&[(":id", &ser_ent.id), (":data", &ser_ent.data)])
                    .unwrap();
            });
            for ser_ent in ser_entries.iter() {
                try_audit!(
                    au,
                    stmt.execute_named(&[(":id", &ser_ent.id), (":data", &ser_ent.data)]),
                    "RusqliteError: {:?}",
                    OperationError::SQLiteError
                );
            }
        }

        Ok(())

@@ -368,21 +382,36 @@ impl BackendWriteTransaction {
        &self,
        au: &mut AuditScope,
        entries: &Vec<Entry<EntryValid, EntryCommitted>>,
    ) -> Result<(), BackendError> {
    ) -> Result<(), OperationError> {
        // Perform a search for the entries --> This is a problem for the caller
        audit_segment!(au, || {
            if entries.is_empty() {
                // TODO: Better error
                return Err(BackendError::EmptyRequest);
                return Err(OperationError::EmptyRequest);
            }

            // Assert the Id's exist on the entry.
            let id_list: Vec<i64> = entries.iter().filter_map(|entry| entry.id).collect();
            let id_list: Result<Vec<i64>, _> = entries
                .iter()
                .map(|e| {
                    i64::try_from(e.get_id())
                        .map_err(|_| OperationError::InvalidEntryID)
                        .and_then(|id| {
                            if id == 0 {
                                Err(OperationError::InvalidEntryID)
                            } else {
                                Ok(id)
                            }
                        })
                })
                .collect();

            let id_list = try_audit!(au, id_list);

            // Simple: If the list of id's is not the same as the input list, we are missing id's
            // TODO: This check won't be needed once I rebuild the entry state types.
            if entries.len() != id_list.len() {
                return Err(BackendError::EntryMissingId);
                return Err(OperationError::InvalidEntryState);
            }

            // Now, given the list of id's, delete them.

@@ -429,14 +458,9 @@ impl BackendWriteTransaction {
        }
    }

    let entries: Vec<Entry<EntryValid, EntryCommitted>> = raw_entries
    let entries: Vec<DbEntry> = raw_entries
        .iter()
        .filter_map(|id_ent| {
            let mut e: Entry<EntryValid, EntryCommitted> =
                serde_json::from_str(id_ent.data.as_str()).unwrap();
            e.id = Some(id_ent.id);
            Some(e)
        })
        .map(|id_ent| serde_json::from_str(id_ent.data.as_str()).unwrap())
        .collect();

    let mut serializedEntries = serde_json::to_string_pretty(&entries);

@@ -488,7 +512,7 @@ impl BackendWriteTransaction {
        self.purge(audit);
    }

    let entriesOption: Result<Vec<Entry<EntryValid, EntryCommitted>>, serde_json::Error> =
    let entriesOption: Result<Vec<DbEntry>, serde_json::Error> =
        serde_json::from_str(&serializedString);

    let entries = try_audit!(

@@ -500,8 +524,9 @@ impl BackendWriteTransaction {

    self.internal_create(audit, &entries)

    // run re-index after db is restored
    // run db verify
    // TODO: run re-index after db is restored
    // TODO; run db verify
    // self.verify(audit)
    }

    pub fn commit(mut self) -> Result<(), OperationError> {

@@ -664,9 +689,7 @@ mod tests {
    use super::super::audit::AuditScope;
    use super::super::entry::{Entry, EntryInvalid, EntryNew};
    use super::super::filter::Filter;
    use super::{
        Backend, BackendError, BackendReadTransaction, BackendWriteTransaction, OperationError,
    };
    use super::{Backend, BackendReadTransaction, BackendWriteTransaction, OperationError};

    macro_rules! run_test {
        ($test_fn:expr) => {{

@@ -729,9 +752,27 @@ mod tests {

    #[test]
    fn test_simple_search() {
        run_test!(|audit: &mut AuditScope, _be: &BackendWriteTransaction| {
        run_test!(|audit: &mut AuditScope, be: &BackendWriteTransaction| {
            audit_log!(audit, "Simple Search");
            unimplemented!();

            let mut e: Entry<EntryInvalid, EntryNew> = Entry::new();
            e.add_ava(String::from("userid"), String::from("claire"));
            let e = unsafe { e.to_valid_new() };

            let single_result = be.create(audit, &vec![e.clone()]);
            assert!(single_result.is_ok());
            // Test a simple EQ search

            let filt = Filter::Eq("userid".to_string(), "claire".to_string());

            let r = be.search(audit, &filt);
            assert!(r.expect("Search failed!").len() == 1);

            // Test empty search

            // Test class pres

            // Search with no results
        });
    }

@@ -6,6 +6,9 @@ use crate::modify::{Modify, ModifyInvalid, ModifyList, ModifyValid};
use crate::proto_v1::Entry as ProtoEntry;
use crate::schema::{SchemaAttribute, SchemaClass, SchemaReadTransaction};
use crate::server::{QueryServerReadTransaction, QueryServerWriteTransaction};

use crate::be::dbentry::{DbEntry, DbEntryV1, DbEntryVers};

use std::collections::btree_map::{Iter as BTreeIter, IterMut as BTreeIterMut};
use std::collections::BTreeMap;
use std::collections::HashMap;

@@ -120,24 +123,23 @@ impl<'a> Iterator for EntryAvasMut<'a> {
// This is specifically important for the commit to the backend, as we only want to
// commit validated types.

#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
#[derive(Clone, Copy, Debug, Deserialize)]
pub struct EntryNew; // new
#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
pub struct EntryCommitted; // It's been in the DB, so it has an id
// pub struct EntryPurged;
#[derive(Clone, Copy, Debug, Deserialize)]
pub struct EntryCommitted {
    id: u64,
} // It's been in the DB, so it has an id
// pub struct EntryPurged;

#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
#[derive(Clone, Copy, Debug, Deserialize)]
pub struct EntryValid; // Asserted with schema.
#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
#[derive(Clone, Copy, Debug, Deserialize)]
pub struct EntryInvalid; // Modified

#[derive(Serialize, Deserialize, Debug)]
#[derive(Debug, Deserialize)]
pub struct Entry<VALID, STATE> {
    valid: VALID,
    state: STATE,
    pub id: Option<i64>,
    // Flag if we have been schema checked or not.
    // pub schema_validated: bool,
    attrs: BTreeMap<String, Vec<String>>,
}
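
Moving the id into the EntryCommitted marker is the typestate pattern: only entries that
have actually been in the DB can answer get_id, so the Option<i64> field and its expect()
go away. A stripped-down illustration (not the real definitions):

// State markers: only the committed state carries an id.
struct EntryNew;
struct EntryCommitted {
    id: u64,
}

struct Entry<STATE> {
    state: STATE,
    // valid marker and attrs elided
}

impl Entry<EntryCommitted> {
    // Infallible: the type guarantees an id exists here, so there is
    // no Option to unwrap and no "ID corrupted!?!?" panic.
    fn get_id(&self) -> u64 {
        self.state.id
    }
}
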
@@ -148,13 +150,12 @@ impl Entry<EntryInvalid, EntryNew> {
            // This means NEVER COMMITED
            valid: EntryInvalid,
            state: EntryNew,
            id: None,
            attrs: BTreeMap::new(),
        }
    }

    // FIXME: Can we consume protoentry?
    pub fn from(
    pub fn from_proto_entry(
        audit: &mut AuditScope,
        e: &ProtoEntry,
        qs: &QueryServerWriteTransaction,

@@ -192,7 +193,6 @@ impl Entry<EntryInvalid, EntryNew> {
            // sets so that BST works.
            state: EntryNew,
            valid: EntryInvalid,
            id: None,
            attrs: x,
        })
    }

@@ -208,7 +208,6 @@ impl<STATE> Entry<EntryInvalid, STATE> {
        let Entry {
            valid: _,
            state,
            id,
            attrs,
        } = self;

@@ -253,7 +252,6 @@ impl<STATE> Entry<EntryInvalid, STATE> {
        let ne = Entry {
            valid: EntryValid,
            state: state,
            id: id,
            attrs: new_attrs,
        };
        // Now validate.

@@ -359,7 +357,6 @@ where
        Entry {
            valid: self.valid,
            state: self.state,
            id: self.id,
            attrs: self.attrs.clone(),
        }
    }

@@ -375,22 +372,32 @@ impl<VALID, STATE> Entry<VALID, STATE> {
        Entry {
            valid: EntryValid,
            state: EntryNew,
            id: self.id,
            attrs: self.attrs,
        }
    }
}
// Both invalid states can be reached from "entry -> invalidate"

impl<VALID> Entry<VALID, EntryNew> {
    #[cfg(test)]
    pub unsafe fn to_valid_committed(self) -> Entry<EntryValid, EntryCommitted> {
        Entry {
            valid: EntryValid,
            state: EntryCommitted,
            id: self.id,
            state: EntryCommitted { id: 0 },
            attrs: self.attrs,
        }
    }
}

// Both invalid states can be reached from "entry -> invalidate"
impl<VALID> Entry<VALID, EntryCommitted> {
    #[cfg(test)]
    pub unsafe fn to_valid_committed(self) -> Entry<EntryValid, EntryCommitted> {
        Entry {
            valid: EntryValid,
            state: self.state,
            attrs: self.attrs,
        }
    }
}

impl Entry<EntryValid, EntryNew> {

@@ -418,23 +425,48 @@ impl Entry<EntryValid, EntryCommitted> {

        Entry {
            valid: EntryValid,
            state: EntryCommitted,
            id: self.id,
            state: self.state,
            attrs: attrs_new,
        }
    }

    pub fn get_id(&self) -> i64 {
        self.id.expect("ID corrupted!?!?")
    pub fn get_id(&self) -> u64 {
        self.state.id
    }

    pub fn from_dbentry(db_e: DbEntry, id: u64) -> Self {
        Entry {
            valid: EntryValid,
            state: EntryCommitted { id },
            attrs: match db_e.ent {
                DbEntryVers::V1(v1) => v1.attrs,
            },
        }
    }
}

impl<STATE> Entry<EntryValid, STATE> {
    // Returns the entry in the latest DbEntry format we are aware of.
    pub fn into_dbentry(&self) -> DbEntry {
        // In the future this will do extra work to process uuid
        // into "attributes" suitable for dbentry storage.

        // How will this work with replication?
        //
        // Alternately, we may have higher-level types that translate entry
        // into proper structures, and they themself emit/modify entries?

        DbEntry {
            ent: DbEntryVers::V1(DbEntryV1 {
                attrs: self.attrs.clone(),
            }),
        }
    }

    pub fn invalidate(self) -> Entry<EntryInvalid, STATE> {
        Entry {
            valid: EntryInvalid,
            state: self.state,
            id: self.id,
            attrs: self.attrs,
        }
    }

@@ -442,8 +474,9 @@ impl<STATE> Entry<EntryValid, STATE> {
    pub fn seal(self) -> Entry<EntryValid, EntryCommitted> {
        Entry {
            valid: self.valid,
            state: EntryCommitted,
            id: self.id,
            state: EntryCommitted {
                id: unimplemented!(),
            },
            attrs: self.attrs,
        }
    }

@@ -699,7 +732,6 @@ where
        let mut ne: Entry<EntryInvalid, STATE> = Entry {
            valid: self.valid,
            state: self.state,
            id: self.id,
            attrs: self.attrs.clone(),
        };

@@ -769,8 +801,6 @@ mod tests {
        let mut e: Entry<EntryInvalid, EntryNew> = Entry::new();

        e.add_ava(String::from("userid"), String::from("william"));

        let _d = serde_json::to_string_pretty(&e).unwrap();
    }

    #[test]

@@ -23,6 +23,7 @@ pub enum OperationError {
    Plugin,
    FilterGeneration,
    InvalidDBState,
    InvalidEntryID,
    InvalidRequestState,
    InvalidState,
    InvalidEntryState,

@@ -38,8 +39,8 @@ pub enum ConsistencyError {
    // Class, Attribute
    SchemaClassMissingAttribute(String, String),
    QueryServerSearchFailure,
    EntryUuidCorrupt(i64),
    EntryUuidCorrupt(u64),
    UuidIndexCorrupt(String),
    UuidNotUnique(String),
    RefintNotUpheld(i64),
    RefintNotUpheld(u64),
}

@@ -156,7 +156,7 @@ impl CreateEvent {
        let rentries: Result<Vec<_>, _> = request
            .entries
            .iter()
            .map(|e| Entry::from(audit, e, qs))
            .map(|e| Entry::from_proto_entry(audit, e, qs))
            .collect();
        match rentries {
            Ok(entries) => Ok(CreateEvent {

@@ -53,7 +53,6 @@ impl Plugin for ReferentialIntegrity {
        "referential_integrity"
    }

    // Why are these checks all in post?
    //
    // There is a situation to account for which is that a create or mod

@@ -4,9 +4,7 @@
use std::sync::Arc;

use crate::audit::AuditScope;
use crate::be::{
    Backend, BackendError, BackendReadTransaction, BackendTransaction, BackendWriteTransaction,
};
use crate::be::{Backend, BackendReadTransaction, BackendTransaction, BackendWriteTransaction};

use crate::constants::{JSON_ANONYMOUS_V1, JSON_SYSTEM_INFO_V1};
use crate::entry::{Entry, EntryCommitted, EntryInvalid, EntryNew, EntryValid};

@@ -106,7 +104,6 @@ pub trait QueryServerReadTransaction {
        // How to get schema?
        let vf = match ee.filter.validate(self.get_schema()) {
            Ok(f) => f,
            // TODO: Do something with this error
            Err(e) => return Err(OperationError::SchemaViolation(e)),
        };

@@ -649,15 +646,7 @@ impl<'a> QueryServerWriteTransaction<'a> {

        let mut audit_be = AuditScope::new("backend_modify");

        let res = self
            .be_txn
            // Change this to an update, not delete.
            .modify(&mut audit_be, &del_cand)
            .map(|_| ())
            .map_err(|e| match e {
                BackendError::EmptyRequest => OperationError::EmptyRequest,
                BackendError::EntryMissingId => OperationError::InvalidRequestState,
            });
        let res = self.be_txn.modify(&mut audit_be, &del_cand);
        au.append_scope(audit_be);

        if res.is_err() {

@@ -700,12 +689,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
        let res = self
            .be_txn
            // Change this to an update, not delete.
            .delete(&mut audit_be, &ts)
            .map(|_| ())
            .map_err(|e| match e {
                BackendError::EmptyRequest => OperationError::EmptyRequest,
                BackendError::EntryMissingId => OperationError::InvalidRequestState,
            });
            .delete(&mut audit_be, &ts);
        au.append_scope(audit_be);

        if res.is_err() {

@@ -735,14 +719,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
        // Backend Modify
        let mut audit_be = AuditScope::new("backend_modify");

        let res = self
            .be_txn
            .modify(&mut audit_be, &tombstone_cand)
            .map(|_| ())
            .map_err(|e| match e {
                BackendError::EmptyRequest => OperationError::EmptyRequest,
                BackendError::EntryMissingId => OperationError::InvalidRequestState,
            });
        let res = self.be_txn.modify(&mut audit_be, &tombstone_cand);
        au.append_scope(audit_be);

        if res.is_err() {

@@ -871,14 +848,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
        // Backend Modify
        let mut audit_be = AuditScope::new("backend_modify");

        let res = self
            .be_txn
            .modify(&mut audit_be, &norm_cand)
            .map(|_| ())
            .map_err(|e| match e {
                BackendError::EmptyRequest => OperationError::EmptyRequest,
                BackendError::EntryMissingId => OperationError::InvalidRequestState,
            });
        let res = self.be_txn.modify(&mut audit_be, &norm_cand);
        au.append_scope(audit_be);

        if res.is_err() {