Mirror of https://github.com/kanidm/kanidm.git (synced 2025-02-24 04:57:00 +01:00)

Add DBVersioning for entries (#47)

parent 3bbe9943c0
commit ff828e4f4a
@@ -296,3 +296,34 @@ system yet), method two probably is the best, but you need token constraint
to make sure you can't replay to another host.

Brain Dump Internal Details
===========================
Credentials should be a real struct on the entry that is serialised to a string for the dbentry. This allows repl
to still work, but means we can keep detailed, typed structures in the DB instead. When we send
to a proto entry, we could probably keep it as a real struct on protoentry too, but we could then
eliminate all private types from transmission.
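A minimal sketch of the idea (the Credential variants and helper here are hypothetical illustrations, not part of this commit):

// Hypothetical sketch only: a typed credential kept as a real struct
// on the entry, serialised to a string when written to the dbentry.
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
enum Credential {
    // Variant names are illustrative assumptions, not the commit's API.
    Password { hash: String },
    Totp { secret: String, step: u64 },
}

fn to_db_attr(cred: &Credential) -> String {
    // The string form is what lands in the dbentry's attrs, so existing
    // replication of string attributes keeps working unchanged.
    serde_json::to_string(cred).expect("credential is serialisable")
}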
When we login, we need to know what groups/roles are relevant to that authentication. To achieve this
we can have each group contain a policy of auth types (the credentials above all provide an auth
type). The login then has a known auth type of "how" they logged in, so when we go to generate
the user's "token" for that session, we can correlate these, and only attach groups that satisfy
the authentication type requirements.

I.e. the session associates the method you used to log in with your token and a cookie.
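A hedged sketch of that correlation (AuthType and the Group policy field are assumed names, not this commit's types):

// Hypothetical sketch: only attach groups whose auth-type policy is
// satisfied by how the session actually authenticated.
#[derive(Clone, Copy, PartialEq)]
enum AuthType {
    Password,
    PasswordMfa,
}

struct Group {
    name: String,
    // Auth types this group considers strong enough.
    allowed: Vec<AuthType>,
}

fn token_groups(session_auth: AuthType, groups: &[Group]) -> Vec<&Group> {
    groups
        .iter()
        .filter(|g| g.allowed.contains(&session_auth))
        .collect()
}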
If you require extra groups, then we should support a token refresh where, given the prior auth plus
extra factors, we re-issue the token with the extra groups as presented. We may
also want some auth types to NOT allow refresh.
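For example (again a sketch with assumed names; Token and the refresh rule are illustrations only):

// Hypothetical sketch: re-issuing a token after step-up auth.
#[derive(Clone, Copy, PartialEq)]
enum AuthType {
    Anonymous,
    Password,
    PasswordMfa,
}

struct Token {
    auth: AuthType,
    groups: Vec<String>,
}

fn refresh(prior: &Token, extra_factor_ok: bool) -> Option<Token> {
    // Some auth types, e.g. anonymous sessions, never allow refresh.
    if prior.auth == AuthType::Anonymous || !extra_factor_ok {
        return None;
    }
    Some(Token {
        auth: AuthType::PasswordMfa,
        // plus whatever groups the stronger auth type now satisfies
        groups: prior.groups.clone(),
    })
}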
We may want groups to support expiry, where they are not valid past some time stamp. This may
require tagging or other details.
How do we ensure integrity of the token? Do we have to? Is it the client's job to trust the token given
the TLS tunnel?
src/lib/be/dbentry.rs (new file, +20)
@@ -0,0 +1,20 @@
use std::collections::BTreeMap;

#[derive(Serialize, Deserialize, Debug)]
pub struct DbEntryV1 {
    pub attrs: BTreeMap<String, Vec<String>>,
}

// REMEMBER: If you add a new version here, you MUST
// update entry.rs into_dbentry to export to the latest
// type always!!
#[derive(Serialize, Deserialize, Debug)]
pub enum DbEntryVers {
    V1(DbEntryV1),
}

// This is actually what we store into the DB.
#[derive(Serialize, Deserialize, Debug)]
pub struct DbEntry {
    pub ent: DbEntryVers,
}
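To see why the enum wrapper helps, here is a sketch (not part of the commit) of how a stored entry round-trips through the versioned type; the JSON shape is what serde derives for the definitions above:

// Sketch: round-trip a DbEntry through the JSON stored in id2entry.
fn demo() {
    let mut attrs = std::collections::BTreeMap::new();
    attrs.insert("userid".to_string(), vec!["claire".to_string()]);

    let db_e = DbEntry {
        ent: DbEntryVers::V1(DbEntryV1 { attrs }),
    };

    // Serialises to something like:
    // {"ent":{"V1":{"attrs":{"userid":["claire"]}}}}
    let s = serde_json::to_string(&db_e).expect("serialise");

    // On read, the version tag tells us which layout we decoded, so a
    // future V2 can be migrated in a single match statement.
    let back: DbEntry = serde_json::from_str(&s).expect("deserialise");
    match back.ent {
        DbEntryVers::V1(v1) => assert_eq!(v1.attrs.len(), 1),
    }
}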
@@ -5,38 +5,28 @@ use r2d2_sqlite::SqliteConnectionManager;
 use rusqlite::types::ToSql;
 use rusqlite::NO_PARAMS;
 use serde_json;
+use std::convert::TryFrom;
 use std::fs;
 // use uuid;
 
 use crate::audit::AuditScope;
+use crate::be::dbentry::DbEntry;
 use crate::entry::{Entry, EntryCommitted, EntryNew, EntryValid};
 use crate::error::{ConsistencyError, OperationError};
 use crate::filter::{Filter, FilterValid};
 
+pub mod dbentry;
 mod idl;
 mod mem_be;
 mod sqlite_be;
 
 #[derive(Debug)]
 struct IdEntry {
-    // FIXME: This should be u64, but sqlite uses i64 ...
+    // TODO: for now this is i64 to make sqlite work, but entry is u64 for indexing reasons!
     id: i64,
     data: String,
 }
 
-/*
-pub enum BackendType {
-    Memory, // isn't memory just sqlite with file :memory: ?
-    SQLite,
-}
-*/
-
-#[derive(Debug, PartialEq)]
-pub enum BackendError {
-    EmptyRequest,
-    EntryMissingId,
-}
-
 pub struct Backend {
     pool: Pool<SqliteConnectionManager>,
 }
@@ -59,7 +49,7 @@ pub trait BackendReadTransaction {
         &self,
         au: &mut AuditScope,
         filt: &Filter<FilterValid>,
-    ) -> Result<Vec<Entry<EntryValid, EntryCommitted>>, BackendError> {
+    ) -> Result<Vec<Entry<EntryValid, EntryCommitted>>, OperationError> {
         // Do things
         // Alloc a vec for the entries.
         // FIXME: Make this actually a good size for the result set ...
@@ -99,13 +89,17 @@ pub trait BackendReadTransaction {
         }
         // Do other things
         // Now, de-serialise the raw_entries back to entries, and populate their ID's
 
         let entries: Vec<Entry<EntryValid, EntryCommitted>> = raw_entries
             .iter()
-            .filter_map(|id_ent| {
-                // TODO: Should we do better than unwrap?
-                let mut e: Entry<EntryValid, EntryCommitted> =
-                    serde_json::from_str(id_ent.data.as_str()).unwrap();
-                e.id = Some(id_ent.id);
+            .map(|id_ent| {
+                // TODO: Should we do better than unwrap? And if so, how?
+                // we need to map this error, so probably need to collect to Result<Vec<_>, _>
+                // and then to try_audit! on that.
+                let db_e = serde_json::from_str(id_ent.data.as_str()).unwrap();
+                Entry::from_dbentry(db_e, u64::try_from(id_ent.id).unwrap())
+            })
+            .filter_map(|e| {
                 if e.entry_match_no_index(&filt) {
                     Some(e)
                 } else {
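The TODO in that hunk points at the usual Rust answer: map each row to a Result and collect into Result<Vec<_>, _>, so one bad row aborts the whole search instead of panicking. A hedged sketch of that fragment, adapted to this function (the error variants used are the ones this commit adds elsewhere):

// Sketch of the collect-to-Result pattern the TODO describes; this is
// a drop-in shape, not code from the commit.
let entries: Result<Vec<Entry<EntryValid, EntryCommitted>>, OperationError> = raw_entries
    .iter()
    .map(|id_ent| {
        let db_e: DbEntry = serde_json::from_str(id_ent.data.as_str())
            .map_err(|_| OperationError::SerdeJsonError)?;
        let id = u64::try_from(id_ent.id).map_err(|_| OperationError::InvalidEntryID)?;
        Ok(Entry::from_dbentry(db_e, id))
    })
    .collect();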
@@ -126,7 +120,7 @@ pub trait BackendReadTransaction {
         &self,
         au: &mut AuditScope,
         filt: &Filter<FilterValid>,
-    ) -> Result<bool, BackendError> {
+    ) -> Result<bool, OperationError> {
         // Do a final optimise of the filter
         // At the moment, technically search will do this, but it won't always be the
         // case once this becomes a standalone function.
@@ -236,35 +230,23 @@ impl BackendWriteTransaction {
         }
     }
 
-    fn internal_create<T: serde::Serialize>(
+    fn internal_create(
         &self,
         au: &mut AuditScope,
-        entries: &Vec<Entry<EntryValid, T>>,
+        dbentries: &Vec<DbEntry>,
     ) -> Result<(), OperationError> {
-        audit_segment!(au, || {
-            // Start be audit timer
-
-            if entries.is_empty() {
-                // TODO: Better error
-                // End the timer
-                return Err(OperationError::EmptyRequest);
-            }
-
-            // Turn all the entries into relevent json/cbor types
-            // we do this outside the txn to avoid blocking needlessly.
-            // However, it could be pointless due to the extra string allocs ...
-
-            // Get the max id from the db. We store this ourselves to avoid max().
+        // Get the max id from the db. We store this ourselves to avoid max() calls.
         let mut id_max = self.get_id2entry_max_id();
 
-        let ser_entries: Vec<IdEntry> = entries
+        let ser_entries: Vec<IdEntry> = dbentries
             .iter()
-            .map(|val| {
-                // TODO: Should we do better than unwrap?
+            .map(|ser_db_e| {
                 id_max = id_max + 1;
 
                 IdEntry {
                     id: id_max,
-                    data: serde_json::to_string(&val).unwrap(),
+                    // TODO: Should we do better than unwrap?
+                    data: serde_json::to_string(&ser_db_e).unwrap(),
                 }
             })
             .collect();
@@ -294,12 +276,9 @@ impl BackendWriteTransaction {
                     OperationError::SQLiteError
                 );
             }
-
-            // TODO: update indexes (as needed)
         }
 
         Ok(())
-        })
     }
 
     pub fn create(
@@ -309,56 +288,91 @@ impl BackendWriteTransaction {
     ) -> Result<(), OperationError> {
         // figured we would want a audit_segment to wrap internal_create so when doing profiling we can
         // tell which function is calling it. either this one or restore.
-        audit_segment!(au, || self.internal_create(au, entries))
+        audit_segment!(au, || {
+            if entries.is_empty() {
+                // TODO: Better error
+                // End the timer
+                return Err(OperationError::EmptyRequest);
+            }
+
+            // Turn all the entries into relevent json/cbor types
+            // we do this outside the txn to avoid blocking needlessly.
+            // However, it could be pointless due to the extra string allocs ...
+
+            let dbentries: Vec<_> = entries.iter().map(|e| e.into_dbentry()).collect();
+
+            self.internal_create(au, &dbentries)
+
+            // TODO: update indexes (as needed)
+        })
     }
 
     pub fn modify(
         &self,
         au: &mut AuditScope,
         entries: &Vec<Entry<EntryValid, EntryCommitted>>,
-    ) -> Result<(), BackendError> {
+    ) -> Result<(), OperationError> {
         if entries.is_empty() {
-            // TODO: Better error
-            return Err(BackendError::EmptyRequest);
+            return Err(OperationError::EmptyRequest);
         }
 
         // Assert the Id's exist on the entry, and serialise them.
-        let ser_entries: Vec<IdEntry> = entries
+        // Now, that means the ID must be > 0!!!
+        let ser_entries: Result<Vec<IdEntry>, _> = entries
             .iter()
-            .filter_map(|e| {
-                match e.id {
-                    Some(id) => {
-                        Some(IdEntry {
-                            id: id,
-                            // TODO: Should we do better than unwrap?
-                            data: serde_json::to_string(&e).unwrap(),
-                        })
-                    }
-                    None => None,
-                }
-            })
+            .map(|e| {
+                let db_e = e.into_dbentry();
+
+                let id = i64::try_from(e.get_id())
+                    .map_err(|_| OperationError::InvalidEntryID)
+                    .and_then(|id| {
+                        if id == 0 {
+                            Err(OperationError::InvalidEntryID)
+                        } else {
+                            Ok(id)
+                        }
+                    })?;
+
+                let data =
+                    serde_json::to_string(&db_e).map_err(|_| OperationError::SerdeJsonError)?;
+
+                Ok(IdEntry {
+                    // TODO: Instead of getting these from the entry, we could lookup
+                    // uuid -> id in the index.
+                    id: id,
+                    data: data,
+                })
+            })
             .collect();
 
+        let ser_entries = try_audit!(au, ser_entries);
+
         audit_log!(au, "serialising: {:?}", ser_entries);
 
         // Simple: If the list of id's is not the same as the input list, we are missing id's
         // TODO: This check won't be needed once I rebuild the entry state types.
         if entries.len() != ser_entries.len() {
-            return Err(BackendError::EntryMissingId);
+            return Err(OperationError::InvalidEntryState);
        }
 
         // Now, given the list of id's, update them
         {
-            // TODO: ACTUALLY HANDLE THIS ERROR WILLIAM YOU LAZY SHIT.
-            let mut stmt = self
-                .conn
-                .prepare("UPDATE id2entry SET data = :data WHERE id = :id")
-                .unwrap();
+            let mut stmt = try_audit!(
+                au,
+                self.conn
+                    .prepare("UPDATE id2entry SET data = :data WHERE id = :id"),
+                "RusqliteError: {:?}",
+                OperationError::SQLiteError
+            );
 
-            ser_entries.iter().for_each(|ser_ent| {
-                stmt.execute_named(&[(":id", &ser_ent.id), (":data", &ser_ent.data)])
-                    .unwrap();
-            });
+            for ser_ent in ser_entries.iter() {
+                try_audit!(
+                    au,
+                    stmt.execute_named(&[(":id", &ser_ent.id), (":data", &ser_ent.data)]),
+                    "RusqliteError: {:?}",
+                    OperationError::SQLiteError
+                );
+            }
         }
 
         Ok(())
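The "id must fit in i64 and be non-zero" check above is repeated verbatim in delete() below; a hypothetical helper (not in the commit) could factor it out:

// Hypothetical helper: both modify() and delete() repeat the same
// id validation, so it could live in one place like this.
fn valid_db_id(id: u64) -> Result<i64, OperationError> {
    i64::try_from(id)
        .map_err(|_| OperationError::InvalidEntryID)
        .and_then(|id| {
            if id == 0 {
                // id 0 means the entry was never genuinely committed.
                Err(OperationError::InvalidEntryID)
            } else {
                Ok(id)
            }
        })
}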
@@ -368,21 +382,36 @@ impl BackendWriteTransaction {
         &self,
         au: &mut AuditScope,
         entries: &Vec<Entry<EntryValid, EntryCommitted>>,
-    ) -> Result<(), BackendError> {
+    ) -> Result<(), OperationError> {
         // Perform a search for the entries --> This is a problem for the caller
         audit_segment!(au, || {
             if entries.is_empty() {
                 // TODO: Better error
-                return Err(BackendError::EmptyRequest);
+                return Err(OperationError::EmptyRequest);
             }
 
             // Assert the Id's exist on the entry.
-            let id_list: Vec<i64> = entries.iter().filter_map(|entry| entry.id).collect();
+            let id_list: Result<Vec<i64>, _> = entries
+                .iter()
+                .map(|e| {
+                    i64::try_from(e.get_id())
+                        .map_err(|_| OperationError::InvalidEntryID)
+                        .and_then(|id| {
+                            if id == 0 {
+                                Err(OperationError::InvalidEntryID)
+                            } else {
+                                Ok(id)
+                            }
+                        })
+                })
+                .collect();
+
+            let id_list = try_audit!(au, id_list);
 
             // Simple: If the list of id's is not the same as the input list, we are missing id's
             // TODO: This check won't be needed once I rebuild the entry state types.
             if entries.len() != id_list.len() {
-                return Err(BackendError::EntryMissingId);
+                return Err(OperationError::InvalidEntryState);
             }
 
             // Now, given the list of id's, delete them.
@@ -429,14 +458,9 @@ impl BackendWriteTransaction {
             }
         }
 
-        let entries: Vec<Entry<EntryValid, EntryCommitted>> = raw_entries
+        let entries: Vec<DbEntry> = raw_entries
             .iter()
-            .filter_map(|id_ent| {
-                let mut e: Entry<EntryValid, EntryCommitted> =
-                    serde_json::from_str(id_ent.data.as_str()).unwrap();
-                e.id = Some(id_ent.id);
-                Some(e)
-            })
+            .map(|id_ent| serde_json::from_str(id_ent.data.as_str()).unwrap())
             .collect();
 
         let mut serializedEntries = serde_json::to_string_pretty(&entries);
@@ -488,7 +512,7 @@ impl BackendWriteTransaction {
             self.purge(audit);
         }
 
-        let entriesOption: Result<Vec<Entry<EntryValid, EntryCommitted>>, serde_json::Error> =
+        let entriesOption: Result<Vec<DbEntry>, serde_json::Error> =
             serde_json::from_str(&serializedString);
 
         let entries = try_audit!(
@@ -500,8 +524,9 @@ impl BackendWriteTransaction {
 
         self.internal_create(audit, &entries)
 
-        // run re-index after db is restored
-        // run db verify
+        // TODO: run re-index after db is restored
+        // TODO; run db verify
+        // self.verify(audit)
     }
 
     pub fn commit(mut self) -> Result<(), OperationError> {
@@ -664,9 +689,7 @@ mod tests {
     use super::super::audit::AuditScope;
     use super::super::entry::{Entry, EntryInvalid, EntryNew};
     use super::super::filter::Filter;
-    use super::{
-        Backend, BackendError, BackendReadTransaction, BackendWriteTransaction, OperationError,
-    };
+    use super::{Backend, BackendReadTransaction, BackendWriteTransaction, OperationError};
 
     macro_rules! run_test {
         ($test_fn:expr) => {{
@@ -729,9 +752,27 @@ mod tests {
 
     #[test]
     fn test_simple_search() {
-        run_test!(|audit: &mut AuditScope, _be: &BackendWriteTransaction| {
+        run_test!(|audit: &mut AuditScope, be: &BackendWriteTransaction| {
             audit_log!(audit, "Simple Search");
-            unimplemented!();
+
+            let mut e: Entry<EntryInvalid, EntryNew> = Entry::new();
+            e.add_ava(String::from("userid"), String::from("claire"));
+            let e = unsafe { e.to_valid_new() };
+
+            let single_result = be.create(audit, &vec![e.clone()]);
+            assert!(single_result.is_ok());
+            // Test a simple EQ search
+
+            let filt = Filter::Eq("userid".to_string(), "claire".to_string());
+
+            let r = be.search(audit, &filt);
+            assert!(r.expect("Search failed!").len() == 1);
+
+            // Test empty search
+
+            // Test class pres
+
+            // Search with no results
         });
     }
 
@@ -6,6 +6,9 @@ use crate::modify::{Modify, ModifyInvalid, ModifyList, ModifyValid};
 use crate::proto_v1::Entry as ProtoEntry;
 use crate::schema::{SchemaAttribute, SchemaClass, SchemaReadTransaction};
 use crate::server::{QueryServerReadTransaction, QueryServerWriteTransaction};
 
+use crate::be::dbentry::{DbEntry, DbEntryV1, DbEntryVers};
+
 use std::collections::btree_map::{Iter as BTreeIter, IterMut as BTreeIterMut};
 use std::collections::BTreeMap;
 use std::collections::HashMap;
@@ -120,24 +123,23 @@ impl<'a> Iterator for EntryAvasMut<'a> {
 // This is specifically important for the commit to the backend, as we only want to
 // commit validated types.
 
-#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
+#[derive(Clone, Copy, Debug, Deserialize)]
 pub struct EntryNew; // new
-#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
-pub struct EntryCommitted; // It's been in the DB, so it has an id
+#[derive(Clone, Copy, Debug, Deserialize)]
+pub struct EntryCommitted {
+    id: u64,
+} // It's been in the DB, so it has an id
 // pub struct EntryPurged;
 
-#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
+#[derive(Clone, Copy, Debug, Deserialize)]
 pub struct EntryValid; // Asserted with schema.
-#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
+#[derive(Clone, Copy, Debug, Deserialize)]
 pub struct EntryInvalid; // Modified
 
-#[derive(Serialize, Deserialize, Debug)]
+#[derive(Debug, Deserialize)]
 pub struct Entry<VALID, STATE> {
     valid: VALID,
     state: STATE,
-    pub id: Option<i64>,
-    // Flag if we have been schema checked or not.
-    // pub schema_validated: bool,
     attrs: BTreeMap<String, Vec<String>>,
 }
 
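This hunk is the heart of the change: the id moves out of Entry (where it was a pub Option<i64>) into the EntryCommitted typestate, and Serialize is dropped from Entry so it can no longer be written to the DB directly, only via DbEntry. The payoff of the typestate move, reduced to a toy sketch (not code from the commit):

// Sketch: because the id field only exists in the Committed state,
// calling get_id() on an uncommitted value is a compile error rather
// than a runtime expect().
struct New;
struct Committed {
    id: u64,
}

struct Toy<STATE> {
    state: STATE,
}

impl Toy<Committed> {
    fn get_id(&self) -> u64 {
        self.state.id
    }
}

fn demo() {
    let c = Toy { state: Committed { id: 7 } };
    assert_eq!(c.get_id(), 7);

    let n = Toy { state: New };
    // n.get_id(); // does not compile: no such method on Toy<New>
    let _ = n;
}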
@@ -148,13 +150,12 @@ impl Entry<EntryInvalid, EntryNew> {
             // This means NEVER COMMITED
             valid: EntryInvalid,
             state: EntryNew,
-            id: None,
             attrs: BTreeMap::new(),
         }
     }
 
     // FIXME: Can we consume protoentry?
-    pub fn from(
+    pub fn from_proto_entry(
         audit: &mut AuditScope,
         e: &ProtoEntry,
         qs: &QueryServerWriteTransaction,
@@ -192,7 +193,6 @@ impl Entry<EntryInvalid, EntryNew> {
             // sets so that BST works.
             state: EntryNew,
             valid: EntryInvalid,
-            id: None,
             attrs: x,
         })
     }
@@ -208,7 +208,6 @@ impl<STATE> Entry<EntryInvalid, STATE> {
         let Entry {
             valid: _,
             state,
-            id,
             attrs,
         } = self;
 
@@ -253,7 +252,6 @@ impl<STATE> Entry<EntryInvalid, STATE> {
         let ne = Entry {
             valid: EntryValid,
             state: state,
-            id: id,
             attrs: new_attrs,
         };
         // Now validate.
@@ -359,7 +357,6 @@ where
         Entry {
             valid: self.valid,
             state: self.state,
-            id: self.id,
             attrs: self.attrs.clone(),
         }
     }
@@ -375,22 +372,32 @@ impl<VALID, STATE> Entry<VALID, STATE> {
         Entry {
             valid: EntryValid,
             state: EntryNew,
-            id: self.id,
             attrs: self.attrs,
         }
     }
+}
+// Both invalid states can be reached from "entry -> invalidate"
+
+impl<VALID> Entry<VALID, EntryNew> {
     #[cfg(test)]
     pub unsafe fn to_valid_committed(self) -> Entry<EntryValid, EntryCommitted> {
         Entry {
             valid: EntryValid,
-            state: EntryCommitted,
-            id: self.id,
+            state: EntryCommitted { id: 0 },
             attrs: self.attrs,
         }
     }
+}
 
-// Both invalid states can be reached from "entry -> invalidate"
-}
+impl<VALID> Entry<VALID, EntryCommitted> {
+    #[cfg(test)]
+    pub unsafe fn to_valid_committed(self) -> Entry<EntryValid, EntryCommitted> {
+        Entry {
+            valid: EntryValid,
+            state: self.state,
+            attrs: self.attrs,
+        }
+    }
 }
 
 impl Entry<EntryValid, EntryNew> {
@@ -418,23 +425,48 @@ impl Entry<EntryValid, EntryCommitted> {
 
         Entry {
             valid: EntryValid,
-            state: EntryCommitted,
-            id: self.id,
+            state: self.state,
             attrs: attrs_new,
         }
     }
 
-    pub fn get_id(&self) -> i64 {
-        self.id.expect("ID corrupted!?!?")
+    pub fn get_id(&self) -> u64 {
+        self.state.id
+    }
+
+    pub fn from_dbentry(db_e: DbEntry, id: u64) -> Self {
+        Entry {
+            valid: EntryValid,
+            state: EntryCommitted { id },
+            attrs: match db_e.ent {
+                DbEntryVers::V1(v1) => v1.attrs,
+            },
+        }
     }
 }
 
 impl<STATE> Entry<EntryValid, STATE> {
+    // Returns the entry in the latest DbEntry format we are aware of.
+    pub fn into_dbentry(&self) -> DbEntry {
+        // In the future this will do extra work to process uuid
+        // into "attributes" suitable for dbentry storage.
+
+        // How will this work with replication?
+        //
+        // Alternately, we may have higher-level types that translate entry
+        // into proper structures, and they themself emit/modify entries?
+
+        DbEntry {
+            ent: DbEntryVers::V1(DbEntryV1 {
+                attrs: self.attrs.clone(),
+            }),
+        }
+    }
+
     pub fn invalidate(self) -> Entry<EntryInvalid, STATE> {
         Entry {
             valid: EntryInvalid,
             state: self.state,
-            id: self.id,
             attrs: self.attrs,
         }
     }
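With this hunk, from_dbentry and into_dbentry become the only bridge between in-memory entries and the stored form. A sketch (not in the commit) of the round trip, using only names from this diff:

// Sketch: a round trip through the DbEntry bridge. Assumes a
// validated, committed entry `e`, e.g. from the backend search path.
fn roundtrip(e: Entry<EntryValid, EntryCommitted>) {
    let id = e.get_id();
    let db_e = e.into_dbentry();

    // What goes to sqlite is only the versioned DbEntry ...
    let s = serde_json::to_string(&db_e).expect("serialise");

    // ... and on the way back the id is re-attached from the id2entry
    // row, not from the serialised data itself.
    let db_e2: DbEntry = serde_json::from_str(&s).expect("deserialise");
    let e2 = Entry::from_dbentry(db_e2, id);
    assert_eq!(e2.get_id(), id);
}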
@@ -442,8 +474,9 @@ impl<STATE> Entry<EntryValid, STATE> {
     pub fn seal(self) -> Entry<EntryValid, EntryCommitted> {
         Entry {
             valid: self.valid,
-            state: EntryCommitted,
-            id: self.id,
+            state: EntryCommitted {
+                id: unimplemented!(),
+            },
             attrs: self.attrs,
         }
     }
|
@ -699,7 +732,6 @@ where
|
||||||
let mut ne: Entry<EntryInvalid, STATE> = Entry {
|
let mut ne: Entry<EntryInvalid, STATE> = Entry {
|
||||||
valid: self.valid,
|
valid: self.valid,
|
||||||
state: self.state,
|
state: self.state,
|
||||||
id: self.id,
|
|
||||||
attrs: self.attrs.clone(),
|
attrs: self.attrs.clone(),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -769,8 +801,6 @@ mod tests {
|
||||||
let mut e: Entry<EntryInvalid, EntryNew> = Entry::new();
|
let mut e: Entry<EntryInvalid, EntryNew> = Entry::new();
|
||||||
|
|
||||||
e.add_ava(String::from("userid"), String::from("william"));
|
e.add_ava(String::from("userid"), String::from("william"));
|
||||||
|
|
||||||
let _d = serde_json::to_string_pretty(&e).unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
|
|
|
@@ -23,6 +23,7 @@ pub enum OperationError {
     Plugin,
     FilterGeneration,
     InvalidDBState,
+    InvalidEntryID,
     InvalidRequestState,
     InvalidState,
     InvalidEntryState,
@@ -38,8 +39,8 @@ pub enum ConsistencyError {
     // Class, Attribute
     SchemaClassMissingAttribute(String, String),
     QueryServerSearchFailure,
-    EntryUuidCorrupt(i64),
+    EntryUuidCorrupt(u64),
     UuidIndexCorrupt(String),
     UuidNotUnique(String),
-    RefintNotUpheld(i64),
+    RefintNotUpheld(u64),
 }
@@ -156,7 +156,7 @@ impl CreateEvent {
         let rentries: Result<Vec<_>, _> = request
             .entries
             .iter()
-            .map(|e| Entry::from(audit, e, qs))
+            .map(|e| Entry::from_proto_entry(audit, e, qs))
             .collect();
         match rentries {
             Ok(entries) => Ok(CreateEvent {
@@ -53,7 +53,6 @@ impl Plugin for ReferentialIntegrity {
         "referential_integrity"
     }
 
-
     // Why are these checks all in post?
     //
     // There is a situation to account for which is that a create or mod
@@ -4,9 +4,7 @@
 use std::sync::Arc;
 
 use crate::audit::AuditScope;
-use crate::be::{
-    Backend, BackendError, BackendReadTransaction, BackendTransaction, BackendWriteTransaction,
-};
+use crate::be::{Backend, BackendReadTransaction, BackendTransaction, BackendWriteTransaction};
 use crate::constants::{JSON_ANONYMOUS_V1, JSON_SYSTEM_INFO_V1};
 use crate::entry::{Entry, EntryCommitted, EntryInvalid, EntryNew, EntryValid};
@@ -106,7 +104,6 @@ pub trait QueryServerReadTransaction {
         // How to get schema?
         let vf = match ee.filter.validate(self.get_schema()) {
             Ok(f) => f,
-            // TODO: Do something with this error
             Err(e) => return Err(OperationError::SchemaViolation(e)),
         };
 
@@ -649,15 +646,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 
         let mut audit_be = AuditScope::new("backend_modify");
 
-        let res = self
-            .be_txn
-            // Change this to an update, not delete.
-            .modify(&mut audit_be, &del_cand)
-            .map(|_| ())
-            .map_err(|e| match e {
-                BackendError::EmptyRequest => OperationError::EmptyRequest,
-                BackendError::EntryMissingId => OperationError::InvalidRequestState,
-            });
+        let res = self.be_txn.modify(&mut audit_be, &del_cand);
         au.append_scope(audit_be);
 
         if res.is_err() {
@@ -700,12 +689,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         let res = self
             .be_txn
             // Change this to an update, not delete.
-            .delete(&mut audit_be, &ts)
-            .map(|_| ())
-            .map_err(|e| match e {
-                BackendError::EmptyRequest => OperationError::EmptyRequest,
-                BackendError::EntryMissingId => OperationError::InvalidRequestState,
-            });
+            .delete(&mut audit_be, &ts);
         au.append_scope(audit_be);
 
         if res.is_err() {
@@ -735,14 +719,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         // Backend Modify
         let mut audit_be = AuditScope::new("backend_modify");
 
-        let res = self
-            .be_txn
-            .modify(&mut audit_be, &tombstone_cand)
-            .map(|_| ())
-            .map_err(|e| match e {
-                BackendError::EmptyRequest => OperationError::EmptyRequest,
-                BackendError::EntryMissingId => OperationError::InvalidRequestState,
-            });
+        let res = self.be_txn.modify(&mut audit_be, &tombstone_cand);
         au.append_scope(audit_be);
 
         if res.is_err() {
@@ -871,14 +848,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         // Backend Modify
         let mut audit_be = AuditScope::new("backend_modify");
 
-        let res = self
-            .be_txn
-            .modify(&mut audit_be, &norm_cand)
-            .map(|_| ())
-            .map_err(|e| match e {
-                BackendError::EmptyRequest => OperationError::EmptyRequest,
-                BackendError::EntryMissingId => OperationError::InvalidRequestState,
-            });
+        let res = self.be_txn.modify(&mut audit_be, &norm_cand);
         au.append_scope(audit_be);
 
         if res.is_err() {