Open tickets for most todos, fix more.

This commit is contained in:
William Brown 2019-07-27 15:54:31 +09:00
parent ed99da58d0
commit 0fbd181f9f
19 changed files with 102 additions and 102 deletions

View file

@ -32,6 +32,7 @@ log = "0.4"
env_logger = "0.6"
reqwest = "0.9"
# reqwest = { path = "../reqwest" }
rand = "0.6"
chrono = "0.4"
cookie = "0.11"
@ -52,6 +53,7 @@ r2d2 = "0.8"
r2d2_sqlite = "0.7"
structopt = { version = "0.2", default-features = false }
time = "0.1"
concread = "0.1"

View file

@ -35,7 +35,6 @@ use crate::event::{CreateEvent, DeleteEvent, EventOrigin, ModifyEvent, SearchEve
#[derive(Debug, Clone)]
pub struct AccessControlSearch {
acp: AccessControlProfile,
// TODO: Make this a BTreeSet?
attrs: Vec<String>,
}
@ -132,9 +131,7 @@ impl AccessControlDelete {
#[derive(Debug, Clone)]
pub struct AccessControlCreate {
acp: AccessControlProfile,
// TODO: Make this a BTreeSet?
classes: Vec<String>,
// TODO: Make this a BTreeSet?
attrs: Vec<String>,
}
@ -193,11 +190,8 @@ impl AccessControlCreate {
#[derive(Debug, Clone)]
pub struct AccessControlModify {
acp: AccessControlProfile,
// TODO: Make this a BTreeSet?
classes: Vec<String>,
// TODO: Make this a BTreeSet?
presattrs: Vec<String>,
// TODO: Make this a BTreeSet?
remattrs: Vec<String>,
}
@ -514,7 +508,6 @@ pub trait AccessControlsTransaction {
&self,
audit: &mut AuditScope,
se: &SearchEvent,
// TODO: This could actually take a &mut Vec and modify in place!
entries: Vec<Entry<EntryValid, EntryCommitted>>,
) -> Result<Vec<Entry<EntryReduced, EntryCommitted>>, OperationError> {
/*
@ -568,7 +561,8 @@ pub trait AccessControlsTransaction {
audit_log!(audit, "Related acs -> {:?}", related_acp);
// Get the set of attributes requested by the caller - TODO: This currently
// Get the set of attributes requested by the caller
// TODO #69: This currently
// is ALL ATTRIBUTES, so we actually work here to just remove things we
// CAN'T see instead.
@ -754,7 +748,7 @@ pub trait AccessControlsTransaction {
let scoped_acp: Vec<&AccessControlModify> = related_acp
.iter()
.filter_map(|acm: &&AccessControlModify| {
// TODO: We are continually compiling and using these
// We are continually compiling and using these
// in a tight loop, so this is a possible opportunity
// to cache or handle these filters better - filter compiler
// cache maybe?

View file

@ -23,7 +23,7 @@ mod sqlite_be;
#[derive(Debug)]
struct IdEntry {
// TODO: for now this is i64 to make sqlite work, but entry is u64 for indexing reasons!
// TODO #20: for now this is i64 to make sqlite work, but entry is u64 for indexing reasons!
id: i64,
data: Vec<u8>,
}
@ -53,8 +53,8 @@ pub trait BackendTransaction {
) -> Result<Vec<Entry<EntryValid, EntryCommitted>>, OperationError> {
// Do things
// Alloc a vec for the entries.
// TODO: Make this actually a good size for the result set ...
// TODO: Actually compute indexes here.
// TODO #8: Make this actually a good size for the result set ...
// TODO #8: Actually compute indexes here.
// So to make this use indexes, we can use the filter type and
// destructure it to work out what we need to actually search (if
// possible) to create the candidate set.
@ -357,6 +357,8 @@ impl BackendWriteTransaction {
// write them all
for ser_entry in ser_entries {
// TODO: Prepared statement.
// Actually, I'm not sure we can - prepared stmt are per-conn, and we don't
// hold conns for that long? Maybe we should just rely on the stmt cache in sqlite?
try_audit!(
au,
stmt.execute_named(&[
@ -396,7 +398,7 @@ impl BackendWriteTransaction {
self.internal_create(au, &dbentries)
// TODO: update indexes (as needed)
// TODO #8: update indexes (as needed)
})
}
@ -433,7 +435,7 @@ impl BackendWriteTransaction {
let data = serde_cbor::to_vec(&db_e).map_err(|_| OperationError::SerdeCborError)?;
Ok(IdEntry {
// TODO: Instead of getting these from the server entry struct, we could look up
// TODO #8: Instead of getting these from the server entry struct, we could look up
// uuid -> id in the index.
//
// relies on the uuid -> id index being correct (and implemented)
@ -582,7 +584,7 @@ impl BackendWriteTransaction {
} else {
Err(OperationError::ConsistencyError(vr))
}
// TODO: run re-index after db is restored
// TODO #8: run re-index after db is restored
}
pub fn commit(mut self) -> Result<(), OperationError> {

View file

@ -1,3 +1,4 @@
use rand::prelude::*;
use std::path::PathBuf;
#[derive(Serialize, Deserialize, Debug)]
@ -5,15 +6,16 @@ pub struct Configuration {
pub address: String,
pub domain: String,
pub threads: usize,
// db type later
pub db_path: String,
pub maximum_request: usize,
// db type later
pub secure_cookies: bool,
pub cookie_key: [u8; 32],
}
impl Configuration {
pub fn new() -> Self {
Configuration {
let mut c = Configuration {
address: String::from("127.0.0.1:8080"),
domain: String::from("127.0.0.1"),
threads: 8,
@ -21,9 +23,13 @@ impl Configuration {
maximum_request: 262144, // 256k
// log type
// log path
// TODO: default true in prd
// TODO #63: default true in prd
secure_cookies: false,
}
cookie_key: [0; 32],
};
let mut rng = StdRng::from_entropy();
rng.fill(&mut c.cookie_key);
c
}
pub fn update_db_path(&mut self, p: &PathBuf) {

View file

@ -7,6 +7,7 @@ use actix_web::{
use bytes::BytesMut;
use futures::{future, Future, Stream};
use time::Duration;
use crate::config::Configuration;
@ -349,6 +350,7 @@ pub fn create_server_core(config: Configuration) {
let max_size = config.maximum_request;
let secure_cookies = config.secure_cookies;
let domain = config.domain.clone();
let cookie_key: [u8; 32] = config.cookie_key.clone();
// start the web server
actix_web::server::new(move || {
@ -359,12 +361,18 @@ pub fn create_server_core(config: Configuration) {
// Connect all our end points here.
.middleware(middleware::Logger::default())
.middleware(session::SessionStorage::new(
// TODO: Signed prevents tampering. this 32 byte key MUST
// be generated (probably stored in DB for cross-host access)
session::CookieSessionBackend::signed(&[0; 32])
// Signed prevents tampering. this 32 byte key MUST
// be generated (probably a cli option, and it's up to the
// server process to coordinate these on hosts). IE an RODC
// could have a different key than our write servers to prevent
// disclosure of a writeable token in case of compromise. It does
// mean that you can't load balance between the rodc and the write
// though, but that's totally reasonable.
session::CookieSessionBackend::signed(&cookie_key)
// Limit to path?
// .path("/")
//.max_age() duration of the token life TODO make this proper!
// TODO #63: make this configurable!
.max_age(Duration::hours(1))
// .domain(domain.as_str())
// .same_site(cookie::SameSite::Strict) // constrain to the domain
// Disallow from js and ...?

View file

@ -947,11 +947,9 @@ impl<VALID, STATE> Entry<VALID, STATE> {
pub fn attribute_substring(&self, attr: &str, subvalue: &str) -> bool {
match self.attrs.get(attr) {
Some(v_list) => {
v_list
Some(v_list) => v_list
.iter()
.fold(false, |acc, v| if acc { acc } else { v.contains(subvalue) })
}
.fold(false, |acc, v| if acc { acc } else { v.contains(subvalue) }),
None => false,
}
}

View file

@ -29,12 +29,6 @@ use crate::proto::v1::SearchRecycledRequest;
use actix::prelude::*;
use uuid::Uuid;
// Should the event Result have the log items?
// TODO: Remove serialising here - each type should
// have it's own result type!
// TODO: Every event should have a uuid for logging analysis
#[derive(Debug)]
pub struct OpResult {}
@ -119,7 +113,7 @@ impl Event {
let uat = uat.ok_or(OperationError::NotAuthenticated)?;
let e = try_audit!(audit, qs.internal_search_uuid(audit, uat.uuid.as_str()));
// TODO: Now apply claims from the uat into the Entry
// TODO #64: Now apply claims from the uat into the Entry
// to allow filtering.
Ok(Event {
@ -164,7 +158,7 @@ impl Event {
}
pub fn from_impersonate(event: &Self) -> Self {
// TODO: In the future, we could change some of this data
// TODO #64 ?: In the future, we could change some of this data
// to reflect the fact we are in fact impersonating the action
// rather than the user explicitly requesting it. Could matter
// to audits and logs to determine what happened.
@ -342,7 +336,6 @@ pub struct CreateEvent {
// This may affect which plugins are run ...
}
// TODO: Should this actually be in createEvent handler?
impl CreateEvent {
pub fn from_request(
audit: &mut AuditScope,
@ -634,7 +627,6 @@ pub struct AuthEvent {
impl AuthEvent {
pub fn from_message(msg: AuthMessage) -> Result<Self, OperationError> {
Ok(AuthEvent {
// TODO: Change to AuthMessage, and fill in uat?
event: None,
step: AuthEventStep::from_authstep(msg.req.step, msg.sessionid)?,
})
@ -661,7 +653,6 @@ impl AuthEvent {
#[derive(Debug)]
pub struct AuthResult {
pub sessionid: Uuid,
// TODO: Make this an event specific authstate type?
pub state: AuthState,
}
@ -692,8 +683,6 @@ impl WhoamiResult {
}
}
// TODO: Are these part of the proto?
#[derive(Debug)]
pub struct PurgeTombstoneEvent {
pub event: Event,

View file

@ -266,7 +266,7 @@ impl Filter<FilterInvalid> {
})
}
// TODO: This has to have two versions to account for ro/rw traits, because RS can't
// This has to have two versions to account for ro/rw traits, because RS can't
// monomorphise on the trait to call clone_value. An option is to make a fn that
// takes "clone_value(t, a, v) instead, but that may have a similar issue.
pub fn from_ro(
@ -345,15 +345,11 @@ impl FilterComp {
}
pub fn validate(&self, schema: &SchemaTransaction) -> Result<FilterComp, SchemaError> {
// TODO:
// First, normalise (if possible)
// Then, validate
// Optimisation is done at another stage.
// This probably needs some rework
// TODO: Getting this each recursion could be slow. Maybe
// Getting this each recursion could be slow. Maybe
// we need an inner function that passes the reference?
let schema_attributes = schema.get_attributes();
let schema_name = schema_attributes

View file

@ -12,7 +12,6 @@ use crate::proto::v1::{AuthAllowed, AuthCredential, AuthState};
enum CredState {
Success(Vec<Claim>),
Continue(Vec<AuthAllowed>),
// TODO: Should we have a reason in Denied so that we
Denied(&'static str),
}
@ -36,27 +35,32 @@ impl CredHandler {
match self {
CredHandler::Anonymous => {
creds.iter().fold(
CredState::Denied("non-anonymous credential provided"),
CredState::Continue(vec![AuthAllowed::Anonymous]),
|acc, cred| {
// TODO: if denied, continue returning denied.
// TODO: if continue, continue returning continue.
// How to do this correctly?
// There is no "continuation" from this type.
// There is no "continuation" from this type - we only set it at
// the start assuming there is no values in the iter so we can tell
// the session to continue up to some time limit.
match acc {
// If denied, continue returning denied.
CredState::Denied(_) => acc,
// We have a continue or success, it's important we keep checking here
// after the success, because if they sent "multiple" anonymous or
// they sent anon + password, we need to handle both cases. Double anon
// is okay, but anything else is instant failure, even if we already
// had a success.
_ => {
match cred {
AuthCredential::Anonymous => {
// For anonymous, no claims will ever be issued.
CredState::Success(Vec::new())
}
_ => {
// Should we have a reason in Denied so that we can say why denied?
acc
// CredState::Denied
_ => CredState::Denied("non-anonymous credential provided"),
}
}
} // end match acc
},
)
}
} // end credhandler::anonymous
}
}
@ -108,7 +112,7 @@ impl AuthSession {
// Is the whole account locked?
// What about in memory account locking? Is that something
// we store in the account somehow?
// TODO: Implement handler locking!
// TODO #59: Implement handler locking!
AuthSession {
account: account,
@ -149,7 +153,7 @@ impl AuthSession {
// Alternately, open a write, and commit the needed security metadata here
// now rather than async (probably better for lock-outs etc)
//
// TODO: Async message the account owner about the login?
// TODO #59: Async message the account owner about the login?
// If this fails, how can we in memory lock the account?
//
// The lockouts could also be an in-memory concept too?
@ -159,7 +163,6 @@ impl AuthSession {
}
pub fn valid_auth_mechs(&self) -> Vec<AuthAllowed> {
// TODO: This needs logging ....
if self.finished {
Vec::new()
} else {

View file

@ -18,7 +18,7 @@ pub struct IdmServer {
// various accounts, and we have a good idea of how to structure the
// in memory caches related to locking.
//
// TODO: This needs a mark-and-sweep gc to be added.
// TODO #60: This needs a mark-and-sweep gc to be added.
sessions: CowCell<BTreeMap<Uuid, AuthSession>>,
// Need a reference to the query server.
qs: QueryServer,
@ -39,7 +39,7 @@ pub struct IdmServerReadTransaction<'a> {
}
impl IdmServer {
// TODO: Make number of authsessions configurable!!!
// TODO #59: Make number of authsessions configurable!!!
pub fn new(qs: QueryServer) -> IdmServer {
IdmServer {
sessions: CowCell::new(BTreeMap::new()),
@ -135,7 +135,6 @@ impl<'a> IdmServerWriteTransaction<'a> {
Ok(AuthResult {
sessionid: sessionid,
// TODO: Change this to a better internal type?
state: AuthState::Continue(next_mech),
})
}

View file

@ -32,7 +32,7 @@ impl Actor for IntervalActor {
type Context = actix::Context<Self>;
fn started(&mut self, ctx: &mut Self::Context) {
// TODO: This timeout could be configurable from config?
// TODO #65: This timeout could be configurable from config?
ctx.run_interval(Duration::from_secs(PURGE_TIMEOUT), move |act, _ctx| {
act.purge_recycled();
});

View file

@ -10,7 +10,9 @@ extern crate actix_web;
extern crate futures;
extern crate r2d2;
extern crate r2d2_sqlite;
extern crate rand;
extern crate rusqlite;
extern crate time;
extern crate uuid;
extern crate bytes;
@ -39,11 +41,9 @@ mod async_log;
#[macro_use]
mod audit;
mod be;
// TODO: Should this be public?
pub mod constants;
mod entry;
mod event;
// TODO: Does this need pub?
mod filter;
mod interval;
mod modify;

View file

@ -60,9 +60,6 @@ pub struct ModifyList<VALID> {
mods: Vec<Modify>,
}
// TODO: ModifyList should be like filter and have valid/invalid to schema.
// Or do we not care because the entry will be invalid at the end?
impl<'a> IntoIterator for &'a ModifyList<ModifyValid> {
type Item = &'a Modify;
type IntoIter = slice::Iter<'a, Modify>;

View file

@ -106,7 +106,7 @@ fn apply_memberof(
let mut mo_set: Vec<_> = groups
.iter()
.map(|g| {
// TODO: This could be more efficient
// TODO #61: This could be more efficient
let mut v = vec![g.get_uuid().clone()];
match g.get_ava("memberof") {
Some(mos) => {
@ -129,8 +129,8 @@ fn apply_memberof(
// first add a purged memberof to remove all mo we no longer
// support.
// TODO: Could this be more efficient
// TODO: Could this affect replication? Or should the CL work out the
// TODO #61: Could this be more efficient
// TODO #68: Could this affect replication? Or should the CL work out the
// true diff of the operation?
let mo_purge = vec![
Modify::Present("class".to_string(), "memberof".to_string()),
@ -170,7 +170,7 @@ impl Plugin for MemberOf {
"memberof"
}
// TODO: We could make this more efficient by limiting change detection to ONLY member/memberof
// TODO #61: We could make this more efficient by limiting change detection to ONLY member/memberof
// attrs rather than any attrs.
fn post_create(

View file

@ -12,6 +12,19 @@ use std::collections::HashSet;
pub struct Protected {}
// Here is the declaration of all the attrs that can be altered by
// a call on a system object. We trust they are allowed because
// schema will have checked this, and we don't allow class changes!
lazy_static! {
static ref ALLOWED_ATTRS: HashSet<&'static str> = {
let mut m = HashSet::new();
m.insert("must");
m.insert("may");
m
};
}
impl Plugin for Protected {
fn id() -> &'static str {
"plugin_protected"
@ -102,15 +115,9 @@ impl Plugin for Protected {
Modify::Removed(a, _) => a,
Modify::Purged(a) => a,
};
// TODO: This should be a set or some kind of structure?
// I had issues statically allocating this though ... alternately
// we could make it a "configurable" type?
if a == "must" {
Ok(())
} else if a == "may" {
Ok(())
} else {
Err(OperationError::SystemProtectedObject)
match ALLOWED_ATTRS.get(a.as_str()) {
Some(_) => Ok(()),
None => Err(OperationError::SystemProtectedObject),
}
}
})

View file

@ -46,7 +46,7 @@ impl QueryServerV1 {
}
}
// TODO: We could move most of the be/schema/qs setup and startup
// TODO #54: We could move most of the be/schema/qs setup and startup
// outside of this call, then pass in "what we need" in a cloneable
// form, this way we could have separate Idm vs Qs threads, and dedicated
// threads for write vs read
@ -71,7 +71,7 @@ impl QueryServerV1 {
let query_server = QueryServer::new(be, schema);
let mut audit_qsc = AuditScope::new("query_server_init");
// TODO: Should the IDM parts be broken out to the IdmServer?
// TODO #62: Should the IDM parts be broken out to the IdmServer?
// What's important about this initial setup here is that it also triggers
// the schema and acp reload, so they are now configured correctly!
// Initialise the schema core.
@ -264,13 +264,14 @@ impl Handler<WhoamiMessage> for QueryServerV1 {
fn handle(&mut self, msg: WhoamiMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("whoami");
let res = audit_segment!(&mut audit, || {
// TODO: Move this to IdmServer!!!
// TODO #62: Move this to IdmServer!!!
// Begin a read
let qs_read = self.qs.read();
// Make an event from the whoami request. This will process the event and
// generate a selfuuid search.
// TODO: This currently handles the unauthenticated check, and will
//
// This currently handles the unauthenticated check, and will
// trigger the failure, but if we can manage to work out async
// then move this to core.rs, and don't allow Option<UAT> to get
// this far.

View file

@ -68,12 +68,10 @@ pub struct UserAuthToken {
/* ===== low level proto types ===== */
// TODO: We probably need a proto entry to transform our
// server core entry into. We also need to get from proto
// entry to our actual entry.
// ProtoEntry vs Entry
// There is a good future reason for this separation. It allows changing
// the in memory server core entry type, without affecting the protoEntry type
//
// There is a good future reason for this separation. It allows changing
// the in memory server core entry type, without affecting the proto
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Entry {

View file

@ -1183,7 +1183,7 @@ pub struct SchemaWriteTransaction<'a> {
}
impl<'a> SchemaWriteTransaction<'a> {
// TODO: Schema probably needs to be part of the backend, so that commits are wholly atomic
// Schema probably needs to be part of the backend, so that commits are wholly atomic
// but in the current design, we need to open be first, then schema, but we have to commit be
// first, then schema to ensure that the be content matches our schema. Saying this, if your
// schema commit fails we need to roll back still .... How great are transactions.

View file

@ -329,7 +329,7 @@ pub trait QueryServerTransaction {
//
// For passwords, hashing and changes will take place later.
//
// TODO: It could be argued that we should have a proper "Value" type, so that we can
// TODO #66: It could be argued that we should have a proper "Value" type, so that we can
// take care of this a bit cleaner, and do the checks in that, but I think for
// now this is good enough.
fn clone_value(
@ -581,7 +581,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
// Log the request
// TODO: Do we need limits on number of creates, or do we constraint
// TODO #67: Do we need limits on number of creates, or do we constraint
// based on request size in the frontend?
// Copy the entries to a writeable form.
@ -853,7 +853,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
Err(e) => return Err(e),
};
// TODO: Has an appropriate amount of time/condition past (ie replication events?)
// TODO #68: Has an appropriate amount of time/condition past (ie replication events?)
// Delete them
let mut audit_be = AuditScope::new("backend_delete");