This commit is contained in:
William Brown 2018-09-29 17:54:16 +10:00
commit 31afcabd83
19 changed files with 665 additions and 0 deletions

3
.gitignore vendored Normal file
View file

@ -0,0 +1,3 @@
/target
**/*.rs.bk

0
CODE_OF_CONDUCT.md Normal file
View file

23
Cargo.toml Normal file
View file

@ -0,0 +1,23 @@
[package]
name = "rsidm"
version = "0.1.0"
authors = ["William Brown <william@blackhats.net.au>"]
# We need three major binaries. The server itself, the unix client, and a cli
# mgmt tool.
[dependencies]
# Actor framework plus the HTTP front end built on it.
actix = "0.7"
actix-web = "0.7"
futures = "0.1"
uuid = { version = "0.5", features = ["serde", "v4"] }
# Serialisation of entries to/from JSON.
serde = "1.0"
serde_json = "1.0"
serde_derive = "1.0"
# SQLite storage backend with r2d2 connection pooling.
diesel = { version = "^1.1.0", features = ["sqlite", "r2d2"] }
r2d2 = "0.8"

0
LICENSE.md Normal file
View file

54
README.md Normal file
View file

@ -0,0 +1,54 @@
# Rs Identity Manager
rsidm is an identity management platform written in rust. Our goals are:
* Modern identity management platform
* Simple to deploy and integrate with
* Extensible
* Correct
## Code of Conduct
See CODE_OF_CONDUCT.md
## Examples
## MVP features
* Pam/nsswitch clients (with offline auth, and local totp)
* CLI for admin
* OIDC/Oauth
* SSH key distribution
* MFA (TOTP)
* In memory read cache (cow)
* backup/restore
## Planned features
* Replicated database backend (389-ds, couchdb, or custom repl proto)
* SAML
* Read Only Replicas
* Certificate distribution?
* Web UI for admin
* Account impersonation
* Webauthn
* Sudo rule distribution via nsswitch?
## Features we want to avoid
* Audit: This is better solved by ...
* Fully synchronous behaviour: ...
* Generic database: ... (max db size etc)
* Being LDAP: ...
## More?
## Get involved
## Designs
See the designs folder

View file

@ -0,0 +1,3 @@
-- Revert the matching `up.sql` migration: remove the entries table it creates.
-- (The previous version dropped `users`, which up.sql never creates.)
DROP TABLE entries;

View file

@ -0,0 +1,6 @@
-- Create the core entry store: a single table mapping a numeric id to the
-- serialised entry payload.
CREATE TABLE entries (
    id INTEGER NOT NULL PRIMARY KEY,
    entry VARCHAR NOT NULL
);

5
src/be/filter.rs Normal file
View file

@ -0,0 +1,5 @@
// This represents a filtering query. This can be done
// in parallel map/reduce style, or directly on a single
// entry to assert it matches.

0
src/be/mem_be/mod.rs Normal file
View file

76
src/be/mod.rs Normal file
View file

@ -0,0 +1,76 @@
//! Db executor actor
use actix::prelude::*;
use diesel;
use diesel::prelude::*;
use diesel::r2d2::{self, ConnectionManager, Pool};
// use uuid;
use super::log::EventLog;
mod sqlite_be;
mod mem_be;
mod filter;
// HACK HACK HACK remove duplicate code
// Helper for internal logging: formats the arguments and fire-and-forgets a
// LogEvent message to the given EventLog actor address.
macro_rules! log_event {
    ($log_addr:expr, $($arg:tt)*) => ({
        use std::fmt;
        use log::LogEvent;
        // do_send is non-blocking and ignores delivery errors; logging here
        // is best-effort by design.
        $log_addr.do_send(
            LogEvent {
                msg: fmt::format(
                    format_args!($($arg)*)
                )
            }
        )
    })
}
// This contacts the needed backend and starts it up
// Selects which storage engine the backend should use.
pub enum BackendType {
    Memory, // isn't memory just sqlite with file :memory: ?
    SQLite,
}
// Build the sqlite connection pool for `path` and spawn a pool of
// BackendActor workers on a SyncArbiter, returning their address.
// NOTE(review): `_betype` is currently ignored — only the SQLite/r2d2 path
// is wired up; Memory presumably maps to a ":memory:" path later. TODO confirm.
pub fn start(log: actix::Addr<EventLog>, _betype: BackendType, path: &str) -> actix::Addr<BackendActor> {
    // How can we allow different db names and types?
    let manager = ConnectionManager::<SqliteConnection>::new(path);
    let pool = r2d2::Pool::builder()
        .build(manager)
        .expect("Failed to create pool");
    // Eight synchronous workers, each holding its own clone of the pool handle.
    SyncArbiter::start(8, move || {
        BackendActor::new(log.clone(), pool.clone())
    })
}
// Backend worker actor: owns a handle to the event log and the sqlite
// connection pool.
pub struct BackendActor {
    log: actix::Addr<EventLog>,
    pool: Pool<ConnectionManager<SqliteConnection>>
}
// Runs on a SyncArbiter thread, so a synchronous (blocking) context is used.
impl Actor for BackendActor {
    type Context = SyncContext<Self>;
}
// In the future this will do the routing between the chosen backends etc.
impl BackendActor {
    /// Construct a backend worker, announcing the startup on the event log.
    pub fn new(log: actix::Addr<EventLog>, pool: Pool<ConnectionManager<SqliteConnection>>) -> Self {
        log_event!(log, "Starting DB worker ...");
        // Field-init shorthand (clippy: redundant_field_names).
        BackendActor { log, pool }
    }
}
#[cfg(test)]
mod tests {
    // Placeholder smoke test; real backend tests still need to be written.
    #[test]
    fn test_simple_create() {
        println!("It works!");
    }
}

10
src/be/sqlite_be/mod.rs Normal file
View file

@ -0,0 +1,10 @@
// SQLite storage engine: diesel models and generated schema for the entry store.
// We need tests too
// need a way to add an index
// need a way to do filters
// need a way to manage idls
mod models;
mod schema;

View file

@ -0,0 +1,38 @@
// Stores models of various types. Given the design of the DB, this
// is reasonably simple for our backend.
// We have a main id -> entries type, and everything else is a value -> (set id) type or value -> id
// I'll probably make IDL serialisable to cbor or something ...
use diesel;
use diesel::prelude::*;
use diesel::r2d2::{ConnectionManager, Pool};
use super::schema::entries;
// A row of the `entries` table: the numeric id plus the serialised entry
// payload.
#[derive(Serialize, Queryable)]
pub struct Entry {
    pub id: i64,
    pub entry: String,
}
// Insertable form of an entry row; borrows the payload so an insert does not
// require an owned copy.
#[derive(Insertable)]
#[table_name = "entries"]
pub struct NewEntry<'a> {
    pub id: i64,
    pub entry: &'a str,
}
#[cfg(test)]
mod tests {
    // Placeholder smoke test; real model tests still need to be written.
    #[test]
    fn test_simple_create() {
        println!("It works!");
    }
}

View file

@ -0,0 +1,8 @@
// Diesel schema for the entries table; `id` maps to i64 (BigInt) and the
// serialised entry body to String (Text), matching models::Entry.
table! {
    entries (id) {
        id -> BigInt,
        entry -> Text,
    }
}

117
src/entry.rs Normal file
View file

@ -0,0 +1,117 @@
use serde_json::{Value, Error};
// make a trait entry for everything to adhere to?
// * How to get indexs out?
// * How to track pending diffs?
// Placeholder for the generic entry type; fields are still to be designed.
#[derive(Serialize, Deserialize, Debug)]
pub struct Entry {
}
// pub trait Entry {
//fn to_json_str(&self) -> String;
// fn to_index_diff -> ???
// from_json_str() -> Self;
//
// Does this match a filter or not?
// fn apply_filter -> Result<bool, ()>
// }
//enum Credential {
//?
//}
// The ways an account can authenticate. Stored inline on the User for now.
#[derive(Serialize, Deserialize, Debug)]
enum Credential {
    // Plain password credential; `hash` is presumably a salted password hash —
    // the hashing scheme is not decided here. TODO confirm.
    Password {
        name: String,
        hash: String,
    },
    // Password plus a TOTP second factor.
    TOTPPassword {
        name: String,
        hash: String,
        totp_secret: String,
    },
    // An SSH public key usable for authentication / key distribution.
    SshPublicKey {
        name: String,
        data: String,
    },
}
// A user account entry and its attributes.
#[derive(Serialize, Deserialize, Debug)]
struct User {
    username: String,
    // Could this be derived from self? Do we even need schema?
    class: Vec<String>,
    displayname: String,
    legalname: Option<String>,
    email: Vec<String>,
    // uuid?
    // need to support deref later ...
    memberof: Vec<String>,
    sshpublickey: Vec<String>,
    credentials: Vec<Credential>,
}
impl User {
    /// Create an empty account carrying only the two mandatory names; every
    /// other attribute starts out blank.
    pub fn new(username: &str, displayname: &str) -> Self {
        User {
            username: username.to_string(),
            class: Vec::new(),
            displayname: displayname.to_string(),
            legalname: None,
            email: Vec::new(),
            memberof: Vec::new(),
            sshpublickey: Vec::new(),
            credentials: Vec::new(),
        }
    }
    // We need a way to "diff" two User objects
    // as on a modification we want to track the set of changes
    // that is occurring -- needed for indexing to function.
    // Basically we just need to check if it changed, remove
    // the "former" and add the "newer" value.
    // We have to sort vecs ...
    // Is there a way to call this on serialise?
    /// Stub: validation is not implemented yet, so it always reports failure.
    fn validate() -> Result<(), ()> {
        Err(())
    }
}
#[cfg(test)]
mod tests {
    use super::User;
    use serde_json;
    // Round-trip a freshly built User through serde_json; unwrap panics (and
    // fails the test) if either direction errors.
    #[test]
    fn test_user_basic() {
        let u: User = User::new("william", "William Brown");
        println!("u: {:?}", u);
        let d = serde_json::to_string(&u).unwrap();
        println!("d: {}", d.as_str());
        let u2: User = serde_json::from_str(d.as_str()).unwrap();
        println!("u2: {:?}", u2);
    }
}

21
src/event.rs Normal file
View file

@ -0,0 +1,21 @@
use actix::prelude::*;
// This structure tracks an event lifecycle, and is eventually
// sent to the logging system where it's structured and written
// out to the current logging BE.
// An operation in flight; the timing fields are stubbed as unit until a real
// time type is chosen.
#[derive(Debug)]
pub struct Event {
    time_start: (),
    time_end: (),
    // vec of start/end points of various parts of the event?
    // We probably need some functions for this. Is there a way in rust
    // to automatically annotate line numbers of code?
    // This could probably store the request parameters too?
    // The parallel in 389 would be operation struct
}
// Events can be sent to actors (the EventLog); no reply is expected.
impl Message for Event {
    type Result = ();
}

56
src/log.rs Normal file
View file

@ -0,0 +1,56 @@
use actix::prelude::*;
use super::event::Event;
// The logging subsystem: receives LogEvent/Event messages from the other
// actors and writes them out (currently just println! from a single worker).
// We need to pass in config for this later
/// Spawn the logging actor on its own SyncArbiter and return its address.
pub fn start() -> actix::Addr<EventLog> {
    // A single worker keeps output ordering simple.
    SyncArbiter::start(1, || EventLog {})
}
// The logging actor; stateless for now.
pub struct EventLog {
}
// Runs on a SyncArbiter thread, so a synchronous (blocking) context is used.
impl Actor for EventLog {
    type Context = SyncContext<Self>;
}
// What messages can we be sent. Basically this is all the possible
// inputs we *could* receive.
// Add a macro for easy msg write
// A pre-formatted log line sent to the EventLog actor.
pub struct LogEvent {
    pub msg: String,
}
// Fire-and-forget: no response payload.
impl Message for LogEvent {
    type Result = ();
}
impl Handler<LogEvent> for EventLog {
    type Result = ();
    // Write the pre-formatted message to stdout with a LOGEVENT tag.
    fn handle(&mut self, event: LogEvent, _: &mut SyncContext<Self>) -> Self::Result {
        println!("LOGEVENT: {}", event.msg );
    }
}
impl Handler<Event> for EventLog {
    type Result = ();
    // Debug-print the whole structured event; real formatting TBD.
    fn handle(&mut self, event: Event, _: &mut SyncContext<Self>) -> Self::Result {
        println!("EVENT: {:?}", event)
    }
}

126
src/main.rs Normal file
View file

@ -0,0 +1,126 @@
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate diesel;
extern crate actix;
extern crate actix_web;
extern crate r2d2;
extern crate uuid;
extern crate futures;
use actix::prelude::*;
use actix_web::{
http, middleware, App, AsyncResponder, FutureResponse, HttpResponse, Path, HttpRequest,
State,
};
use diesel::prelude::*;
use diesel::r2d2::ConnectionManager;
use futures::Future;
mod be;
mod entry;
mod server;
mod log;
mod event;
// Helper for internal logging: formats the arguments and fire-and-forgets a
// LogEvent to the EventLog actor (same macro is duplicated in be/server mods).
macro_rules! log_event {
    ($log_addr:expr, $($arg:tt)*) => ({
        use log::LogEvent;
        $log_addr.do_send(
            LogEvent {
                msg: std::fmt::format(
                    format_args!($($arg)*)
                )
            }
        )
    })
}
// Per-worker web state: the address of the query server actor pool.
struct AppState {
    qe: actix::Addr<server::QueryServer>,
}
// Handle the various end points we need to expose
/// Root endpoint: debug-logs the request and returns a static greeting.
fn index(req: &HttpRequest<AppState>) -> HttpResponse {
    println!("{:?}", req);
    HttpResponse::Ok().body("Hello\n")
}
/// GET /{class_list}: ask the query server for all entries of the named
/// class and render the result as JSON (or a bare 500 on failure).
fn class_list(
    (name, state): (Path<String>, State<AppState>),
) -> FutureResponse<HttpResponse>
{
    // println!("request to class_list");
    state
        .qe
        .send(
            server::ListClass {
                class_name: name.into_inner(),
            }
        )
        // Converts the actix mailbox error into the response error type.
        .from_err()
        .and_then(|res| match res {
            // Inner Ok carries the entries; serialise them as the JSON body.
            Ok(entry) => Ok(HttpResponse::Ok().json(entry)),
            // The Err(()) from the server carries no detail yet, so map it to
            // a plain 500.
            Err(_) => Ok(HttpResponse::InternalServerError().into()),
        })
        // Boxes the future chain into a FutureResponse (actix-web 0.7 idiom).
        .responder()
}
/// Entry point: boot the logging, backend, and query-server actors, then
/// serve HTTP on 127.0.0.1:8080 until the actix system shuts down.
fn main() {
    let sys = actix::System::new("rsidm-server");
    // read the config (if any?)
    // Until this point, we probably want to write to stderr
    // Start up the logging system: for now it just maps to stderr
    let log_addr = log::start();
    // Starting the BE chooses the path.
    let be_addr = be::start(log_addr.clone(), be::BackendType::SQLite, "test.db");
    // Start the query server with the given be
    let server_addr = server::start(log_addr.clone(), be_addr);
    // start the web server
    actix_web::server::new(move || {
        App::with_state(AppState {
            qe: server_addr.clone(),
        })
        // Connect all our end points here.
        // .middleware(middleware::Logger::default())
        .resource("/", |r| r.f(index))
        // Both with and without a trailing slash route to class_list.
        .resource("/{class_list}", |r| r.method(http::Method::GET).with(class_list))
        .resource("/{class_list}/", |r| r.method(http::Method::GET).with(class_list))
    })
    .bind("127.0.0.1:8080")
    .unwrap()
    .start();
    log_event!(log_addr, "Starting rsidm on 127.0.0.1:8080");
    // all the needed routes / views
    let _ = sys.run();
}
#[cfg(test)]
mod tests {
    // Placeholder smoke test; no main.rs logic is exercised yet.
    #[test]
    fn test_simple_create() {
        println!("It works!");
    }
}

90
src/server.rs Normal file
View file

@ -0,0 +1,90 @@
use actix::prelude::*;
use be::BackendActor;
use log::EventLog;
use entry::Entry;
// HACK HACK HACK remove duplicate code
// Helper for internal logging: formats the arguments and fire-and-forgets a
// LogEvent to the EventLog actor.
macro_rules! log_event {
    ($log_addr:expr, $($arg:tt)*) => ({
        use std::fmt;
        use log::LogEvent;
        $log_addr.do_send(
            LogEvent {
                msg: fmt::format(
                    format_args!($($arg)*)
                )
            }
        )
    })
}
/// Spawn the pool of QueryServer workers on a SyncArbiter and hand back
/// their shared address.
pub fn start(
    log: actix::Addr<EventLog>,
    be: actix::Addr<BackendActor>
) -> actix::Addr<QueryServer>
{
    // Eight synchronous workers; each gets its own clone of both handles.
    SyncArbiter::start(8, move || QueryServer::new(log.clone(), be.clone()))
}
// This is the core of the server. It implements all
// the search and modify actions, applies access controls
// and gets everything ready to push back to the fe code
pub struct QueryServer {
    log: actix::Addr<EventLog>,
    be: actix::Addr<BackendActor>,
}
impl QueryServer {
    /// Construct a query worker, announcing the startup on the event log.
    pub fn new(log: actix::Addr<EventLog>, be: actix::Addr<BackendActor>) -> Self {
        log_event!(log, "Starting query worker ...");
        // Field-init shorthand (clippy: redundant_field_names).
        QueryServer { log, be }
    }
    // Actually conduct a search request
    // This is the core of the server, as it processes the entire event
    // applies all parts required in order and more.
    /// Stub: search is not implemented yet, so it always reports failure.
    pub fn search() -> Result<Vec<Entry>, ()> {
        Err(())
    }
}
// Runs on a SyncArbiter thread, so a synchronous (blocking) context is used.
impl Actor for QueryServer {
    type Context = SyncContext<Self>;
}
// What messages can we be sent. Basically this is all the possible
// inputs we *could* receive.
// Message: list all entries of the named class.
pub struct ListClass {
    pub class_name: String,
}
// Replies with the matching entries, or Err(()) on failure.
impl Message for ListClass {
    type Result = Result<Vec<Entry>, ()>;
}
impl Handler<ListClass> for QueryServer {
    type Result = Result<Vec<Entry>, ()>;
    // Stub handler: logs the request, then reports failure until search is
    // implemented.
    fn handle(&mut self, msg: ListClass, _: &mut Self::Context) -> Self::Result {
        log_event!(self.log, "Class list for: {}", msg.class_name.as_str());
        Err(())
    }
}
// Get objects by filter
// Auth requests? How do we structure these ...

29
tests/integration_test.rs Normal file
View file

@ -0,0 +1,29 @@
use std::panic;
// Test external behaviours of the service.
// Shared harness: run `test` inside catch_unwind so the teardown steps can
// always run, then surface any panic as a test failure.
fn run_test<T>(test: T) -> ()
where T: FnOnce() -> () + panic::UnwindSafe
{
    // setup
    // Create the db: randomise the name of the file. Memory?
    // call out to migrations
    // Do we need any fixtures?
    let outcome = panic::catch_unwind(test);
    // teardown
    // remove the db file
    assert!(outcome.is_ok());
}
// Placeholder integration test: exercises the run_test harness only.
#[test]
fn test_schema() {
    run_test(|| {
        println!("It works");
    });
}