Docker improvements (#81)

Update the Dockerfile to work correctly with the newer server options and runtime.
Firstyear, 2019-09-06 13:05:27 +10:00 (committed by GitHub)
parent c798322ad8
commit b4fc71b27d
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
9 changed files with 36 additions and 18 deletions

Dockerfile

@@ -1,21 +1,22 @@
 FROM opensuse/tumbleweed:latest
 MAINTAINER william@blackhats.net.au
+EXPOSE 8080
 COPY . /home/rsidm/
 WORKDIR /home/rsidm/
-RUN zypper install -y timezone cargo rust rust-std gcc && \
+RUN zypper install -y timezone cargo rust gcc sqlite3-devel libopenssl-devel && \
     RUSTC_BOOTSTRAP=1 cargo build --release && \
-    zypper rm -u -y cargo rust rust-std gcc && \
+    zypper rm -u -y cargo rust gcc && \
     zypper clean
 RUN cd /etc && \
     ln -sf ../usr/share/zoneinfo/Australia/Brisbane localtime
-RUN useradd -m -r rsidm
-USER rsidm
+VOLUME /data
 ENV RUST_BACKTRACE 1
-CMD ["/home/rsidm/target/release/rsidm"]
+CMD ["/home/rsidm/target/release/rsidmd", "server", "-D", "/data/kanidm.db"]


@@ -345,7 +345,7 @@ With regard to forwarding tokens (no consideration is made to security of this
 system yet), method two probably is the best, but you need token constraint
 to make sure you can't replay to another host.
+https://techcommunity.microsoft.com/t5/Azure-Active-Directory-Identity/Your-Pa-word-doesn-t-matter/ba-p/731984
 
 Brain Dump Internal Details
 ===========================
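The design note above argues that a forwarded token needs a constraint binding it to the host it was issued for, so it cannot be replayed elsewhere. A minimal, dependency-free sketch of that idea; the Claims struct and its field names are hypothetical, not from this codebase:

    struct Claims {
        audience: String, // host the token was issued for (hypothetical claim)
        subject: String,
    }

    // A token replayed against a different host fails the audience check.
    fn token_valid_for_host(claims: &Claims, this_host: &str) -> bool {
        claims.audience == this_host
    }

    fn main() {
        let t = Claims {
            audience: "idm-a.example.com".to_string(),
            subject: "admin".to_string(),
        };
        assert!(token_valid_for_host(&t, "idm-a.example.com"));
        assert!(!token_valid_for_host(&t, "idm-b.example.com")); // replay rejected
    }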


@@ -66,7 +66,7 @@ impl Handler<AuditScope> for EventLog {
     type Result = ();
 
     fn handle(&mut self, event: AuditScope, _: &mut SyncContext<Self>) -> Self::Result {
-        info!("audit: {}", event);
+        debug!("audit: {}", event);
     }
 }
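Audit events drop from info! to debug!, which works together with the RUST_LOG selection added in main() further down: audit lines now only appear when the debug filter is enabled. A minimal sketch of that behaviour using the log and env_logger crates:

    use log::{debug, info};

    fn main() {
        env_logger::init();
        info!("shown with RUST_LOG=info");
        debug!("audit: shown only when the filter allows debug, e.g. RUST_LOG=debug");
    }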


@@ -360,8 +360,8 @@ impl BackendWriteTransaction {
         try_audit!(
             au,
             stmt.execute_named(&[
-                (":id", &ser_entry.id as &ToSql),
-                (":data", &ser_entry.data as &ToSql)
+                (":id", &ser_entry.id as &dyn ToSql),
+                (":data", &ser_entry.data as &dyn ToSql)
             ]),
             "rusqlite error {:?}",
             OperationError::SQLiteError
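The `as &ToSql` to `as &dyn ToSql` change is a Rust 2018 edition cleanup: a bare trait name in type position is deprecated, and `dyn` marks the trait-object coercion explicitly. The same fix recurs in the `&dyn SchemaTransaction` hunks below. The pattern, sketched with std's Display so it runs without rusqlite:

    use std::fmt::Display;

    // Accept any type implementing Display, via an explicit trait object.
    fn show(v: &dyn Display) {
        println!("{}", v);
    }

    fn main() {
        let id: i64 = 5;
        show(&id as &dyn Display); // explicit coercion, as in the params above
    }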


@@ -71,7 +71,7 @@ macro_rules! json_event_post {
         // `Future::and_then` can be used to merge an asynchronous workflow with a
         // synchronous workflow
         .and_then(
-            move |body| -> Box<Future<Item = HttpResponse, Error = Error>> {
+            move |body| -> Box<dyn Future<Item = HttpResponse, Error = Error>> {
                 // body is loaded, now we can deserialize serde-json
                 // let r_obj = serde_json::from_slice::<SearchRequest>(&body);
                 let r_obj = serde_json::from_slice::<$message_type>(&body);
@@ -180,7 +180,7 @@ fn auth(
         }
     })
     .and_then(
-        move |body| -> Box<Future<Item = HttpResponse, Error = Error>> {
+        move |body| -> Box<dyn Future<Item = HttpResponse, Error = Error>> {
             let r_obj = serde_json::from_slice::<AuthRequest>(&body);
             // Send to the db for action
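Boxing the return type matters here because the branches after deserialization produce different concrete future types, and `Box<dyn Future<...>>` erases that difference behind one trait object. The same principle, sketched with Iterator to stay dependency-free (the futures 0.1 Item/Error form above follows the identical pattern):

    // Each branch returns a different concrete iterator type; the Box<dyn ...>
    // return type unifies them.
    fn numbers(double: bool) -> Box<dyn Iterator<Item = i32>> {
        if double {
            Box::new((0..3).map(|x| x * 2)) // Map<Range<i32>, closure>
        } else {
            Box::new(0..3) // Range<i32>: a different concrete type
        }
    }

    fn main() {
        assert_eq!(numbers(true).collect::<Vec<_>>(), vec![0, 2, 4]);
        assert_eq!(numbers(false).collect::<Vec<_>>(), vec![0, 1, 2]);
    }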


@@ -415,7 +415,7 @@ impl<STATE> Entry<EntryInvalid, STATE> {
     pub fn validate(
         self,
-        schema: &SchemaTransaction,
+        schema: &dyn SchemaTransaction,
     ) -> Result<Entry<EntryValid, STATE>, SchemaError> {
         let schema_classes = schema.get_classes();
         let schema_attributes = schema.get_attributes();
@@ -998,7 +998,7 @@ impl<STATE> Entry<EntryValid, STATE> {
     pub fn gen_modlist_assert(
         &self,
-        schema: &SchemaTransaction,
+        schema: &dyn SchemaTransaction,
     ) -> Result<ModifyList<ModifyInvalid>, SchemaError> {
         // Create a modlist from this entry. We make this assuming we want the entry
         // to have this one as a subset of values. This means if we have single


@@ -268,7 +268,10 @@ impl Filter<FilterInvalid> {
         }
     }
 
-    pub fn validate(&self, schema: &SchemaTransaction) -> Result<Filter<FilterValid>, SchemaError> {
+    pub fn validate(
+        &self,
+        schema: &dyn SchemaTransaction,
+    ) -> Result<Filter<FilterValid>, SchemaError> {
         Ok(Filter {
             state: FilterValid {
                 inner: self.state.inner.validate(schema)?,
@@ -354,7 +357,7 @@ impl FilterComp {
         }
     }
 
-    pub fn validate(&self, schema: &SchemaTransaction) -> Result<FilterComp, SchemaError> {
+    pub fn validate(&self, schema: &dyn SchemaTransaction) -> Result<FilterComp, SchemaError> {
         // Optimisation is done at another stage.
         // This probably needs some rework


@@ -117,7 +117,7 @@ impl ModifyList<ModifyInvalid> {
     pub fn validate(
         &self,
-        schema: &SchemaTransaction,
+        schema: &dyn SchemaTransaction,
     ) -> Result<ModifyList<ModifyValid>, SchemaError> {
         let schema_attributes = schema.get_attributes();
         /*
@@ -138,7 +138,6 @@ impl ModifyList<ModifyInvalid> {
                     None => Err(SchemaError::InvalidAttribute),
                 }
             }
-            // TODO: Should this be a partial value type?
             Modify::Removed(attr, value) => {
                 let attr_norm = schema.normalise_attr_name(attr);
                 match schema_attributes.get(&attr_norm) {


@@ -64,6 +64,17 @@ enum Opt {
     RecoverAccount(RecoverAccountOpt),
 }
 
+impl Opt {
+    fn debug(&self) -> bool {
+        match self {
+            Opt::Server(sopt) | Opt::Verify(sopt) => sopt.debug,
+            Opt::Backup(bopt) => bopt.serveropts.debug,
+            Opt::Restore(ropt) => ropt.serveropts.debug,
+            Opt::RecoverAccount(ropt) => ropt.serveropts.debug,
+        }
+    }
+}
+
 fn main() {
     // Read cli args, determine if we should backup/restore
     let opt = Opt::from_args();
@@ -74,7 +85,11 @@ fn main() {
     // Configure the server logger. This could be adjusted based on what config
     // says.
-    // ::std::env::set_var("RUST_LOG", "actix_web=info,rsidm=info");
+    if opt.debug() {
+        ::std::env::set_var("RUST_LOG", "actix_web=info,rsidm=debug");
+    } else {
+        ::std::env::set_var("RUST_LOG", "actix_web=info,rsidm=info");
+    }
     env_logger::init();
 
     match opt {
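The new Opt::debug() helper lets every subcommand surface a debug flag that is resolved before the logger starts, which matters because env_logger reads RUST_LOG once at init. A reduced, self-contained sketch of the pattern; the flag and field names are illustrative, not necessarily the ones this commit uses:

    use structopt::StructOpt;

    #[derive(Debug, StructOpt)]
    struct ServerOpt {
        #[structopt(short = "d", long = "debug")]
        debug: bool, // hypothetical flag name for illustration
    }

    #[derive(Debug, StructOpt)]
    enum Opt {
        #[structopt(name = "server")]
        Server(ServerOpt),
    }

    impl Opt {
        // Mirror of the helper added above, reduced to one variant.
        fn debug(&self) -> bool {
            match self {
                Opt::Server(sopt) => sopt.debug,
            }
        }
    }

    fn main() {
        let opt = Opt::from_args();
        // Choose the filter before env_logger::init(), as the diff does.
        if opt.debug() {
            std::env::set_var("RUST_LOG", "debug");
        } else {
            std::env::set_var("RUST_LOG", "info");
        }
        env_logger::init();
    }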