20230125 pre rel cleanup (#1347)

Firstyear 2023-01-25 16:09:54 +10:00, committed by GitHub
parent 08ebcc7901
commit 3894dd43df
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)

19 changed files with 362 additions and 412 deletions

Cargo.lock (generated): 501 lines changed; diff suppressed because it is too large.


@@ -49,7 +49,7 @@ clap_complete = "^3.2.5"
 chrono = "^0.4.23"
 compact_jwt = "^0.2.3"
 # compact_jwt = { path = "../compact_jwt" }
-concread = "^0.4.0"
+concread = "^0.4.1"
 # concread = { path = "../concread" }
 cron = "0.12.0"
 crossbeam = "0.8.1"
@@ -81,16 +81,13 @@ kanidm_unix_int = { path = "./kanidm_unix_int" }
 last-git-commit = "0.2.0"
 # REMOVE this
 lazy_static = "^1.4.0"
-# ldap3_client = "^0.3.0"
-# ldap3_proto = "^0.3.0"
+ldap3_client = "^0.3.1"
+ldap3_proto = "^0.3.1"
 # ldap3_client = { path = "../ldap3/client", version = "0.3.0" }
 # ldap3_proto = { path = "../ldap3/proto", version = "0.3.0" }
-# scim_proto = { path = "../scim/proto", version = "0.1.0" }
-ldap3_client = { git = "https://github.com/kanidm/ldap3.git", version = "0.3.0" }
-ldap3_proto = { git = "https://github.com/kanidm/ldap3.git", version = "0.3.0" }
-scim_proto = { git = "https://github.com/kanidm/scim.git", version = "0.1.0" }
+# ldap3_client = { git = "https://github.com/kanidm/ldap3.git", version = "0.3.0" }
+# ldap3_proto = { git = "https://github.com/kanidm/ldap3.git", version = "0.3.0" }
 libc = "^0.2.139"
 libnss = "^0.4.0"
@@ -114,6 +111,11 @@ regex = "1.7.1"
 reqwest = { version = "0.11.14", default-features = false, features=["cookies", "json", "gzip", "native-tls"] }
 rpassword = "^7.2.0"
 rusqlite = "^0.28.0"
+scim_proto = "^0.1.1"
+# scim_proto = { path = "../scim/proto", version = "0.1.1" }
+# scim_proto = { git = "https://github.com/kanidm/scim.git", version = "0.1.1" }
 serde = "^1.0.152"
 serde_cbor = { version = "0.12.0-dev", package = "serde_cbor_2" }
 serde_json = "^1.0.91"


@@ -222,10 +222,8 @@ async fn driver_main(opt: Opt) {
         if let Some(sh) = status_handle {
             let _ = sh.await;
         }
-    } else {
-        if let Err(e) = run_sync(cb, &sync_config, &opt).await {
-            error!(?e, "Sync completed with error");
-        };
+    } else if let Err(e) = run_sync(cb, &sync_config, &opt).await {
+        error!(?e, "Sync completed with error");
     }
 }
@@ -463,17 +461,15 @@ async fn run_sync(
         info!("dry-run complete");
         info!("Success!");
         Ok(())
+    } else if let Err(e) = rsclient.scim_v1_sync_update(&scim_sync_request).await {
+        error!(
+            ?e,
+            "Failed to submit scim sync update - see the kanidmd server log for more details."
+        );
+        Err(SyncError::SyncUpdate)
     } else {
-        if let Err(e) = rsclient.scim_v1_sync_update(&scim_sync_request).await {
-            error!(
-                ?e,
-                "Failed to submit scim sync update - see the kanidmd server log for more details."
-            );
-            Err(SyncError::SyncUpdate)
-        } else {
-            info!("Success!");
-            Ok(())
-        }
+        info!("Success!");
+        Ok(())
     }
     // done!
 }
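Both hunks above are the same clippy::collapsible_else_if cleanup: an `if` that is the only statement inside an `else` block folds into `else if`, dropping a nesting level without changing behaviour. A minimal stand-alone sketch of the pattern; `run_sync` here is a stub, not the real sync driver:

    fn run_sync() -> Result<(), &'static str> {
        Err("network unreachable")
    }

    fn main() {
        let dry_run = false;
        if dry_run {
            println!("dry-run complete");
        } else if let Err(e) = run_sync() {
            // Before: `else { if let Err(e) = run_sync() { ... }; }` - the
            // extra block only added indentation.
            eprintln!("Sync completed with error: {e}");
        }
    }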
@@ -640,7 +636,7 @@ async fn process_ipa_sync_result(
     let empty_slice = Vec::default();

     // Future - make this par-map
-    let entries = entries
+    entries
         .into_iter()
         .filter_map(|(dn, e)| {
             let e_config = entry_config_map
@@ -656,9 +652,7 @@ async fn process_ipa_sync_result(
                 Err(()) => Some(Err(())),
             }
         })
-        .collect::<Result<Vec<_>, _>>();
-    entries
+        .collect::<Result<Vec<_>, _>>()
 }
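This pair of hunks fixes clippy::let_and_return: binding the iterator chain to `entries` only to return the binding on the next line adds nothing, so the chain becomes the function's tail expression. The shape of the change, with illustrative data in place of the LDAP entries:

    fn parse_all(raw: Vec<&str>) -> Result<Vec<i32>, ()> {
        // The chain is returned directly instead of going through a binding.
        raw.into_iter()
            .filter_map(|s| {
                if s.is_empty() {
                    None // filtered out, like skipped sync entries
                } else {
                    Some(s.parse::<i32>().map_err(|_| ()))
                }
            })
            .collect::<Result<Vec<_>, _>>()
    }

    fn main() {
        assert_eq!(parse_all(vec!["1", "", "2"]), Ok(vec![1, 2]));
    }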
// TODO: Allow re-map of uuid -> uuid // TODO: Allow re-map of uuid -> uuid
@@ -880,9 +874,9 @@ fn config_security_checks(cfg_path: &Path) -> bool {
             "Config missing from {} - cannot start up. Quitting.",
             cfg_path_str
         );
-        return false;
+        false
     } else {
-        let cfg_meta = match metadata(&cfg_path) {
+        let cfg_meta = match metadata(cfg_path) {
             Ok(v) => v,
             Err(e) => {
                 error!(
@@ -945,11 +939,9 @@ fn main() {
     if opt.skip_root_check {
         warn!("Skipping root user check, if you're running this for testing, ensure you clean up temporary files.")
        // TODO: this wording is not great m'kay.
-    } else {
-        if cuid == 0 || ceuid == 0 || cgid == 0 || cegid == 0 {
-            error!("Refusing to run - this process must not operate as root.");
-            return;
-        }
+    } else if cuid == 0 || ceuid == 0 || cgid == 0 || cegid == 0 {
+        error!("Refusing to run - this process must not operate as root.");
+        return;
     };

     if !config_security_checks(&opt.client_config) || !config_security_checks(&opt.ipa_sync_config)
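Two more small lints show up in the two hunks above: `return false;` in tail position becomes the bare expression `false` (clippy::needless_return), and `metadata(&cfg_path)` drops a borrow because `cfg_path` is already a `&Path` (clippy::needless_borrow); `&cfg_path` would be a `&&Path` that deref coercion has to peel off again. A compressed sketch, not the real checks:

    use std::fs::metadata;
    use std::path::Path;

    fn config_security_checks(cfg_path: &Path) -> bool {
        if !cfg_path.exists() {
            eprintln!("Config missing from {:?} - cannot start up. Quitting.", cfg_path);
            false // tail expression; `return false;` is redundant here
        } else {
            metadata(cfg_path).is_ok() // no extra `&` on an existing reference
        }
    }

    fn main() {
        println!("{}", config_security_checks(Path::new("/tmp")));
    }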


@@ -82,7 +82,7 @@ impl CacheLayer {
             dbtxn.commit()?;
         }

-        if pam_allow_groups.len() == 0 {
+        if pam_allow_groups.is_empty() {
             eprintln!("Will not be able to authenticate users, pam_allow_groups config is not configured.");
         }
@@ -170,7 +170,7 @@ impl CacheLayer {
         // * uuid
         // Attempt to search these in the db.
         let dbtxn = self.db.write().await;
-        let r = dbtxn.get_account(&account_id)?;
+        let r = dbtxn.get_account(account_id)?;

         match r {
             Some((ut, ex)) => {
@@ -222,7 +222,7 @@ impl CacheLayer {
         // * uuid
         // Attempt to search these in the db.
         let dbtxn = self.db.write().await;
-        let r = dbtxn.get_group(&grp_id)?;
+        let r = dbtxn.get_group(grp_id)?;

         match r {
             Some((ut, ex)) => {
@@ -864,7 +864,7 @@ impl CacheLayer {
     pub async fn pam_account_allowed(&self, account_id: &str) -> Result<Option<bool>, ()> {
         let token = self.get_usertoken(Id::Name(account_id.to_string())).await?;

-        if self.pam_allow_groups.len() == 0 {
+        if self.pam_allow_groups.is_empty() {
             // can't allow anything if the group list is zero...
             eprintln!("Cannot authenticate users, no allowed groups in configuration!");
             Ok(Some(false))
@@ -873,8 +873,7 @@ impl CacheLayer {
                 let user_set: BTreeSet<_> = tok
                     .groups
                     .iter()
-                    .map(|g| vec![g.name.clone(), g.spn.clone(), g.uuid.clone()])
-                    .flatten()
+                    .flat_map(|g| [g.name.clone(), g.spn.clone(), g.uuid.clone()])
                     .collect();

                 debug!(
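The cache-layer hunks are two idiom fixes applied several times: `len() == 0` becomes `is_empty()`, and `.map(..).flatten()` becomes `.flat_map(..)`, with a fixed-size array replacing the per-group `vec![..]` so nothing is allocated per element. In miniature, with made-up group data:

    use std::collections::BTreeSet;

    fn main() {
        let pam_allow_groups: Vec<String> = vec!["idm_admins".into()];
        if pam_allow_groups.is_empty() {
            eprintln!("Cannot authenticate users, no allowed groups in configuration!");
        }

        // (name, spn, uuid) triples; values are illustrative.
        let groups = [("posix", "posix@example.com", "0000-aaaa")];
        let user_set: BTreeSet<String> = groups
            .iter()
            .flat_map(|(name, spn, uuid)| {
                // An array is IntoIterator by value, so no Vec is needed.
                [name.to_string(), spn.to_string(), uuid.to_string()]
            })
            .collect();
        println!("{user_set:?}");
    }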


@@ -17,7 +17,7 @@ impl Decoder for ClientCodec {
     type Item = ClientResponse;

     fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
-        match serde_json::from_slice::<ClientResponse>(&src) {
+        match serde_json::from_slice::<ClientResponse>(src) {
             Ok(msg) => {
                 // Clear the buffer for the next message.
                 src.clear();


@@ -86,7 +86,7 @@ impl<'a> DbTxn<'a> {
     }

     /// This handles an error coming back from an sqlite event and dumps more information from it
-    fn sqlite_error(&self, msg: &str, error: rusqlite::Error) {
+    fn sqlite_error(&self, msg: &str, error: &rusqlite::Error) {
         error!(
             "sqlite {} error: {:?} db_path={:?}",
             msg,
@@ -96,7 +96,7 @@ impl<'a> DbTxn<'a> {
     }

     /// This handles an error coming back from an sqlite transaction and dumps a load of information from it
-    fn sqlite_transaction_error(&self, error: rusqlite::Error, _stmt: &rusqlite::Statement) {
+    fn sqlite_transaction_error(&self, error: &rusqlite::Error, _stmt: &rusqlite::Statement) {
         error!(
             "sqlite transaction error={:?} db_path={:?}",
             error,
@@ -111,7 +111,7 @@ impl<'a> DbTxn<'a> {
             .prepare("PRAGMA journal_mode=WAL;")
             .and_then(|mut wal_stmt| wal_stmt.query([]).map(|_| ()))
             .map_err(|e| {
-                self.sqlite_error("account_t create", e);
+                self.sqlite_error("account_t create", &e);
             })?;

         // Setup two tables - one for accounts, one for groups.
@@ -132,7 +132,7 @@ impl<'a> DbTxn<'a> {
                 [],
             )
             .map_err(|e| {
-                self.sqlite_error("account_t create", e);
+                self.sqlite_error("account_t create", &e);
             })?;

         self.conn
@@ -149,7 +149,7 @@ impl<'a> DbTxn<'a> {
                 [],
             )
             .map_err(|e| {
-                self.sqlite_error("group_t create", e);
+                self.sqlite_error("group_t create", &e);
             })?;

         self.conn
@@ -164,7 +164,7 @@ impl<'a> DbTxn<'a> {
                 [],
             )
             .map_err(|e| {
-                self.sqlite_error("memberof_t create error", e);
+                self.sqlite_error("memberof_t create error", &e);
             })?;

         Ok(())
@@ -182,7 +182,7 @@ impl<'a> DbTxn<'a> {
             .execute("COMMIT TRANSACTION", [])
             .map(|_| ())
             .map_err(|e| {
-                self.sqlite_error("commit", e);
+                self.sqlite_error("commit", &e);
             })
     }
@@ -190,13 +190,13 @@ impl<'a> DbTxn<'a> {
         self.conn
             .execute("UPDATE group_t SET expiry = 0", [])
             .map_err(|e| {
-                self.sqlite_error("update group_t", e);
+                self.sqlite_error("update group_t", &e);
             })?;
         self.conn
             .execute("UPDATE account_t SET expiry = 0", [])
             .map_err(|e| {
-                self.sqlite_error("update account_t", e);
+                self.sqlite_error("update account_t", &e);
             })?;

         Ok(())
@@ -204,13 +204,13 @@ impl<'a> DbTxn<'a> {
     pub fn clear_cache(&self) -> Result<(), ()> {
         self.conn.execute("DELETE FROM group_t", []).map_err(|e| {
-            self.sqlite_error("delete group_t", e);
+            self.sqlite_error("delete group_t", &e);
         })?;
         self.conn
             .execute("DELETE FROM account_t", [])
             .map_err(|e| {
-                self.sqlite_error("delete group_t", e);
+                self.sqlite_error("delete group_t", &e);
             })?;

         Ok(())
@@ -222,19 +222,19 @@ impl<'a> DbTxn<'a> {
             "SELECT token, expiry FROM account_t WHERE uuid = :account_id OR name = :account_id OR spn = :account_id"
         )
         .map_err(|e| {
-            self.sqlite_error("select prepare", e);
+            self.sqlite_error("select prepare", &e);
         })?;

         // Makes tuple (token, expiry)
         let data_iter = stmt
-            .query_map(&[account_id], |row| Ok((row.get(0)?, row.get(1)?)))
+            .query_map([account_id], |row| Ok((row.get(0)?, row.get(1)?)))
             .map_err(|e| {
-                self.sqlite_error("query_map failure", e);
+                self.sqlite_error("query_map failure", &e);
             })?;
         let data: Result<Vec<(Vec<u8>, i64)>, _> = data_iter
             .map(|v| {
                 v.map_err(|e| {
-                    self.sqlite_error("map failure", e);
+                    self.sqlite_error("map failure", &e);
                 })
             })
             .collect();
@@ -246,19 +246,19 @@ impl<'a> DbTxn<'a> {
             .conn
             .prepare("SELECT token, expiry FROM account_t WHERE gidnumber = :gid")
             .map_err(|e| {
-                self.sqlite_error("select prepare", e);
+                self.sqlite_error("select prepare", &e);
             })?;

         // Makes tuple (token, expiry)
         let data_iter = stmt
             .query_map(params![gid], |row| Ok((row.get(0)?, row.get(1)?)))
             .map_err(|e| {
-                self.sqlite_error("query_map", e);
+                self.sqlite_error("query_map", &e);
             })?;
         let data: Result<Vec<(Vec<u8>, i64)>, _> = data_iter
             .map(|v| {
                 v.map_err(|e| {
-                    self.sqlite_error("map", e);
+                    self.sqlite_error("map", &e);
                 })
             })
             .collect();
@@ -303,16 +303,16 @@ impl<'a> DbTxn<'a> {
             .conn
             .prepare("SELECT token FROM account_t")
             .map_err(|e| {
-                self.sqlite_error("select prepare", e);
+                self.sqlite_error("select prepare", &e);
             })?;

-        let data_iter = stmt.query_map([], |row| Ok(row.get(0)?)).map_err(|e| {
-            self.sqlite_error("query_map", e);
+        let data_iter = stmt.query_map([], |row| row.get(0)).map_err(|e| {
+            self.sqlite_error("query_map", &e);
         })?;
         let data: Result<Vec<Vec<u8>>, _> = data_iter
             .map(|v| {
                 v.map_err(|e| {
-                    self.sqlite_error("map", e);
+                    self.sqlite_error("map", &e);
                 })
             })
             .collect();
@@ -355,7 +355,7 @@ impl<'a> DbTxn<'a> {
             }
         )
         .map_err(|e| {
-            self.sqlite_error("delete account_t duplicate", e);
+            self.sqlite_error("delete account_t duplicate", &e);
         })
         .map(|_| ())?;
@@ -371,14 +371,14 @@ impl<'a> DbTxn<'a> {
             }
         )
         .map_err(|e| {
-            self.sqlite_error("delete account_t duplicate",e);
+            self.sqlite_error("delete account_t duplicate", &e);
         })?;

         if updated == 0 {
             let mut stmt = self.conn
                 .prepare("INSERT INTO account_t (uuid, name, spn, gidnumber, token, expiry) VALUES (:uuid, :name, :spn, :gidnumber, :token, :expiry) ON CONFLICT(uuid) DO UPDATE SET name=excluded.name, spn=excluded.name, gidnumber=excluded.gidnumber, token=excluded.token, expiry=excluded.expiry")
                 .map_err(|e| {
-                    self.sqlite_error("prepare",e);
+                    self.sqlite_error("prepare", &e);
                 })?;

             stmt.execute(named_params! {
@@ -393,7 +393,7 @@ impl<'a> DbTxn<'a> {
                 debug!("insert -> {:?}", r);
             })
             .map_err(|error| {
-                self.sqlite_transaction_error(error, &stmt);
+                self.sqlite_transaction_error(&error, &stmt);
             })?;
         }
@@ -404,21 +404,21 @@ impl<'a> DbTxn<'a> {
             .conn
             .prepare("DELETE FROM memberof_t WHERE a_uuid = :a_uuid")
             .map_err(|e| {
-                self.sqlite_error("prepare", e);
+                self.sqlite_error("prepare", &e);
             })?;
-        stmt.execute(&[&account.uuid])
+        stmt.execute([&account.uuid])
             .map(|r| {
                 debug!("delete memberships -> {:?}", r);
             })
             .map_err(|error| {
-                self.sqlite_transaction_error(error, &stmt);
+                self.sqlite_transaction_error(&error, &stmt);
             })?;

         let mut stmt = self
             .conn
             .prepare("INSERT INTO memberof_t (a_uuid, g_uuid) VALUES (:a_uuid, :g_uuid)")
             .map_err(|e| {
-                self.sqlite_error("prepare", e);
+                self.sqlite_error("prepare", &e);
             })?;

         // Now for each group, add the relation.
         account.groups.iter().try_for_each(|g| {
@@ -430,7 +430,7 @@ impl<'a> DbTxn<'a> {
                 debug!("insert membership -> {:?}", r);
             })
             .map_err(|error| {
-                self.sqlite_transaction_error(error, &stmt);
+                self.sqlite_transaction_error(&error, &stmt);
             })
         })
     }
@@ -443,12 +443,12 @@ impl<'a> DbTxn<'a> {
         )
         .map(|_| ())
         .map_err(|e| {
-            self.sqlite_error("memberof_t create", e);
+            self.sqlite_error("memberof_t create", &e);
         })
     }

     pub fn update_account_password(&self, a_uuid: &str, cred: &str) -> Result<(), ()> {
-        let pw = Password::new(&self.crypto_policy, cred).map_err(|e| {
+        let pw = Password::new(self.crypto_policy, cred).map_err(|e| {
             error!("password error -> {:?}", e);
         })?;
         let dbpw = pw.to_dbpasswordv1();
@@ -465,7 +465,7 @@ impl<'a> DbTxn<'a> {
             },
         )
         .map_err(|e| {
-            self.sqlite_error("update account_t password", e);
+            self.sqlite_error("update account_t password", &e);
         })
         .map(|_| ())
     }
@@ -475,19 +475,17 @@ impl<'a> DbTxn<'a> {
             .conn
             .prepare("SELECT password FROM account_t WHERE uuid = :a_uuid AND password IS NOT NULL")
             .map_err(|e| {
-                self.sqlite_error("select prepare", e);
+                self.sqlite_error("select prepare", &e);
             })?;

         // Makes tuple (token, expiry)
-        let data_iter = stmt
-            .query_map(&[a_uuid], |row| Ok(row.get(0)?))
-            .map_err(|e| {
-                self.sqlite_error("query_map", e);
-            })?;
+        let data_iter = stmt.query_map([a_uuid], |row| row.get(0)).map_err(|e| {
+            self.sqlite_error("query_map", &e);
+        })?;
         let data: Result<Vec<Vec<u8>>, _> = data_iter
             .map(|v| {
                 v.map_err(|e| {
-                    self.sqlite_error("map", e);
+                    self.sqlite_error("map", &e);
                 })
             })
             .collect();
@@ -526,19 +524,19 @@ impl<'a> DbTxn<'a> {
             "SELECT token, expiry FROM group_t WHERE uuid = :grp_id OR name = :grp_id OR spn = :grp_id"
         )
         .map_err(|e| {
-            self.sqlite_error("select prepare",e);
+            self.sqlite_error("select prepare", &e);
        })?;

         // Makes tuple (token, expiry)
         let data_iter = stmt
-            .query_map(&[grp_id], |row| Ok((row.get(0)?, row.get(1)?)))
+            .query_map([grp_id], |row| Ok((row.get(0)?, row.get(1)?)))
             .map_err(|e| {
-                self.sqlite_error("query_map", e);
+                self.sqlite_error("query_map", &e);
             })?;
         let data: Result<Vec<(Vec<u8>, i64)>, _> = data_iter
             .map(|v| {
                 v.map_err(|e| {
-                    self.sqlite_error("map", e);
+                    self.sqlite_error("map", &e);
                 })
             })
             .collect();
@@ -550,19 +548,19 @@ impl<'a> DbTxn<'a> {
             .conn
             .prepare("SELECT token, expiry FROM group_t WHERE gidnumber = :gid")
             .map_err(|e| {
-                self.sqlite_error("select prepare", e);
+                self.sqlite_error("select prepare", &e);
             })?;

         // Makes tuple (token, expiry)
         let data_iter = stmt
             .query_map(params![gid], |row| Ok((row.get(0)?, row.get(1)?)))
             .map_err(|e| {
-                self.sqlite_error("query_map", e);
+                self.sqlite_error("query_map", &e);
             })?;
         let data: Result<Vec<(Vec<u8>, i64)>, _> = data_iter
             .map(|v| {
                 v.map_err(|e| {
-                    self.sqlite_error("map", e);
+                    self.sqlite_error("map", &e);
                 })
             })
             .collect();
@@ -607,18 +605,16 @@ impl<'a> DbTxn<'a> {
             .conn
             .prepare("SELECT account_t.token FROM (account_t, memberof_t) WHERE account_t.uuid = memberof_t.a_uuid AND memberof_t.g_uuid = :g_uuid")
             .map_err(|e| {
-                self.sqlite_error("select prepare",e);
+                self.sqlite_error("select prepare", &e);
             })?;

-        let data_iter = stmt
-            .query_map(&[g_uuid], |row| Ok(row.get(0)?))
-            .map_err(|e| {
-                self.sqlite_error("query_map", e);
-            })?;
+        let data_iter = stmt.query_map([g_uuid], |row| row.get(0)).map_err(|e| {
+            self.sqlite_error("query_map", &e);
+        })?;
         let data: Result<Vec<Vec<u8>>, _> = data_iter
             .map(|v| {
                 v.map_err(|e| {
-                    self.sqlite_error("map", e);
+                    self.sqlite_error("map", &e);
                 })
             })
             .collect();
@@ -641,16 +637,16 @@ impl<'a> DbTxn<'a> {
             .conn
             .prepare("SELECT token FROM group_t")
             .map_err(|e| {
-                self.sqlite_error("select prepare", e);
+                self.sqlite_error("select prepare", &e);
             })?;

-        let data_iter = stmt.query_map([], |row| Ok(row.get(0)?)).map_err(|e| {
-            self.sqlite_error("query_map", e);
+        let data_iter = stmt.query_map([], |row| row.get(0)).map_err(|e| {
+            self.sqlite_error("query_map", &e);
         })?;
         let data: Result<Vec<Vec<u8>>, _> = data_iter
             .map(|v| {
                 v.map_err(|e| {
-                    self.sqlite_error("map", e);
+                    self.sqlite_error("map", &e);
                 })
             })
             .collect();
@@ -682,7 +678,7 @@ impl<'a> DbTxn<'a> {
         let mut stmt = self.conn
             .prepare("INSERT OR REPLACE INTO group_t (uuid, name, spn, gidnumber, token, expiry) VALUES (:uuid, :name, :spn, :gidnumber, :token, :expiry)")
             .map_err(|e| {
-                self.sqlite_error("prepare",e);
+                self.sqlite_error("prepare", &e);
             })?;

         stmt.execute(named_params! {
@@ -697,16 +693,16 @@ impl<'a> DbTxn<'a> {
                 debug!("insert -> {:?}", r);
             })
             .map_err(|e| {
-                self.sqlite_error("execute", e);
+                self.sqlite_error("execute", &e);
             })
     }

     pub fn delete_group(&self, g_uuid: &str) -> Result<(), ()> {
         self.conn
-            .execute("DELETE FROM group_t WHERE uuid = :g_uuid", &[g_uuid])
+            .execute("DELETE FROM group_t WHERE uuid = :g_uuid", [g_uuid])
             .map(|_| ())
             .map_err(|e| {
-                self.sqlite_error("memberof_t create", e);
+                self.sqlite_error("memberof_t create", &e);
             })
     }
 }
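Every hunk in this file makes the same two mechanical changes: the logging helpers now borrow the error (`&rusqlite::Error`) instead of consuming it, and `|row| Ok(row.get(0)?)` closures shrink to `|row| row.get(0)`, which already returns `rusqlite::Result`. A compressed, runnable sketch assuming the rusqlite crate from the workspace Cargo.toml; the table and queries are illustrative:

    use rusqlite::Connection;

    // Borrowing means callers keep ownership of the error if they need it later.
    fn sqlite_error(msg: &str, error: &rusqlite::Error) {
        eprintln!("sqlite {} error: {:?}", msg, error);
    }

    fn main() -> Result<(), ()> {
        let conn = Connection::open_in_memory().map_err(|e| sqlite_error("open", &e))?;
        conn.execute("CREATE TABLE account_t (token BLOB)", [])
            .map_err(|e| sqlite_error("account_t create", &e))?;

        let mut stmt = conn
            .prepare("SELECT token FROM account_t")
            .map_err(|e| sqlite_error("select prepare", &e))?;
        // `row.get(0)` already yields rusqlite::Result, so no `Ok(..?)` wrapper.
        let data: Result<Vec<Vec<u8>>, _> = stmt
            .query_map([], |row| row.get(0))
            .map_err(|e| sqlite_error("query_map", &e))?
            .collect();
        println!("rows: {:?}", data.map(|v| v.len()));
        Ok(())
    }

The related `stmt.execute(&[g_uuid])` to `stmt.execute([g_uuid])` changes follow rusqlite 0.28's `Params` API, which accepts arrays directly.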


@@ -212,7 +212,7 @@ impl<State: Clone + Send + Sync + 'static> tide::Middleware<State>
     }
 }

-const KANIDM_VERSION: &'static str = env!("CARGO_PKG_VERSION");
+const KANIDM_VERSION: &str = env!("CARGO_PKG_VERSION");

 #[derive(Default)]
 pub struct VersionHeaderMiddleware;
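The middleware change is clippy::redundant_static_lifetimes: a `const` (or `static`) of reference type already has the `'static` lifetime, so writing it out is noise.

    // `&str` here already means `&'static str`; the annotation added nothing.
    const KANIDM_VERSION: &str = env!("CARGO_PKG_VERSION");

    fn main() {
        println!("version {KANIDM_VERSION}");
    }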


@@ -392,7 +392,7 @@ pub fn create_https_server(
             .with_session_ttl(None)
             .with_cookie_name("kanidm-session")
             // Without this, the cookies won't be used on subdomains of origin.
-            .with_cookie_domain(&domain)
+            .with_cookie_domain(domain)
             // Im not sure if we need Lax here, I don't think we do because on the first get
             // we don't need the cookie since wasm drives the fetches.
             .with_same_site_policy(tide::http::cookies::SameSite::Strict),


@@ -59,7 +59,7 @@ async fn client_process<W: AsyncWrite + Unpin, R: AsyncRead + Unpin>(
     while let Some(Ok(protomsg)) = r.next().await {
         // Start the event
         let uat = session.uat.clone();
-        let caddr = client_address.clone();
+        let caddr = client_address;

         match client_process_msg(uat, caddr, protomsg, qe_r_ref).await {
             // I'd really have liked to have put this near the [LdapResponseState::Bind] but due
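Here `client_address` is a `Copy` type (a socket address), so `.clone()` was just a wordier copy (clippy::clone_on_copy). In isolation, with an illustrative address:

    use std::net::SocketAddr;

    fn main() {
        let client_address: SocketAddr = "127.0.0.1:636".parse().expect("valid address");
        let caddr = client_address; // was `client_address.clone()`
        // A copy leaves the original binding usable too.
        println!("{caddr} {client_address}");
    }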


@@ -432,7 +432,7 @@ pub async fn domain_rename_core(config: &Configuration) {
     match qs.read().await.get_db_domain_name() {
         Ok(old_domain_name) => {
             admin_info!(?old_domain_name, ?new_domain_name);
-            if &old_domain_name == &new_domain_name {
+            if old_domain_name == new_domain_name {
                 admin_info!("Domain name not changing, stopping.");
                 return;
             }
@@ -568,14 +568,14 @@ pub struct CoreHandle {

 impl CoreHandle {
     pub async fn shutdown(&mut self) {
-        if let Err(_) = self.tx.send(CoreAction::Shutdown) {
+        if self.tx.send(CoreAction::Shutdown).is_err() {
             eprintln!("No receivers acked shutdown request. Treating as unclean.");
             return;
         }

         // Wait on the handles.
         while let Some(handle) = self.handles.pop() {
-            if let Err(_) = handle.await {
+            if handle.await.is_err() {
                 eprintln!("A task failed to join");
             }
         }
@@ -607,7 +607,7 @@ pub async fn create_server_core(
     } else if config.tls_config.is_none() {
         // TLS is great! We won't run without it.
         error!("Running without TLS is not supported! Quitting!");
-        return Err({});
+        return Err(());
     }

     info!(
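These hunks swap `if let Err(_) = x` for `x.is_err()` (clippy::redundant_pattern_matching), drop the needless borrows on both sides of the domain-name comparison (clippy::op_ref), and replace the accidental-but-working `Err({})` with the conventional `Err(())` - `{}` is an empty block whose value is `()`, so both compile identically. The send-failure check in isolation, using std's mpsc channel as a stand-in for the real channel type:

    use std::sync::mpsc;

    fn main() {
        let (tx, rx) = mpsc::channel::<u32>();
        drop(rx); // no receivers remain
        // `.is_err()` states the intent directly; no pattern match needed.
        if tx.send(1).is_err() {
            eprintln!("No receivers acked shutdown request. Treating as unclean.");
        }
    }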


@@ -17,7 +17,7 @@ pub use crate::constants::values::*;
 use std::time::Duration;

 // Increment this as we add new schema types and values!!!
-pub const SYSTEM_INDEX_VERSION: i64 = 27;
+pub const SYSTEM_INDEX_VERSION: i64 = 28;

 /*
  * domain functional levels


@@ -300,22 +300,24 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
         // TODO: This could benefit from a search that only grabs uuids?
         let existing_entries = self
             .qs_write
-            .internal_search(f_all_sync.clone())
+            // .internal_search(f_all_sync.clone())
+            .internal_exists(f_all_sync.clone())
             .map_err(|e| {
                 error!("Failed to determine existing entries set");
                 e
             })?;

-        // This is the delete filter we need later.
+        /*
         let filter_or: Vec<_> = existing_entries
             .iter()
             .map(|e| f_eq("uuid", PartialValue::Uuid(e.get_uuid())))
             .collect();
+        */

         // We only need to delete the sync account itself.
         let delete_filter = filter!(f_eq("uuid", PartialValue::Uuid(sync_uuid)));

-        if !filter_or.is_empty() {
+        if existing_entries {
             // Now modify these to remove their sync related attributes.
             let schema = self.qs_write.get_schema();
             let sync_class = schema.get_classes().get("sync_object").ok_or_else(|| {
@@ -429,16 +431,16 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
                 e
             })?;

-        // This is the delete filter we need later.
-        let filter_or: Vec<_> = existing_entries
-            .iter()
-            .map(|e| f_eq("uuid", PartialValue::Uuid(e.get_uuid())))
-            .collect();
-
-        let delete_filter = if filter_or.is_empty() {
+        let delete_filter = if existing_entries.is_empty() {
             // We only need to delete the sync account itself.
             filter!(f_eq("uuid", PartialValue::Uuid(sync_uuid)))
         } else {
+            // This is the delete filter we need later.
+            let filter_or: Vec<_> = existing_entries
+                .iter()
+                .map(|e| f_eq("uuid", PartialValue::Uuid(e.get_uuid())))
+                .collect();
+
             // Now modify these to remove their sync related attributes.
             let schema = self.qs_write.get_schema();
             let sync_class = schema.get_classes().get("sync_object").ok_or_else(|| {
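The first hunk replaces `internal_search` with `internal_exists` where only a boolean answer is needed: the uuid-collection that consumed the result set is commented out, so materialising every matching entry was wasted work. The second hunk keeps the search but moves the `filter_or` construction into the only branch that uses it. The shape of the search-versus-exists trade, with simple stand-ins rather than kanidm's real query API:

    struct Store {
        entries: Vec<u32>,
    }

    impl Store {
        // internal_search analogue: clones every match into a new Vec.
        fn search(&self, pred: impl Fn(&u32) -> bool) -> Vec<u32> {
            self.entries.iter().copied().filter(|e| pred(e)).collect()
        }
        // internal_exists analogue: stops at the first match, allocates nothing.
        fn exists(&self, pred: impl Fn(&u32) -> bool) -> bool {
            self.entries.iter().any(pred)
        }
    }

    fn main() {
        let store = Store { entries: vec![1, 2, 3] };
        if store.exists(|e| *e > 1) {
            println!("sync objects present; scrub their sync attributes");
        }
        assert_eq!(store.search(|e| *e > 1), vec![2, 3]);
    }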


@@ -1,3 +1,6 @@
 pub mod cid;
 pub mod entry;
 pub mod ruv;
+
+#[cfg(test)]
+mod tests;
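This registers a tests module gated behind `#[cfg(test)]`, the standard idiom for keeping test code out of release builds. In sketch form (inline stubs stand in for the real cid/entry/ruv files):

    pub mod cid {}
    pub mod entry {}
    pub mod ruv {}

    // Compiled only under `cargo test`; release builds never see it.
    #[cfg(test)]
    mod tests {
        #[test]
        fn it_builds() {
            assert_eq!(1 + 1, 2);
        }
    }

    fn main() {}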


@@ -1,7 +1,6 @@
-use crate::prelude::*;
+// use crate::prelude::*;

 #[tokio::test]
 async fn multiple_qs_setup() {
     assert!(true);
 }


@@ -213,7 +213,7 @@ pub trait AccessControlsTransaction<'a> {
         let allowed_entries: Vec<_> = entries
             .into_iter()
             .filter(|e| {
-                match apply_search_access(&se.ident, related_acp.as_slice(), &e) {
+                match apply_search_access(&se.ident, related_acp.as_slice(), e) {
                     SearchResult::Denied => false,
                     SearchResult::Grant => true,
                     SearchResult::Allow(allowed_attrs) => {
@@ -449,7 +449,7 @@ pub trait AccessControlsTransaction<'a> {
         security_access!(?requested_classes, "Requested class set");

         let r = entries.iter().all(|e| {
-            match apply_modify_access(&me.ident, related_acp.as_slice(), &e) {
+            match apply_modify_access(&me.ident, related_acp.as_slice(), e) {
                 ModifyResult::Denied => false,
                 ModifyResult::Grant => true,
                 ModifyResult::Allow { pres, rem, cls } => {
@@ -582,7 +582,7 @@ pub trait AccessControlsTransaction<'a> {
         security_access!(?requested_rem, "Requested remove set");
         security_access!(?requested_classes, "Requested class set");

-        match apply_modify_access(&me.ident, related_acp.as_slice(), &e) {
+        match apply_modify_access(&me.ident, related_acp.as_slice(), e) {
             ModifyResult::Denied => false,
             ModifyResult::Grant => true,
             ModifyResult::Allow { pres, rem, cls } => {
@@ -665,7 +665,7 @@ pub trait AccessControlsTransaction<'a> {
         // For each entry
         let r = entries.iter().all(|e| {
-            match apply_create_access(&ce.ident, related_acp.as_slice(), &e) {
+            match apply_create_access(&ce.ident, related_acp.as_slice(), e) {
                 CreateResult::Denied => false,
                 CreateResult::Grant => true,
             }
@@ -729,7 +729,7 @@ pub trait AccessControlsTransaction<'a> {
         // For each entry
         let r = entries.iter().all(|e| {
-            match apply_delete_access(&de.ident, related_acp.as_slice(), &e) {
+            match apply_delete_access(&de.ident, related_acp.as_slice(), e) {
                 DeleteResult::Denied => false,
                 DeleteResult::Grant => true,
             }
@@ -798,7 +798,7 @@ pub trait AccessControlsTransaction<'a> {
             .map(|e| {
                 // == search ==
                 let search_effective =
-                    match apply_search_access(ident, search_related_acp.as_slice(), &e) {
+                    match apply_search_access(ident, search_related_acp.as_slice(), e) {
                         SearchResult::Denied => Access::Denied,
                         SearchResult::Grant => Access::Grant,
                         SearchResult::Allow(allowed_attrs) => {
@@ -810,7 +810,7 @@ pub trait AccessControlsTransaction<'a> {

                 // == modify ==
                 let (modify_pres, modify_rem, modify_class) =
-                    match apply_modify_access(ident, modify_related_acp.as_slice(), &e) {
+                    match apply_modify_access(ident, modify_related_acp.as_slice(), e) {
                         ModifyResult::Denied => (Access::Denied, Access::Denied, Access::Denied),
                         ModifyResult::Grant => (Access::Grant, Access::Grant, Access::Grant),
                         ModifyResult::Allow { pres, rem, cls } => (


@@ -45,6 +45,9 @@ pub mod recycle;
 const RESOLVE_FILTER_CACHE_MAX: usize = 4096;
 const RESOLVE_FILTER_CACHE_LOCAL: usize = 0;

+pub type ResolveFilterCacheReadTxn<'a> =
+    ARCacheReadTxn<'a, (IdentityId, Filter<FilterValid>), Filter<FilterValidResolved>, ()>;
+
 #[derive(Debug, Clone, PartialOrd, PartialEq, Eq)]
 enum ServerPhase {
     Bootstrap,
@@ -140,9 +143,7 @@ pub trait QueryServerTransaction<'a> {
     fn get_domain_display_name(&self) -> &str;

-    fn get_resolve_filter_cache(
-        &mut self,
-    ) -> &mut ARCacheReadTxn<'a, (IdentityId, Filter<FilterValid>), Filter<FilterValidResolved>, ()>;
+    fn get_resolve_filter_cache(&mut self) -> &mut ResolveFilterCacheReadTxn<'a>;

     // Because of how borrowck in rust works, if we need to get two inner types we have to get them
     // in a single fn.
@@ -151,7 +152,7 @@ pub trait QueryServerTransaction<'a> {
         &mut self,
     ) -> (
         &mut Self::BackendTransactionType,
-        &mut ARCacheReadTxn<'a, (IdentityId, Filter<FilterValid>), Filter<FilterValidResolved>, ()>,
+        &mut ResolveFilterCacheReadTxn<'a>,
     );

     /// Conduct a search and apply access controls to yield a set of entries that
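The new `ResolveFilterCacheReadTxn` alias names the long `ARCacheReadTxn<..>` generic once so both trait methods can refer to it. The mechanism in miniature, with simplified stand-ins for kanidm's types:

    use std::collections::HashMap;

    type IdentityId = u64;
    // Stand-in for ARCacheReadTxn<'a, (IdentityId, Filter<..>), Filter<..>, ()>.
    type ResolveFilterCache = HashMap<(IdentityId, String), String>;

    trait QueryServerTransaction {
        // Without the alias, the full generic type appears in every signature.
        fn get_resolve_filter_cache(&mut self) -> &mut ResolveFilterCache;
    }

    struct ReadTxn {
        cache: ResolveFilterCache,
    }

    impl QueryServerTransaction for ReadTxn {
        fn get_resolve_filter_cache(&mut self) -> &mut ResolveFilterCache {
            &mut self.cache
        }
    }

    fn main() {
        let mut txn = ReadTxn { cache: ResolveFilterCache::new() };
        txn.get_resolve_filter_cache()
            .insert((1, "class=person".into()), "resolved".into());
        println!("cached filters: {}", txn.get_resolve_filter_cache().len());
    }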


@@ -133,9 +133,7 @@ impl TotpRemoveComp {
                 let status: CUStatus =
                     serde_wasm_bindgen::from_value(jsval).expect_throw("Invalid response type");

-                cb.emit(EventBusMsg::UpdateStatus {
-                    status: status.clone(),
-                });
+                cb.emit(EventBusMsg::UpdateStatus { status });

                 Ok(Msg::Success)
             } else {
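The callback owns `status`, so `status: status.clone()` both cloned needlessly (clippy::redundant_clone) and repeated the field name (clippy::redundant_field_names); field-init shorthand moves the value instead. Outside yew, with illustrative types:

    #[derive(Debug)]
    struct CUStatus {
        displayname: String,
    }

    #[derive(Debug)]
    enum EventBusMsg {
        UpdateStatus { status: CUStatus },
    }

    fn emit(msg: EventBusMsg) {
        println!("{msg:?}");
    }

    fn main() {
        let status = CUStatus { displayname: "demo".into() };
        // Shorthand moves `status`; no clone, no repeated name.
        emit(EventBusMsg::UpdateStatus { status });
    }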


@@ -48,6 +48,8 @@ fn landing() -> Html {
     html! { <main></main> }
 }

+// Needed for yew to pass by value
+#[allow(clippy::needless_pass_by_value)]
 fn switch(route: Route) -> Html {
     #[cfg(debug_assertions)]
     console::debug!("manager::switch");


@@ -12,7 +12,7 @@ const N_GROUPS: usize = 1500;
 const N_MEMBERSHIPS: usize = 10;
 const N_NEST: usize = 4;

-pub(crate) fn doit(output: &Path) -> () {
+pub(crate) fn doit(output: &Path) {
     info!(
         "Performing data generation into {}",
         output.to_str().unwrap(),
@@ -68,7 +68,8 @@ pub(crate) fn doit(output: &Path) -> () {
     let mut chunk_iter = groups.chunks_mut(chunk_size);
     // Can't fail due to above checks.
     let mut p_chunk = chunk_iter.next().unwrap();
-    while let Some(w_chunk) = chunk_iter.next() {
+    // while let Some(w_chunk) = chunk_iter.next() {
+    for w_chunk in chunk_iter {
         // add items from work chunk to parent chunk
         p_chunk
             .iter_mut()
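The loop change is clippy::while_let_on_iterator - driving an iterator with `while let Some(x) = it.next()` is just a `for` loop - and the signature drops the redundant `-> ()` return type (clippy::unused_unit). Note that `chunks_mut` hands out non-overlapping `&mut` slices, which is why the parent chunk stays usable while the loop consumes the rest of the iterator. In isolation:

    fn main() {
        let mut groups = vec![1, 2, 3, 4, 5, 6];
        let mut chunk_iter = groups.chunks_mut(2);
        // Can't fail: the vec is non-empty.
        let p_chunk = chunk_iter.next().unwrap();

        // was: while let Some(w_chunk) = chunk_iter.next() { ... }
        for w_chunk in chunk_iter {
            // add items from the work chunk into the parent chunk
            for (p, w) in p_chunk.iter_mut().zip(w_chunk.iter()) {
                *p += *w;
            }
        }
        println!("{p_chunk:?}");
    }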