Cleanup of println and other outputs (#3266)

Firstyear 2024-12-04 15:13:14 +10:00 committed by GitHub
parent 516874460b
commit 9b3350f753
11 changed files with 76 additions and 97 deletions

View file

@@ -409,43 +409,18 @@ pub async fn restore_server_core(config: &Configuration, dst_path: &str) {
     }
     info!("Database loaded successfully");
-    info!("Attempting to init query server ...");
-    let (qs, _idms, _idms_delayed, _idms_audit) = match setup_qs_idms(be, schema, config).await {
-        Ok(t) => t,
-        Err(e) => {
-            error!("Unable to setup query server or idm server -> {:?}", e);
-            return;
-        }
-    };
-    info!("Success!");
-    info!("Start reindex phase ...");
-    let Ok(mut qs_write) = qs.write(duration_from_epoch_now()).await else {
-        error!("Unable to acquire write transaction");
-        return;
-    };
-    let r = qs_write.reindex().and_then(|_| qs_write.commit());
-    match r {
-        Ok(_) => info!("Reindex Success!"),
-        Err(e) => {
-            error!("Restore failed: {:?}", e);
-            std::process::exit(1);
-        }
-    };
+    reindex_inner(be, schema, config).await;
+    info!("✅ Restore Success!");
 }
 pub async fn reindex_server_core(config: &Configuration) {
-    eprintln!("Start Index Phase 1 ...");
+    info!("Start Index Phase 1 ...");
     // First, we provide the in-memory schema so that core attrs are indexed correctly.
     let schema = match Schema::new() {
         Ok(s) => s,
         Err(e) => {
-            eprintln!("Failed to setup in memory schema: {:?}", e);
+            error!("Failed to setup in memory schema: {:?}", e);
             std::process::exit(1);
         }
     };
@@ -458,6 +433,12 @@ pub async fn reindex_server_core(config: &Configuration) {
         }
     };
+    reindex_inner(be, schema, config).await;
+    info!("✅ Reindex Success!");
+}
+async fn reindex_inner(be: Backend, schema: Schema, config: &Configuration) {
     // Reindex only the core schema attributes to bootstrap the process.
     let mut be_wr_txn = match be.write() {
         Ok(txn) => txn,
@@ -469,16 +450,17 @@
             return;
         }
     };
-    let r = be_wr_txn.reindex().and_then(|_| be_wr_txn.commit());
+    let r = be_wr_txn.reindex(true).and_then(|_| be_wr_txn.commit());
     // Now that's done, setup a minimal qs and reindex from that.
     if r.is_err() {
-        eprintln!("Failed to reindex database: {:?}", r);
+        error!("Failed to reindex database: {:?}", r);
         std::process::exit(1);
     }
-    eprintln!("Index Phase 1 Success!");
+    info!("Index Phase 1 Success!");
-    eprintln!("Attempting to init query server ...");
+    info!("Attempting to init query server ...");
     let (qs, _idms, _idms_delayed, _idms_audit) = match setup_qs_idms(be, schema, config).await {
         Ok(t) => t,
@@ -487,20 +469,20 @@
             return;
         }
     };
-    eprintln!("Init Query Server Success!");
+    info!("Init Query Server Success!");
-    eprintln!("Start Index Phase 2 ...");
+    info!("Start Index Phase 2 ...");
     let Ok(mut qs_write) = qs.write(duration_from_epoch_now()).await else {
         error!("Unable to acquire write transaction");
         return;
     };
-    let r = qs_write.reindex().and_then(|_| qs_write.commit());
+    let r = qs_write.reindex(true).and_then(|_| qs_write.commit());
     match r {
-        Ok(_) => eprintln!("Index Phase 2 Success!"),
+        Ok(_) => info!("Index Phase 2 Success!"),
         Err(e) => {
-            eprintln!("Reindex failed: {:?}", e);
+            error!("Reindex failed: {:?}", e);
             std::process::exit(1);
         }
     };
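Taken together, the hunks above collapse the duplicated restore and reindex paths into one shared helper, with operator-facing progress going through tracing macros instead of raw prints. A condensed sketch of the resulting control flow (simplified from the diff, not verbatim source; error handling elided):

    pub async fn restore_server_core(config: &Configuration, dst_path: &str) {
        // ... restore and verify the database ...
        reindex_inner(be, schema, config).await;
        info!("✅ Restore Success!");
    }

    pub async fn reindex_server_core(config: &Configuration) {
        info!("Start Index Phase 1 ...");
        // ... build the in-memory schema and open the backend ...
        reindex_inner(be, schema, config).await;
        info!("✅ Reindex Success!");
    }

    async fn reindex_inner(be: Backend, schema: Schema, config: &Configuration) {
        // Phase 1: backend-level reindex; `reindex(true)` echoes progress to stderr.
        // Phase 2: bring up a minimal query server and reindex through it.
    }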

View file

@@ -410,6 +410,8 @@ async fn start_daemon(
     // HERE'S WHERE YOU CAN START USING THE LOGGER
     // ************************************************
     info!(version = %env!("KANIDM_PKG_VERSION"), "Starting Kanidmd");
+    // guard which shuts down the logging/tracing providers when we close out
+    let _otelguard = TracingPipelineGuard {};
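The `_otelguard` binding uses Rust's drop-guard idiom: the value does nothing while alive, but its `Drop` impl performs shutdown work on every exit path, including early returns. A minimal sketch of the pattern (this `PipelineGuard` type is illustrative only, not Kanidm's actual `TracingPipelineGuard`):

    struct PipelineGuard;

    impl Drop for PipelineGuard {
        fn drop(&mut self) {
            // Flush and shut down the logging/tracing exporters here.
            eprintln!("tracing pipeline shut down");
        }
    }

    fn run_daemon() {
        let _guard = PipelineGuard; // held until the end of scope
        // ... daemon work ...
    } // `drop` runs here, even if we returned early above

Binding to a named `_otelguard` rather than a bare `_` matters: `let _ = PipelineGuard;` would drop the guard immediately instead of at scope exit.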

View file

@@ -1138,7 +1138,7 @@ impl IdlArcSqliteWriteTransaction<'_> {
     /// specific situations.
     #[instrument(level = "trace", skip_all)]
     pub fn danger_purge_idxs(&mut self) -> Result<(), OperationError> {
-        warn!("CLEARING CACHE");
+        debug!("CLEARING CACHE");
         self.db.danger_purge_idxs().map(|()| {
             self.idl_cache.clear();
             self.name_cache.clear();

View file

@@ -1684,7 +1684,7 @@ impl<'a> BackendWriteTransaction<'a> {
         let dbv = self.get_db_index_version()?;
         admin_debug!(?dbv, ?v, "upgrade_reindex");
         if dbv < v {
-            self.reindex()?;
+            self.reindex(false)?;
             self.set_db_index_version(v)
         } else {
             Ok(())
@@ -1692,8 +1692,14 @@
     }
     #[instrument(level = "info", skip_all)]
-    pub fn reindex(&mut self) -> Result<(), OperationError> {
-        limmediate_warning!("NOTICE: System reindex started\n");
+    pub fn reindex(&mut self, immediate: bool) -> Result<(), OperationError> {
+        let notice_immediate = immediate || (cfg!(not(test)) && cfg!(not(debug_assertions)));
+        info!(
+            immediate = notice_immediate,
+            "System reindex: started - this may take a long time!"
+        );
         // Purge the idxs
         self.idlayer.danger_purge_idxs()?;
@@ -1704,39 +1710,46 @@
         // Future idea: Do this in batches of X amount to limit memory
         // consumption.
         let idl = IdList::AllIds;
-        let entries = self.idlayer.get_identry(&idl).map_err(|e| {
-            admin_error!(err = ?e, "get_identry failure");
-            e
+        let entries = self.idlayer.get_identry(&idl).inspect_err(|err| {
+            error!(?err, "get_identry failure");
         })?;
         let mut count = 0;
+        // This is the longest phase of reindexing, so we have a "progress" display here.
         entries
             .iter()
             .try_for_each(|e| {
+                if immediate {
                 count += 1;
                 if count % 2500 == 0 {
-                    limmediate_warning!("{}", count);
+                    eprint!("{}", count);
                 } else if count % 250 == 0 {
-                    limmediate_warning!(".");
+                    eprint!(".");
                 }
+                }
                 self.entry_index(None, Some(e))
             })
-            .map_err(|e| {
-                admin_error!("reindex failed -> {:?}", e);
-                e
+            .inspect_err(|err| {
+                error!(?err, "reindex failed");
             })?;
-        limmediate_warning!("done ✅: reindexed {} entries\n", count);
-        limmediate_warning!("Optimising Indexes ... ");
+        if immediate {
+            eprintln!(" done ✅");
+        }
+        info!(immediate, "Reindexed {count} entries");
+        info!("Optimising Indexes: started");
         self.idlayer.optimise_dirty_idls();
-        limmediate_warning!("done ✅\n");
-        limmediate_warning!("Calculating Index Optimisation Slopes ... ");
-        self.idlayer.analyse_idx_slopes().map_err(|e| {
-            admin_error!(err = ?e, "index optimisation failed");
-            e
+        info!("Optimising Indexes: complete ✅");
+        info!("Calculating Index Optimisation Slopes: started");
+        self.idlayer.analyse_idx_slopes().inspect_err(|err| {
+            error!(?err, "index optimisation failed");
         })?;
-        limmediate_warning!("done ✅\n");
-        limmediate_warning!("NOTICE: System reindex complete\n");
+        info!("Calculating Index Optimisation Slopes: complete ✅");
+        info!("System reindex: complete 🎉");
         Ok(())
     }
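The new `immediate` flag lets interactive callers opt into stderr progress output, while the `cfg!` expression above forces the startup notice to read as immediate in release builds even when the caller passed `false`. A standalone sketch of how that boolean evaluates (illustration only, not project code):

    fn notice_immediate(immediate: bool) -> bool {
        // `cfg!(...)` expands to a compile-time `true`/`false` literal.
        // Release build (no `test`, no `debug_assertions`): `immediate || true`.
        // Test or debug build: the right-hand side is `false`, so this is
        // just `immediate`.
        immediate || (cfg!(not(test)) && cfg!(not(debug_assertions)))
    }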
@@ -1907,9 +1920,6 @@ impl<'a> BackendWriteTransaction<'a> {
         info!("Restored {} entries", dbentries.len());
-        // Reindex now we are loaded.
-        self.reindex()?;
         let vr = self.verify();
         if vr.is_empty() {
             Ok(())
@@ -2696,7 +2706,7 @@ mod tests {
             // Add some test data?
             let missing = be.missing_idxs().unwrap();
             assert_eq!(missing.len(), 7);
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             let missing = be.missing_idxs().unwrap();
             debug!("{:?}", missing);
             assert!(missing.is_empty());
@@ -2730,7 +2740,7 @@
             // Check they are gone
             let missing = be.missing_idxs().unwrap();
             assert_eq!(missing.len(), 7);
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             let missing = be.missing_idxs().unwrap();
             debug!("{:?}", missing);
             assert!(missing.is_empty());
@@ -2876,7 +2886,7 @@
     fn test_be_index_create_delete_simple() {
         run_test!(|be: &mut BackendWriteTransaction| {
             // First, setup our index tables!
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // Test that on entry create, the indexes are made correctly.
             // this is a similar case to reindex.
             let mut e1: Entry<EntryInit, EntryNew> = Entry::new();
@@ -2979,7 +2989,7 @@
         run_test!(|be: &mut BackendWriteTransaction| {
             // delete multiple entries at a time, without deleting others
             // First, setup our index tables!
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // Test that on entry create, the indexes are made correctly.
             // this is a similar case to reindex.
             let mut e1: Entry<EntryInit, EntryNew> = Entry::new();
@@ -3079,7 +3089,7 @@
     #[test]
     fn test_be_index_modify_simple() {
         run_test!(|be: &mut BackendWriteTransaction| {
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // modify with one type, ensuring we clean the indexes behind
             // us. For the test to be "accurate" we must add one attr, remove one attr
             // and change one attr.
@@ -3158,7 +3168,7 @@
     #[test]
     fn test_be_index_modify_rename() {
         run_test!(|be: &mut BackendWriteTransaction| {
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // test when we change name AND uuid
             // This will be needing to be correct for conflicts when we add
             // replication support!
@@ -3251,7 +3261,7 @@
     #[test]
     fn test_be_index_search_simple() {
         run_test!(|be: &mut BackendWriteTransaction| {
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // Create a test entry with some indexed / unindexed values.
             let mut e1: Entry<EntryInit, EntryNew> = Entry::new();
@@ -3622,7 +3632,7 @@
             // Now check slope generation for the values. Today these are calculated
             // at reindex time, so we now perform the re-index.
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             assert!(be.is_idx_slopeyness_generated().unwrap());
             let ta_eq_slope = be
@@ -3733,7 +3743,7 @@
             assert!(res.is_ok());
             // --> This will shortcut due to indexing.
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             let res = be.search(&lim_deny, &filt);
             assert_eq!(res, Err(OperationError::ResourceLimit));
             // we don't limit on exists because we never load the entries.
@@ -3774,7 +3784,7 @@
             assert!(single_result.is_ok());
             // Reindex so we have things in place for our query
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // 🚨 This is evil!
             // The and allows us to hit "allids + indexed -> partial".

View file

@@ -291,8 +291,6 @@ where
         .map(|u| f_eq(Attribute::Uuid, PartialValue::Uuid(u)))
         .collect();
-    println!("{:?}", conditions);
     let f = filter!(f_or(conditions));
     let entries = qs.internal_search(f).map_err(|e| {
@@ -300,8 +298,6 @@
         e
     })?;
-    println!("{}", entries.len());
     let mut groups = vec![];
     let mut unix_groups = Group::<Unix>::try_from_entry(value)
         .ok()

View file

@@ -652,12 +652,6 @@ macro_rules! vs_cid {
     });
 }
-macro_rules! limmediate_warning {
-    ($($arg:tt)*) => ({
-        eprint!($($arg)*)
-    })
-}
 macro_rules! str_concat {
     ($str_iter:expr, $join_char:expr) => {{
         use itertools::Itertools;
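The deleted `limmediate_warning!` macro was a thin alias for `eprint!`, which is why the call sites earlier in this commit could be swapped to `eprint!` or `info!` directly. Before this commit, these two calls were equivalent:

    limmediate_warning!("done ✅: reindexed {} entries\n", count);
    eprint!("done ✅: reindexed {} entries\n", count);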

View file

@@ -623,7 +623,7 @@ impl QueryServerWriteTransaction<'_> {
         // We have to reindex to force all the existing indexes to be dumped
         // and recreated before we start to import.
-        self.reindex().inspect_err(|err| {
+        self.reindex(false).inspect_err(|err| {
             error!(?err, "Failed to reload schema");
         })?;

View file

@@ -18,9 +18,6 @@ fn repl_initialise(
     // First, build the refresh context.
     let refresh_context = from.supplier_provide_refresh()?;
     // Verify content of the refresh
-    // eprintln!("{:#?}", refresh_context);
     // Apply it to the server
     to.consumer_apply_refresh(refresh_context)?;

View file

@@ -58,7 +58,7 @@ impl QueryServer {
             // sure that some base IDM operations are fast. Since this is still
             // very early in the bootstrap process, and very few entries exist,
             // reindexing is very fast here.
-            write_txn.reindex()?;
+            write_txn.reindex(false)?;
         } else {
             // Domain info was present, so we need to reflect that in our server
             // domain structures. If we don't do this, the in memory domain level
@@ -184,7 +184,7 @@
             write_txn.reload()?;
             // We are not yet at the schema phase where reindexes will auto-trigger
             // so if one was required, do it now.
-            write_txn.reindex()?;
+            write_txn.reindex(false)?;
         }
         // Now set the db/domain devel taint flag to match our current release status

View file

@@ -2431,11 +2431,11 @@ impl<'a> QueryServerWriteTransaction<'a> {
         self.internal_modify(&filt, &modl)
     }
-    pub fn reindex(&mut self) -> Result<(), OperationError> {
+    pub fn reindex(&mut self, immediate: bool) -> Result<(), OperationError> {
         // initiate a be reindex here. This could have been from first run checking
         // the versions, or it could just be from the cli where an admin needs to do an
         // indexing.
-        self.be_txn.reindex()
+        self.be_txn.reindex(immediate)
     }
     fn force_schema_reload(&mut self) {
@@ -2491,7 +2491,7 @@
         // operational, then a reindex may be required. After the reindex, the schema
         // must also be reloaded so that slope optimisation indexes are loaded correctly.
         if *self.phase >= ServerPhase::Running {
-            self.reindex()?;
+            self.reindex(false)?;
             self.reload_schema()?;
         }
     }
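With the new parameter, every caller now states whether reindex progress should be echoed to stderr. As the hunks in this commit show, the interactive reindex path passes `true`, while bootstrap and migration paths pass `false`:

    // Interactive `kanidmd` reindex path (via reindex_inner): show progress.
    let r = qs_write.reindex(true).and_then(|_| qs_write.commit());

    // Startup / migration paths: log-only, no stderr output.
    write_txn.reindex(false)?;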

View file

@@ -60,16 +60,14 @@ pub fn has_trailer(contents: &Vec<u8>) -> Result<bool, ImageValidationError> {
         ))
     } else if (eoi_index + 2) < buf.len() {
         // there's still bytes in the buffer after the EOI magic bytes
-        #[cfg(any(test, debug_assertions))]
-        println!(
+        debug!(
             "we're at pos: {} and buf len is {}, is not OK",
             eoi_index,
             buf.len()
         );
         Ok(true)
     } else {
-        #[cfg(any(test, debug_assertions))]
-        println!(
+        debug!(
             "we're at pos: {} and buf len is {}, is OK",
             eoi_index,
             buf.len()