Cleanup of println and other outputs (#3266)

Firstyear 2024-12-04 15:13:14 +10:00 committed by GitHub
parent 516874460b
commit 9b3350f753
11 changed files with 76 additions and 97 deletions

View file

@@ -409,43 +409,18 @@ pub async fn restore_server_core(config: &Configuration, dst_path: &str) {
         }
     }
     info!("Database loaded successfully");
-    info!("Attempting to init query server ...");
-    let (qs, _idms, _idms_delayed, _idms_audit) = match setup_qs_idms(be, schema, config).await {
-        Ok(t) => t,
-        Err(e) => {
-            error!("Unable to setup query server or idm server -> {:?}", e);
-            return;
-        }
-    };
-    info!("Success!");
-    info!("Start reindex phase ...");
-    let Ok(mut qs_write) = qs.write(duration_from_epoch_now()).await else {
-        error!("Unable to acquire write transaction");
-        return;
-    };
-    let r = qs_write.reindex().and_then(|_| qs_write.commit());
-    match r {
-        Ok(_) => info!("Reindex Success!"),
-        Err(e) => {
-            error!("Restore failed: {:?}", e);
-            std::process::exit(1);
-        }
-    };
+    reindex_inner(be, schema, config).await;
     info!("✅ Restore Success!");
 }
 pub async fn reindex_server_core(config: &Configuration) {
-    eprintln!("Start Index Phase 1 ...");
+    info!("Start Index Phase 1 ...");
     // First, we provide the in-memory schema so that core attrs are indexed correctly.
     let schema = match Schema::new() {
         Ok(s) => s,
         Err(e) => {
-            eprintln!("Failed to setup in memory schema: {:?}", e);
+            error!("Failed to setup in memory schema: {:?}", e);
             std::process::exit(1);
         }
     };
@@ -458,6 +433,12 @@ pub async fn reindex_server_core(config: &Configuration) {
         }
     };
+    reindex_inner(be, schema, config).await;
+    info!("✅ Reindex Success!");
+}
+
+async fn reindex_inner(be: Backend, schema: Schema, config: &Configuration) {
     // Reindex only the core schema attributes to bootstrap the process.
     let mut be_wr_txn = match be.write() {
         Ok(txn) => txn,
@@ -469,16 +450,17 @@ pub async fn reindex_server_core(config: &Configuration) {
             return;
         }
     };
-    let r = be_wr_txn.reindex().and_then(|_| be_wr_txn.commit());
+    let r = be_wr_txn.reindex(true).and_then(|_| be_wr_txn.commit());
     // Now that's done, setup a minimal qs and reindex from that.
     if r.is_err() {
-        eprintln!("Failed to reindex database: {:?}", r);
+        error!("Failed to reindex database: {:?}", r);
         std::process::exit(1);
     }
-    eprintln!("Index Phase 1 Success!");
-    eprintln!("Attempting to init query server ...");
+    info!("Index Phase 1 Success!");
+    info!("Attempting to init query server ...");
     let (qs, _idms, _idms_delayed, _idms_audit) = match setup_qs_idms(be, schema, config).await {
         Ok(t) => t,
@@ -487,20 +469,20 @@
             return;
         }
     };
-    eprintln!("Init Query Server Success!");
-    eprintln!("Start Index Phase 2 ...");
+    info!("Init Query Server Success!");
+    info!("Start Index Phase 2 ...");
     let Ok(mut qs_write) = qs.write(duration_from_epoch_now()).await else {
         error!("Unable to acquire write transaction");
         return;
     };
-    let r = qs_write.reindex().and_then(|_| qs_write.commit());
+    let r = qs_write.reindex(true).and_then(|_| qs_write.commit());
     match r {
-        Ok(_) => eprintln!("Index Phase 2 Success!"),
+        Ok(_) => info!("Index Phase 2 Success!"),
         Err(e) => {
-            eprintln!("Reindex failed: {:?}", e);
+            error!("Reindex failed: {:?}", e);
             std::process::exit(1);
         }
     };
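The pattern throughout this file: bare `eprintln!` writes are replaced with `tracing` events, so restore and reindex output flows through the same subscriber (and any OTLP pipeline) as the rest of the daemon. A minimal sketch of the difference, assuming the `tracing` and `tracing-subscriber` crates:

```rust
use tracing::{error, info};

fn main() {
    // eprintln! writes raw text straight to stderr, invisible to log collectors.
    eprintln!("Start Index Phase 1 ...");

    // A tracing event carries a level, target and structured fields, and is
    // routed through whatever subscriber the daemon installed at startup.
    tracing_subscriber::fmt().init();
    info!("Start Index Phase 1 ...");
    error!(err = ?"example", "Failed to reindex database");
}
```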

View file

@@ -410,6 +410,8 @@ async fn start_daemon(
     // HERE'S WHERE YOU CAN START USING THE LOGGER
     // ************************************************
+    info!(version = %env!("KANIDM_PKG_VERSION"), "Starting Kanidmd");
+
     // guard which shuts down the logging/tracing providers when we close out
     let _otelguard = TracingPipelineGuard {};
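A note on the added startup line: in `tracing`, a `%` sigil records a field via its `Display` implementation and `?` via `Debug`, while `env!("KANIDM_PKG_VERSION")` expands at compile time. A small sketch of the field sigils (the version string here is a stand-in, not from this commit):

```rust
use tracing::info;

fn main() {
    tracing_subscriber::fmt().init();
    let version = "1.4.0"; // stand-in for env!("KANIDM_PKG_VERSION")
    info!(version = %version, "Starting Kanidmd"); // Display: version=1.4.0
    info!(version = ?version, "Starting Kanidmd"); // Debug: version="1.4.0"
}
```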

View file

@@ -1138,7 +1138,7 @@ impl IdlArcSqliteWriteTransaction<'_> {
     /// specific situations.
     #[instrument(level = "trace", skip_all)]
     pub fn danger_purge_idxs(&mut self) -> Result<(), OperationError> {
-        warn!("CLEARING CACHE");
+        debug!("CLEARING CACHE");
         self.db.danger_purge_idxs().map(|()| {
             self.idl_cache.clear();
             self.name_cache.clear();

View file

@@ -1684,7 +1684,7 @@ impl<'a> BackendWriteTransaction<'a> {
         let dbv = self.get_db_index_version()?;
         admin_debug!(?dbv, ?v, "upgrade_reindex");
         if dbv < v {
-            self.reindex()?;
+            self.reindex(false)?;
             self.set_db_index_version(v)
         } else {
             Ok(())
@@ -1692,8 +1692,14 @@
     }
     #[instrument(level = "info", skip_all)]
-    pub fn reindex(&mut self) -> Result<(), OperationError> {
-        limmediate_warning!("NOTICE: System reindex started\n");
+    pub fn reindex(&mut self, immediate: bool) -> Result<(), OperationError> {
+        let notice_immediate = immediate || (cfg!(not(test)) && cfg!(not(debug_assertions)));
+        info!(
+            immediate = notice_immediate,
+            "System reindex: started - this may take a long time!"
+        );
         // Purge the idxs
         self.idlayer.danger_purge_idxs()?;
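For context on the `notice_immediate` line above: `cfg!` expands to a compile-time `true`/`false` constant rather than conditionally compiling code, so both sides of the expression always type-check. A minimal sketch of the same pattern (the `force` name is illustrative, not from this commit):

```rust
/// Sketch: decide whether to flag progress output as "immediate".
/// `cfg!(...)` evaluates at compile time, so in a release build
/// (no test, no debug_assertions) this is effectively `force || true`.
fn notice_immediate(force: bool) -> bool {
    force || (cfg!(not(test)) && cfg!(not(debug_assertions)))
}

fn main() {
    println!("{}", notice_immediate(false));
}
```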
@@ -1704,39 +1710,46 @@
         // Future idea: Do this in batches of X amount to limit memory
         // consumption.
         let idl = IdList::AllIds;
-        let entries = self.idlayer.get_identry(&idl).map_err(|e| {
-            admin_error!(err = ?e, "get_identry failure");
-            e
+        let entries = self.idlayer.get_identry(&idl).inspect_err(|err| {
+            error!(?err, "get_identry failure");
         })?;
         let mut count = 0;
+        // This is the longest phase of reindexing, so we have a "progress" display here.
         entries
             .iter()
             .try_for_each(|e| {
-                count += 1;
-                if count % 2500 == 0 {
-                    limmediate_warning!("{}", count);
-                } else if count % 250 == 0 {
-                    limmediate_warning!(".");
+                if immediate {
+                    count += 1;
+                    if count % 2500 == 0 {
+                        eprint!("{}", count);
+                    } else if count % 250 == 0 {
+                        eprint!(".");
+                    }
                 }
                 self.entry_index(None, Some(e))
             })
-            .map_err(|e| {
-                admin_error!("reindex failed -> {:?}", e);
-                e
+            .inspect_err(|err| {
+                error!(?err, "reindex failed");
             })?;
-        limmediate_warning!("done ✅: reindexed {} entries\n", count);
-        limmediate_warning!("Optimising Indexes ... ");
+        if immediate {
+            eprintln!(" done ✅");
+        }
+        info!(immediate, "Reindexed {count} entries");
+        info!("Optimising Indexes: started");
         self.idlayer.optimise_dirty_idls();
-        limmediate_warning!("done ✅\n");
-        limmediate_warning!("Calculating Index Optimisation Slopes ... ");
-        self.idlayer.analyse_idx_slopes().map_err(|e| {
-            admin_error!(err = ?e, "index optimisation failed");
-            e
+        info!("Optimising Indexes: complete ✅");
+        info!("Calculating Index Optimisation Slopes: started");
+        self.idlayer.analyse_idx_slopes().inspect_err(|err| {
+            error!(?err, "index optimisation failed");
         })?;
-        limmediate_warning!("done ✅\n");
-        limmediate_warning!("NOTICE: System reindex complete\n");
+        info!("Calculating Index Optimisation Slopes: complete ✅");
+        info!("System reindex: complete 🎉");
         Ok(())
     }
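A side note on the `map_err` → `inspect_err` changes above: `Result::inspect_err` (stable since Rust 1.76) runs a closure on a borrowed error and returns the `Result` unchanged, which removes the need for the trailing `e` that `map_err` required. A minimal sketch of the equivalence:

```rust
fn demo(r: Result<u32, String>) -> Result<u32, String> {
    // Before: map_err's closure must hand the error back to keep the type intact.
    let _same = r.clone().map_err(|e| {
        eprintln!("failed: {e:?}");
        e
    });
    // After: inspect_err only borrows the error, so nothing is returned.
    r.inspect_err(|err| {
        eprintln!("failed: {err:?}");
    })
}

fn main() {
    let _ = demo(Err("boom".to_string()));
}
```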
@@ -1907,9 +1920,6 @@ impl<'a> BackendWriteTransaction<'a> {
         info!("Restored {} entries", dbentries.len());
-        // Reindex now we are loaded.
-        self.reindex()?;
         let vr = self.verify();
         if vr.is_empty() {
             Ok(())
@@ -2696,7 +2706,7 @@
         // Add some test data?
         let missing = be.missing_idxs().unwrap();
         assert_eq!(missing.len(), 7);
-        assert!(be.reindex().is_ok());
+        assert!(be.reindex(false).is_ok());
         let missing = be.missing_idxs().unwrap();
         debug!("{:?}", missing);
         assert!(missing.is_empty());
@@ -2730,7 +2740,7 @@
         // Check they are gone
         let missing = be.missing_idxs().unwrap();
         assert_eq!(missing.len(), 7);
-        assert!(be.reindex().is_ok());
+        assert!(be.reindex(false).is_ok());
         let missing = be.missing_idxs().unwrap();
         debug!("{:?}", missing);
         assert!(missing.is_empty());
@@ -2876,7 +2886,7 @@
     fn test_be_index_create_delete_simple() {
         run_test!(|be: &mut BackendWriteTransaction| {
             // First, setup our index tables!
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // Test that on entry create, the indexes are made correctly.
             // this is a similar case to reindex.
             let mut e1: Entry<EntryInit, EntryNew> = Entry::new();
@@ -2979,7 +2989,7 @@
         run_test!(|be: &mut BackendWriteTransaction| {
             // delete multiple entries at a time, without deleting others
             // First, setup our index tables!
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // Test that on entry create, the indexes are made correctly.
             // this is a similar case to reindex.
             let mut e1: Entry<EntryInit, EntryNew> = Entry::new();
@@ -3079,7 +3089,7 @@
     #[test]
     fn test_be_index_modify_simple() {
         run_test!(|be: &mut BackendWriteTransaction| {
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // modify with one type, ensuring we clean the indexes behind
             // us. For the test to be "accurate" we must add one attr, remove one attr
             // and change one attr.
@@ -3158,7 +3168,7 @@
     #[test]
     fn test_be_index_modify_rename() {
         run_test!(|be: &mut BackendWriteTransaction| {
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // test when we change name AND uuid
             // This will be needing to be correct for conflicts when we add
             // replication support!
@@ -3251,7 +3261,7 @@
     #[test]
     fn test_be_index_search_simple() {
         run_test!(|be: &mut BackendWriteTransaction| {
-            assert!(be.reindex().is_ok());
+            assert!(be.reindex(false).is_ok());
             // Create a test entry with some indexed / unindexed values.
             let mut e1: Entry<EntryInit, EntryNew> = Entry::new();
@@ -3622,7 +3632,7 @@
         // Now check slope generation for the values. Today these are calculated
         // at reindex time, so we now perform the re-index.
-        assert!(be.reindex().is_ok());
+        assert!(be.reindex(false).is_ok());
         assert!(be.is_idx_slopeyness_generated().unwrap());
         let ta_eq_slope = be
@@ -3733,7 +3743,7 @@
         assert!(res.is_ok());
         // --> This will shortcut due to indexing.
-        assert!(be.reindex().is_ok());
+        assert!(be.reindex(false).is_ok());
         let res = be.search(&lim_deny, &filt);
         assert_eq!(res, Err(OperationError::ResourceLimit));
         // we don't limit on exists because we never load the entries.
@@ -3774,7 +3784,7 @@
         assert!(single_result.is_ok());
         // Reindex so we have things in place for our query
-        assert!(be.reindex().is_ok());
+        assert!(be.reindex(false).is_ok());
         // 🚨 This is evil!
         // The and allows us to hit "allids + indexed -> partial".

View file

@@ -291,8 +291,6 @@ where
             .map(|u| f_eq(Attribute::Uuid, PartialValue::Uuid(u)))
             .collect();
-        println!("{:?}", conditions);
         let f = filter!(f_or(conditions));
         let entries = qs.internal_search(f).map_err(|e| {
@@ -300,8 +298,6 @@
             e
         })?;
-        println!("{}", entries.len());
         let mut groups = vec![];
         let mut unix_groups = Group::<Unix>::try_from_entry(value)
             .ok()

View file

@@ -652,12 +652,6 @@ macro_rules! vs_cid {
     });
 }
-macro_rules! limmediate_warning {
-    ($($arg:tt)*) => ({
-        eprint!($($arg)*)
-    })
-}
 macro_rules! str_concat {
     ($str_iter:expr, $join_char:expr) => {{
         use itertools::Itertools;
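The removed `limmediate_warning!` was a thin declarative macro that forwarded its arguments to `eprint!`, so every call site wrote straight to stderr and bypassed the tracing subscriber entirely. A minimal sketch of that forwarding pattern:

```rust
// Sketch of the removed pattern: `$($arg:tt)*` captures any token stream
// and hands it to eprint! unchanged, format string and all.
macro_rules! limmediate_warning {
    ($($arg:tt)*) => ({
        eprint!($($arg)*)
    })
}

fn main() {
    limmediate_warning!("reindexed {} entries\n", 42);
}
```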

View file

@@ -623,7 +623,7 @@ impl QueryServerWriteTransaction<'_> {
         // We have to reindex to force all the existing indexes to be dumped
         // and recreated before we start to import.
-        self.reindex().inspect_err(|err| {
+        self.reindex(false).inspect_err(|err| {
             error!(?err, "Failed to reload schema");
         })?;

View file

@@ -18,9 +18,6 @@ fn repl_initialise(
     // First, build the refresh context.
     let refresh_context = from.supplier_provide_refresh()?;
-    // Verify content of the refresh
-    // eprintln!("{:#?}", refresh_context);
     // Apply it to the server
     to.consumer_apply_refresh(refresh_context)?;

View file

@@ -58,7 +58,7 @@ impl QueryServer {
         // sure that some base IDM operations are fast. Since this is still
         // very early in the bootstrap process, and very few entries exist,
         // reindexing is very fast here.
-        write_txn.reindex()?;
+        write_txn.reindex(false)?;
     } else {
         // Domain info was present, so we need to reflect that in our server
         // domain structures. If we don't do this, the in memory domain level
@@ -184,7 +184,7 @@ impl QueryServer {
         write_txn.reload()?;
         // We are not yet at the schema phase where reindexes will auto-trigger
         // so if one was required, do it now.
-        write_txn.reindex()?;
+        write_txn.reindex(false)?;
     }
     // Now set the db/domain devel taint flag to match our current release status

View file

@@ -2431,11 +2431,11 @@ impl<'a> QueryServerWriteTransaction<'a> {
         self.internal_modify(&filt, &modl)
     }
-    pub fn reindex(&mut self) -> Result<(), OperationError> {
+    pub fn reindex(&mut self, immediate: bool) -> Result<(), OperationError> {
         // initiate a be reindex here. This could have been from first run checking
         // the versions, or it could just be from the cli where an admin needs to do an
         // indexing.
-        self.be_txn.reindex()
+        self.be_txn.reindex(immediate)
     }
     fn force_schema_reload(&mut self) {
@@ -2491,7 +2491,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         // operational, then a reindex may be required. After the reindex, the schema
         // must also be reloaded so that slope optimisation indexes are loaded correctly.
         if *self.phase >= ServerPhase::Running {
-            self.reindex()?;
+            self.reindex(false)?;
             self.reload_schema()?;
         }
     }
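To summarise the new parameter across these call sites: `immediate: true` is passed only from the interactive CLI paths (restore and reindex), where dot-progress on stderr is useful, while server-internal reindexes pass `false` and rely on tracing output alone. A hedged sketch of the split, with names simplified from the real API:

```rust
// Sketch, assuming a simplified reindex(immediate: bool) like this commit's.
// `immediate` only controls interactive stderr progress; tracing events
// (not shown here) are emitted either way.
fn reindex(immediate: bool) -> Result<(), ()> {
    for count in 1..=10_000u32 {
        if immediate && count % 2500 == 0 {
            eprint!("{count}"); // interactive progress for CLI users
        }
    }
    if immediate {
        eprintln!(" done ✅");
    }
    Ok(())
}

fn main() {
    reindex(true).unwrap(); // CLI path: show progress
    reindex(false).unwrap(); // server path: logs only
}
```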

View file

@@ -60,16 +60,14 @@ pub fn has_trailer(contents: &Vec<u8>) -> Result<bool, ImageValidationError> {
         ))
     } else if (eoi_index + 2) < buf.len() {
         // there's still bytes in the buffer after the EOI magic bytes
-        #[cfg(any(test, debug_assertions))]
-        println!(
+        debug!(
             "we're at pos: {} and buf len is {}, is not OK",
             eoi_index,
             buf.len()
         );
         Ok(true)
     } else {
-        #[cfg(any(test, debug_assertions))]
-        println!(
+        debug!(
             "we're at pos: {} and buf len is {}, is OK",
             eoi_index,
             buf.len()
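On this last change: the cfg-gated `println!` existed only in test and debug builds, whereas `debug!` is always compiled and filtered at runtime by the subscriber's level (tracing's `release_max_level_*` cargo features can still strip it at compile time). A minimal sketch of the replacement pattern, with illustrative names not taken from this commit:

```rust
use tracing::debug;

// Sketch: a runtime-filtered debug event replacing a cfg-gated println.
fn check_trailer(eoi_index: usize, buf_len: usize) -> bool {
    let trailing = (eoi_index + 2) < buf_len;
    // Emitted only when the active subscriber enables DEBUG for this target.
    debug!(eoi_index, buf_len, trailing, "checked for bytes after EOI");
    trailing
}

fn main() {
    // Without a subscriber installed, the event is simply discarded.
    assert!(check_trailer(10, 20));
}
```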