Clippy Lints (#3255)

Firstyear 2024-11-30 16:13:26 +10:00 committed by GitHub
parent c1ed939c28
commit db101e6d26
33 changed files with 160 additions and 165 deletions

View file

@@ -215,7 +215,7 @@ mod tests {
 struct TestBVisitor;
-impl<'de> Visitor<'de> for TestBVisitor {
+impl Visitor<'_> for TestBVisitor {
 type Value = TestB;
 fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
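
Nearly every hunk in this commit applies the same transformation: an impl-block lifetime parameter that is only ever named once is replaced by the anonymous lifetime '_, the change suggested by clippy's needless_lifetimes lint (recently extended to cover impl blocks). A minimal before/after illustration on a made-up type, not taken from the diff:

struct Wrapper<'a>(&'a str);

// Before: the named lifetime 'a on the impl block adds nothing.
// impl<'a> std::fmt::Display for Wrapper<'a> { ... }

// After: the anonymous lifetime is sufficient and reads cleaner.
impl std::fmt::Display for Wrapper<'_> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.0)
    }
}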

View file

@@ -385,7 +385,7 @@ pub trait IdlArcSqliteTransaction {
 fn get_id2entry(&self, id: u64) -> Result<(u64, String), OperationError>;
 }
-impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> {
+impl IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'_> {
 fn get_identry(
 &mut self,
 idl: &IdList,
@@ -480,7 +480,7 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> {
 }
 }
-impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> {
+impl IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'_> {
 fn get_identry(
 &mut self,
 idl: &IdList,
@@ -578,7 +578,7 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> {
 }
 }
-impl<'a> IdlArcSqliteWriteTransaction<'a> {
+impl IdlArcSqliteWriteTransaction<'_> {
 #[cfg(any(test, debug_assertions))]
 #[instrument(level = "debug", name = "idl_arc_sqlite::clear_cache", skip_all)]
 pub fn clear_cache(&mut self) -> Result<(), OperationError> {

View file

@@ -43,7 +43,7 @@ pub trait IdxKeyToRef {
 fn keyref(&self) -> IdxKeyRef<'_>;
 }
-impl<'a> IdxKeyToRef for IdxKeyRef<'a> {
+impl IdxKeyToRef for IdxKeyRef<'_> {
 fn keyref(&self) -> IdxKeyRef<'_> {
 // Copy the self.
 *self
@@ -65,15 +65,15 @@ impl<'a> Borrow<dyn IdxKeyToRef + 'a> for IdxKey {
 }
 }
-impl<'a> PartialEq for (dyn IdxKeyToRef + 'a) {
+impl PartialEq for (dyn IdxKeyToRef + '_) {
 fn eq(&self, other: &Self) -> bool {
 self.keyref().eq(&other.keyref())
 }
 }
-impl<'a> Eq for (dyn IdxKeyToRef + 'a) {}
+impl Eq for (dyn IdxKeyToRef + '_) {}
-impl<'a> Hash for (dyn IdxKeyToRef + 'a) {
+impl Hash for (dyn IdxKeyToRef + '_) {
 fn hash<H: Hasher>(&self, state: &mut H) {
 self.keyref().hash(state)
 }
@@ -107,7 +107,7 @@ pub trait IdlCacheKeyToRef {
 fn keyref(&self) -> IdlCacheKeyRef<'_>;
 }
-impl<'a> IdlCacheKeyToRef for IdlCacheKeyRef<'a> {
+impl IdlCacheKeyToRef for IdlCacheKeyRef<'_> {
 fn keyref(&self) -> IdlCacheKeyRef<'_> {
 // Copy the self
 *self
@@ -130,27 +130,27 @@ impl<'a> Borrow<dyn IdlCacheKeyToRef + 'a> for IdlCacheKey {
 }
 }
-impl<'a> PartialEq for (dyn IdlCacheKeyToRef + 'a) {
+impl PartialEq for (dyn IdlCacheKeyToRef + '_) {
 fn eq(&self, other: &Self) -> bool {
 self.keyref().eq(&other.keyref())
 }
 }
-impl<'a> Eq for (dyn IdlCacheKeyToRef + 'a) {}
+impl Eq for (dyn IdlCacheKeyToRef + '_) {}
-impl<'a> Hash for (dyn IdlCacheKeyToRef + 'a) {
+impl Hash for (dyn IdlCacheKeyToRef + '_) {
 fn hash<H: Hasher>(&self, state: &mut H) {
 self.keyref().hash(state)
 }
 }
-impl<'a> PartialOrd for (dyn IdlCacheKeyToRef + 'a) {
+impl PartialOrd for (dyn IdlCacheKeyToRef + '_) {
 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
 Some(self.cmp(&other.keyref()))
 }
 }
-impl<'a> Ord for (dyn IdlCacheKeyToRef + 'a) {
+impl Ord for (dyn IdlCacheKeyToRef + '_) {
 fn cmp(&self, other: &Self) -> Ordering {
 self.keyref().cmp(&other.keyref())
 }

View file

@@ -33,7 +33,7 @@ pub enum KeyHandle {
 },
 }
-impl<'a> BackendWriteTransaction<'a> {
+impl BackendWriteTransaction<'_> {
 /// Retrieve a key stored in the database by it's key handle. This
 /// handle may require further processing for the key to be usable
 /// in higher level contexts as this is simply the storage layer
@@ -55,7 +55,7 @@ impl<'a> BackendWriteTransaction<'a> {
 }
 }
-impl<'a> IdlArcSqliteWriteTransaction<'a> {
+impl IdlArcSqliteWriteTransaction<'_> {
 pub(crate) fn get_key_handle(
 &mut self,
 handle: KeyHandleId,

View file

@@ -173,9 +173,9 @@ pub struct BackendReadTransaction<'a> {
 ruv: ReplicationUpdateVectorReadTransaction<'a>,
 }
-unsafe impl<'a> Sync for BackendReadTransaction<'a> {}
+unsafe impl Sync for BackendReadTransaction<'_> {}
-unsafe impl<'a> Send for BackendReadTransaction<'a> {}
+unsafe impl Send for BackendReadTransaction<'_> {}
 pub struct BackendWriteTransaction<'a> {
 idlayer: IdlArcSqliteWriteTransaction<'a>,
@@ -1009,7 +1009,7 @@ impl<'a> BackendTransaction for BackendReadTransaction<'a> {
 }
 }
-impl<'a> BackendReadTransaction<'a> {
+impl BackendReadTransaction<'_> {
 pub fn list_indexes(&mut self) -> Result<Vec<String>, OperationError> {
 self.get_idlayer().list_idxs()
 }

View file

@@ -1,62 +1,65 @@
+//! Represents a temporary denial of the credential to authenticate. This is used
+//! to ratelimit and prevent bruteforcing of accounts. At an initial failure the
+//! SoftLock is created and the count set to 1, with a unlock_at set to 1 second
+//! later, and a reset_count_at: at a maximum time window for a cycle.
+//!
+//! If the softlock already exists, and the failure count is 0, then this acts as the
+//! creation where the reset_count_at window is then set.
+//!
+//! While current_time < unlock_at, all authentication attempts are denied with a
+//! message regarding the account being temporarily unavailable. Once
+//! unlock_at < current_time, authentication will be processed again. If a subsequent
+//! failure occurs, unlock_at is extended based on policy, and failure_count incremented.
+//!
+//! If unlock_at < current_time, and authentication succeeds the login is allowed
+//! and no changes to failure_count or unlock_at are made.
+//!
+//! If reset_count_at < current_time, then failure_count is reset to 0 before processing.
+//!
+//! This allows handling of max_failure_count, so that when that value from policy is
+//! exceeded then unlock_at is set to reset_count_at to softlock until the cycle
+//! is over (see NIST sp800-63b.). For example, reset_count_at will be 24 hours after
+//! the first failed authentication attempt.
+//!
+//! This also works for something like TOTP which allows a 60 second cycle for the
+//! reset_count_at and a max number of attempts in that window (say 5). with short
+//! delays in between (1 second).
+//!
+//! ```
+//! [ASCII state diagram, alignment lost in extraction: Valid (count = 0) --Fail/count++--> Locked (count > 0) --unlock_at elapsed--> Unlocked --> Auth Success, with reset_at and expire_at versus current_time gating the transitions]
+//! ```
+//!
 use std::time::Duration;
-/// Represents a temporary denial of the credential to authenticate. This is used
-/// to ratelimit and prevent bruteforcing of accounts. At an initial failure the
-/// SoftLock is created and the count set to 1, with a unlock_at set to 1 second
-/// later, and a reset_count_at: at a maximum time window for a cycle.
-///
-/// If the softlock already exists, and the failure count is 0, then this acts as the
-/// creation where the reset_count_at window is then set.
-///
-/// While current_time < unlock_at, all authentication attempts are denied with a
-/// message regarding the account being temporarily unavailable. Once
-/// unlock_at < current_time, authentication will be processed again. If a subsequent
-/// failure occurs, unlock_at is extended based on policy, and failure_count incremented.
-///
-/// If unlock_at < current_time, and authentication succeeds the login is allowed
-/// and no changes to failure_count or unlock_at are made.
-///
-/// If reset_count_at < current_time, then failure_count is reset to 0 before processing.
-///
-/// This allows handling of max_failure_count, so that when that value from policy is
-/// exceeded then unlock_at is set to reset_count_at to softlock until the cycle
-/// is over (see NIST sp800-63b.). For example, reset_count_at will be 24 hours after
-/// the first failed authentication attempt.
-///
-/// This also works for something like TOTP which allows a 60 second cycle for the
-/// reset_count_at and a max number of attempts in that window (say 5). with short
-/// delays in between (1 second).
-//
-// [same ASCII state diagram as above, previously written as // line comments]
-//
-//
 const ONEDAY: u64 = 86400;
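
The module documentation above spells out the softlock algorithm in prose. As a rough, illustrative sketch only (the type, field names, backoff rule, and policy handling below are assumptions, not the actual Kanidm implementation in this file), the denial check and failure bookkeeping could look like this:

use std::time::Duration;

const ONEDAY: u64 = 86400;

// Hypothetical type - not the softlock struct defined in the file above.
struct SoftLockSketch {
    failure_count: u32,
    unlock_at: Duration,      // time since epoch at which auth may be retried
    reset_count_at: Duration, // end of the current counting window
}

impl SoftLockSketch {
    // While current_time < unlock_at, every attempt is denied.
    fn is_denied(&self, now: Duration) -> bool {
        now < self.unlock_at
    }

    // Record a failed attempt: reset the count if the window lapsed, then
    // extend unlock_at under an assumed linear backoff policy.
    fn record_failure(&mut self, now: Duration, max_failure_count: u32) {
        if self.reset_count_at < now {
            self.failure_count = 0;
            self.reset_count_at = now + Duration::from_secs(ONEDAY);
        }
        self.failure_count += 1;
        self.unlock_at = if self.failure_count > max_failure_count {
            // Policy exceeded: stay locked until the window rolls over.
            self.reset_count_at
        } else {
            now + Duration::from_secs(u64::from(self.failure_count))
        };
    }
}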

View file

@@ -846,7 +846,7 @@ impl DestroySessionTokenEvent {
 }
 }
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
 pub fn account_destroy_session_token(
 &mut self,
 dte: &DestroySessionTokenEvent,
@@ -973,7 +973,7 @@ pub struct ListUserAuthTokenEvent {
 pub target: Uuid,
 }
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
 pub fn account_list_user_auth_tokens(
 &mut self,
 lte: &ListUserAuthTokenEvent,

View file

@@ -62,7 +62,7 @@ pub struct LdapApplicationsWriteTransaction<'a> {
 inner: CowCellWriteTxn<'a, LdapApplicationsInner>,
 }
-impl<'a> LdapApplicationsWriteTransaction<'a> {
+impl LdapApplicationsWriteTransaction<'_> {
 pub fn reload(&mut self, value: Vec<Arc<EntrySealedCommitted>>) -> Result<(), OperationError> {
 let app_set: Result<HashMap<_, _>, _> = value
 .into_iter()

View file

@@ -2,7 +2,7 @@ use crate::idm::server::IdmServerProxyReadTransaction;
 use crate::prelude::*;
 use kanidm_proto::internal::AppLink;
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
 pub fn list_applinks(&mut self, ident: &Identity) -> Result<Vec<AppLink>, OperationError> {
 // From the member-of of the ident.
 let Some(ident_mo) = ident.get_memberof() else {

View file

@@ -520,7 +520,7 @@ impl InitCredentialUpdateEvent {
 }
 }
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
 fn validate_init_credential_update(
 &mut self,
 target: Uuid,
@@ -1546,7 +1546,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
 }
 }
-impl<'a> IdmServerCredUpdateTransaction<'a> {
+impl IdmServerCredUpdateTransaction<'_> {
 #[cfg(test)]
 pub fn get_origin(&self) -> &Url {
 &self.webauthn.get_allowed_origins()[0]

View file

@@ -54,7 +54,7 @@ impl IdentifyUserSubmitCodeEvent {
 }
 }
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
 pub fn handle_identify_user_start(
 &mut self,
 IdentifyUserStartEvent { target, ident }: &IdentifyUserStartEvent,

View file

@@ -461,7 +461,7 @@ impl Oauth2ResourceServers {
 }
 }
-impl<'a> Oauth2ResourceServersWriteTransaction<'a> {
+impl Oauth2ResourceServersWriteTransaction<'_> {
 pub fn reload(
 &mut self,
 value: Vec<Arc<EntrySealedCommitted>>,
@@ -801,7 +801,7 @@ impl<'a> Oauth2ResourceServersWriteTransaction<'a> {
 }
 }
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
 #[instrument(level = "debug", skip_all)]
 pub fn oauth2_token_revoke(
 &mut self,
@@ -1789,7 +1789,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
 }
 }
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
 #[instrument(level = "debug", skip_all)]
 pub fn check_oauth2_authorisation(
 &self,

View file

@@ -114,7 +114,7 @@ impl GenerateScimSyncTokenEvent {
 }
 }
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
 pub fn scim_sync_generate_token(
 &mut self,
 gte: &GenerateScimSyncTokenEvent,
@@ -234,7 +234,7 @@ pub struct ScimSyncFinaliseEvent {
 pub target: Uuid,
 }
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
 pub fn scim_sync_finalise(
 &mut self,
 sfe: &ScimSyncFinaliseEvent,
@@ -364,7 +364,7 @@ pub struct ScimSyncTerminateEvent {
 pub target: Uuid,
 }
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
 pub fn scim_sync_terminate(
 &mut self,
 ste: &ScimSyncTerminateEvent,
@@ -497,7 +497,7 @@ pub struct ScimSyncUpdateEvent {
 pub ident: Identity,
 }
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
 #[instrument(level = "info", skip_all)]
 pub fn scim_sync_apply(
 &mut self,
@@ -1464,7 +1464,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
 }
 }
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
 pub fn scim_sync_get_state(
 &mut self,
 ident: &Identity,

View file

@@ -1521,7 +1521,7 @@ fn gen_password_upgrade_mod(
 }
 }
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
 pub fn jws_public_jwk(&mut self, key_id: &str) -> Result<Jwk, OperationError> {
 self.qs_read
 .get_key_providers()
@@ -1628,7 +1628,7 @@ impl<'a> IdmServerTransaction<'a> for IdmServerProxyWriteTransaction<'a> {
 }
 }
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
 pub(crate) fn crypto_policy(&self) -> &CryptoPolicy {
 self.crypto_policy
 }
@@ -4223,7 +4223,7 @@ mod tests {
 .await;
 assert!(result.is_ok());
-if let Some(_) = expected {
+if expected.is_some() {
 assert!(result.unwrap().is_some());
 } else {
 assert!(result.unwrap().is_none());

View file

@@ -174,7 +174,7 @@ impl DestroyApiTokenEvent {
 }
 }
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
 pub fn service_account_generate_api_token(
 &mut self,
 gte: &GenerateApiTokenEvent,
@@ -353,7 +353,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
 }
 }
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
 pub fn service_account_list_api_token(
 &mut self,
 lte: &ListApiTokenEvent,

View file

@@ -70,7 +70,6 @@ fn generate_domain_cookie_key() -> Value {
 impl Domain {
 /// Generates the cookie key for the domain.
 fn modify_inner<T: Clone + std::fmt::Debug>(
 qs: &mut QueryServerWriteTransaction,
 cand: &mut [Entry<EntryInvalid, T>],

View file

@@ -9,46 +9,46 @@ use crate::plugins::Plugin;
 use crate::prelude::*;
 use crate::utils::uuid_to_gid_u32;
-/// Systemd dynamic units allocate between 61184-65519, most distros allocate
+// Systemd dynamic units allocate between 61184-65519, most distros allocate
-/// system uids from 0 - 1000, and many others give user ids between 1000 to
+// system uids from 0 - 1000, and many others give user ids between 1000 to
-/// 2000. This whole numberspace is cursed, lets assume it's not ours. :(
+// 2000. This whole numberspace is cursed, lets assume it's not ours. :(
-///
+//
-/// Per <https://systemd.io/UIDS-GIDS/>, systemd claims a huge chunk of this
+// Per <https://systemd.io/UIDS-GIDS/>, systemd claims a huge chunk of this
-/// space to itself. As a result we can't allocate between 65536 and u32 max
+// space to itself. As a result we can't allocate between 65536 and u32 max
-/// because systemd takes most of the usable range for its own containers,
+// because systemd takes most of the usable range for its own containers,
-/// and half the range is probably going to trigger linux kernel issues.
+// and half the range is probably going to trigger linux kernel issues.
-///
+//
-/// Seriously, linux's uid/gid model is so fundamentally terrible... Windows
+// Seriously, linux's uid/gid model is so fundamentally terrible... Windows
-/// NT got this right with SIDs.
+// NT got this right with SIDs.
-///
+//
-/// Because of this, we have to ensure that anything we allocate is in the
+// Because of this, we have to ensure that anything we allocate is in the
-/// range 1879048192 (0x70000000) to 2147483647 (0x7fffffff)
+// range 1879048192 (0x70000000) to 2147483647 (0x7fffffff)
 const GID_SYSTEM_NUMBER_PREFIX: u32 = 0x7000_0000;
 const GID_SYSTEM_NUMBER_MASK: u32 = 0x0fff_ffff;
-/// Systemd claims so many ranges to itself, we have to check we are in certain bounds.
+// Systemd claims so many ranges to itself, we have to check we are in certain bounds.
+//
-/// This is the normal system range, we MUST NOT allow it to be allocated.
+// This is the normal system range, we MUST NOT allow it to be allocated.
 pub const GID_REGULAR_USER_MIN: u32 = 1000;
 pub const GID_REGULAR_USER_MAX: u32 = 60000;
-/// Systemd homed claims 60001 through 60577
+// Systemd homed claims 60001 through 60577
 pub const GID_UNUSED_A_MIN: u32 = 60578;
 pub const GID_UNUSED_A_MAX: u32 = 61183;
-/// Systemd dyn service users 61184 through 65519
+// Systemd dyn service users 61184 through 65519
 pub const GID_UNUSED_B_MIN: u32 = 65520;
 pub const GID_UNUSED_B_MAX: u32 = 65533;
-/// nobody is 65534
+// nobody is 65534
-/// 16bit uid -1 65535
+// 16bit uid -1 65535
 pub const GID_UNUSED_C_MIN: u32 = 65536;
 const GID_UNUSED_C_MAX: u32 = 524287;
-/// systemd claims 524288 through 1879048191 for nspawn
+// systemd claims 524288 through 1879048191 for nspawn
 const GID_NSPAWN_MIN: u32 = 524288;
 const GID_NSPAWN_MAX: u32 = 1879048191;
@@ -56,8 +56,8 @@ const GID_NSPAWN_MAX: u32 = 1879048191;
 const GID_UNUSED_D_MIN: u32 = 0x7000_0000;
 pub const GID_UNUSED_D_MAX: u32 = 0x7fff_ffff;
-/// Anything above 2147483648 can confuse the kernel (so basically half the address space
+// Anything above 2147483648 can confuse the kernel (so basically half the address space
-/// can't be accessed.
+// can't be accessed.
 // const GID_UNSAFE_MAX: u32 = 2147483648;
 pub struct GidNumber {}
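
The comments above explain why allocated gidnumbers must fall inside 1879048192 (0x7000_0000) to 2147483647 (0x7fff_ffff). As a sketch only (the function name below is invented; in the real plugin the input is derived from the entry UUID via uuid_to_gid_u32), the mask-and-prefix arithmetic that keeps any 32-bit value inside that reserved range works like this:

const GID_SYSTEM_NUMBER_PREFIX: u32 = 0x7000_0000;
const GID_SYSTEM_NUMBER_MASK: u32 = 0x0fff_ffff;

// Keep only the low 28 bits of the input and OR in the prefix, so the
// result can never land in the system, homed, dyn-service or nspawn ranges.
fn fold_into_reserved_range(raw: u32) -> u32 {
    (raw & GID_SYSTEM_NUMBER_MASK) | GID_SYSTEM_NUMBER_PREFIX
}

fn main() {
    assert_eq!(fold_into_reserved_range(0), 0x7000_0000);
    assert_eq!(fold_into_reserved_range(u32::MAX), 0x7fff_ffff);
}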

View file

@@ -5,7 +5,7 @@ use crate::server::ChangeFlag;
 use std::collections::{BTreeMap, BTreeSet};
 use std::sync::Arc;
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
 // Apply the state changes if they are valid.
 fn consumer_incremental_apply_entries(

View file

@@ -236,7 +236,7 @@ pub struct ReplicationUpdateVectorWriteTransaction<'a> {
 ranged: BptreeMapWriteTxn<'a, Uuid, BTreeSet<Duration>>,
 }
-impl<'a> fmt::Debug for ReplicationUpdateVectorWriteTransaction<'a> {
+impl fmt::Debug for ReplicationUpdateVectorWriteTransaction<'_> {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 writeln!(f, "RUV DATA DUMP")?;
 self.data
@@ -255,7 +255,7 @@ pub struct ReplicationUpdateVectorReadTransaction<'a> {
 ranged: BptreeMapReadTxn<'a, Uuid, BTreeSet<Duration>>,
 }
-impl<'a> fmt::Debug for ReplicationUpdateVectorReadTransaction<'a> {
+impl fmt::Debug for ReplicationUpdateVectorReadTransaction<'_> {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 writeln!(f, "RUV DATA DUMP")?;
 self.data
@@ -540,7 +540,7 @@ pub trait ReplicationUpdateVectorTransaction {
 }
 }
-impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorWriteTransaction<'a> {
+impl ReplicationUpdateVectorTransaction for ReplicationUpdateVectorWriteTransaction<'_> {
 fn ruv_snapshot(&self) -> BptreeMapReadSnapshot<'_, Cid, IDLBitRange> {
 self.data.to_snapshot()
 }
@@ -550,7 +550,7 @@ impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorWriteTran
 }
 }
-impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorReadTransaction<'a> {
+impl ReplicationUpdateVectorTransaction for ReplicationUpdateVectorReadTransaction<'_> {
 fn ruv_snapshot(&self) -> BptreeMapReadSnapshot<'_, Cid, IDLBitRange> {
 self.data.to_snapshot()
 }
@@ -560,7 +560,7 @@ impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorReadTrans
 }
 }
-impl<'a> ReplicationUpdateVectorWriteTransaction<'a> {
+impl ReplicationUpdateVectorWriteTransaction<'_> {
 pub fn clear(&mut self) {
 self.added = None;
 self.data.clear();

View file

@@ -9,7 +9,7 @@ use crate::be::keystorage::{KeyHandle, KeyHandleId};
 use kanidm_lib_crypto::mtls::build_self_signed_server_and_client_identity;
 use kanidm_lib_crypto::prelude::{PKey, Private, X509};
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
 fn supplier_generate_key_cert(
 &mut self,
 domain_name: &str,
@@ -80,7 +80,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 }
 }
-impl<'a> QueryServerReadTransaction<'a> {
+impl QueryServerReadTransaction<'_> {
 // Given a consumers state, calculate the differential of changes they
 // need to be sent to bring them to the equivalent state.

View file

@@ -759,7 +759,7 @@ pub trait SchemaTransaction {
 }
 }
-impl<'a> SchemaWriteTransaction<'a> {
+impl SchemaWriteTransaction<'_> {
 // Schema probably needs to be part of the backend, so that commits are wholly atomic
 // but in the current design, we need to open be first, then schema, but we have to commit be
 // first, then schema to ensure that the be content matches our schema. Saying this, if your
@@ -2241,7 +2241,7 @@ impl<'a> SchemaWriteTransaction<'a> {
 }
 }
-impl<'a> SchemaTransaction for SchemaWriteTransaction<'a> {
+impl SchemaTransaction for SchemaWriteTransaction<'_> {
 fn get_attributes_unique(&self) -> &Vec<Attribute> {
 &self.unique_cache
 }

View file

@@ -891,7 +891,7 @@ pub struct AccessControlsWriteTransaction<'a> {
 acp_resolve_filter_cache: Cell<ResolveFilterCacheReadTxn<'a>>,
 }
-impl<'a> AccessControlsWriteTransaction<'a> {
+impl AccessControlsWriteTransaction<'_> {
 // We have a method to update each set, so that if an error
 // occurs we KNOW it's an error, rather than using errors as
 // part of the logic (IE try-parse-fail method).
@@ -983,9 +983,9 @@ pub struct AccessControlsReadTransaction<'a> {
 acp_resolve_filter_cache: Cell<ResolveFilterCacheReadTxn<'a>>,
 }
-unsafe impl<'a> Sync for AccessControlsReadTransaction<'a> {}
+unsafe impl Sync for AccessControlsReadTransaction<'_> {}
-unsafe impl<'a> Send for AccessControlsReadTransaction<'a> {}
+unsafe impl Send for AccessControlsReadTransaction<'_> {}
 impl<'a> AccessControlsTransaction<'a> for AccessControlsReadTransaction<'a> {
 fn get_search(&self) -> &Vec<AccessControlSearch> {

View file

@@ -10,7 +10,7 @@ pub struct BatchModifyEvent {
 pub modset: ModSetValid,
 }
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
 /// This function behaves different to modify. Modify applies the same
 /// modification operation en-mass to 1 -> N entries. This takes a set of modifications
 /// that define a precise entry to apply a change to and only modifies that.
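
The doc comment above distinguishes modify (one modification list applied en masse to every matched entry) from batch modify (a per-entry set of modifications). A conceptual sketch under invented types, purely to illustrate the difference in shape; it does not reflect Kanidm's real ModifyEvent or ModSetValid:

use std::collections::BTreeMap;

type EntryId = u64;

#[derive(Clone)]
struct Modification(String);

// modify: the same modification list is applied to every matched entry.
fn modify_all(matched: &mut BTreeMap<EntryId, Vec<Modification>>, mods: &[Modification]) {
    for entry_mods in matched.values_mut() {
        entry_mods.extend_from_slice(mods);
    }
}

// batch modify: a per-entry map, so each target receives exactly its own changes.
fn batch_modify(
    entries: &mut BTreeMap<EntryId, Vec<Modification>>,
    modset: BTreeMap<EntryId, Vec<Modification>>,
) {
    for (id, mods) in modset {
        if let Some(entry_mods) = entries.get_mut(&id) {
            entry_mods.extend(mods);
        }
    }
}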

View file

@@ -2,7 +2,7 @@ use crate::prelude::*;
 use crate::server::CreateEvent;
 use crate::server::{ChangeFlag, Plugins};
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
 #[instrument(level = "debug", skip_all)]
 /// The create event is a raw, read only representation of the request
 /// that was made to us, including information about the identity

View file

@@ -2,7 +2,7 @@ use crate::prelude::*;
 use crate::server::DeleteEvent;
 use crate::server::{ChangeFlag, Plugins};
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
 #[allow(clippy::cognitive_complexity)]
 #[instrument(level = "debug", skip_all)]
 pub fn delete(&mut self, de: &DeleteEvent) -> Result<(), OperationError> {

View file

@@ -163,7 +163,7 @@ pub struct KeyProvidersWriteTransaction<'a> {
 inner: CowCellWriteTxn<'a, KeyProvidersInner>,
 }
-impl<'a> KeyProvidersTransaction for KeyProvidersWriteTransaction<'a> {
+impl KeyProvidersTransaction for KeyProvidersWriteTransaction<'_> {
 #[cfg(test)]
 fn get_uuid(&self, key_provider_uuid: Uuid) -> Option<&KeyProvider> {
 self.inner
@@ -187,7 +187,7 @@ impl<'a> KeyProvidersTransaction for KeyProvidersWriteTransaction<'a> {
 }
 }
-impl<'a> KeyProvidersWriteTransaction<'a> {
+impl KeyProvidersWriteTransaction<'_> {
 #[cfg(test)]
 pub(crate) fn get_default(&self) -> Result<&KeyProvider, OperationError> {
 // In future we will make this configurable, and we'll load the default into
@@ -224,7 +224,7 @@ impl<'a> KeyProvidersWriteTransaction<'a> {
 }
 }
-impl<'a> KeyProvidersWriteTransaction<'a> {
+impl KeyProvidersWriteTransaction<'_> {
 pub(crate) fn update_providers(
 &mut self,
 providers: Vec<Arc<KeyProvider>>,

View file

@@ -214,7 +214,7 @@ impl QueryServer {
 }
 }
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
 /// Apply a domain migration `to_level`. Panics if `to_level` is not greater than the active
 /// level.
 #[cfg(test)]
@@ -1064,7 +1064,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 }
 }
-impl<'a> QueryServerReadTransaction<'a> {
+impl QueryServerReadTransaction<'_> {
 /// Retrieve the domain info of this server
 pub fn domain_upgrade_check(
 &mut self,

View file

@@ -141,9 +141,9 @@ pub struct QueryServerReadTransaction<'a> {
 trim_cid: Cid,
 }
-unsafe impl<'a> Sync for QueryServerReadTransaction<'a> {}
+unsafe impl Sync for QueryServerReadTransaction<'_> {}
-unsafe impl<'a> Send for QueryServerReadTransaction<'a> {}
+unsafe impl Send for QueryServerReadTransaction<'_> {}
 bitflags::bitflags! {
 #[derive(Copy, Clone, Debug)]
@@ -189,7 +189,7 @@ pub struct QueryServerWriteTransaction<'a> {
 dyngroup_cache: CowCellWriteTxn<'a, DynGroupCache>,
 }
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
 pub(crate) fn trim_cid(&self) -> &Cid {
 &self.trim_cid
 }
@@ -1164,7 +1164,7 @@ impl<'a> QueryServerTransaction<'a> for QueryServerReadTransaction<'a> {
 }
 }
-impl<'a> QueryServerReadTransaction<'a> {
+impl QueryServerReadTransaction<'_> {
 pub(crate) fn trim_cid(&self) -> &Cid {
 &self.trim_cid
 }

View file

@@ -10,7 +10,7 @@ pub(crate) struct ModifyPartial<'a> {
 pub me: &'a ModifyEvent,
 }
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
 #[instrument(level = "debug", skip_all)]
 pub fn modify(&mut self, me: &ModifyEvent) -> Result<(), OperationError> {
 let mp = self.modify_pre_apply(me)?;
@@ -305,7 +305,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 }
 }
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
 /// Used in conjunction with internal_apply_writable, to get a pre/post
 /// pair, where post is pre-configured with metadata to allow
 /// modificiation before submit back to internal_apply_writable

View file

@@ -4,7 +4,7 @@ use crate::prelude::*;
 use crate::server::Plugins;
 use hashbrown::HashMap;
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
 #[instrument(level = "debug", skip_all)]
 pub fn purge_tombstones(&mut self) -> Result<usize, OperationError> {
 // purge everything that is a tombstone.

View file

@@ -63,18 +63,11 @@ enum Event {
 StackedAuthtok(Option<&'static str>),
 }
+#[derive(Default)]
 struct TestHandler {
 response_queue: Mutex<VecDeque<Event>>,
 }
-impl Default for TestHandler {
-fn default() -> Self {
-TestHandler {
-response_queue: Default::default(),
-}
-}
-}
 impl From<Vec<Event>> for TestHandler {
 fn from(v: Vec<Event>) -> Self {
 TestHandler {
View file

@@ -240,7 +240,7 @@ impl<'a> DbTxn<'a> {
 }
 }
-impl<'a, 'b> KeyStoreTxn<'a, 'b> {
+impl KeyStoreTxn<'_, '_> {
 pub fn get_tagged_hsm_key<K: DeserializeOwned>(
 &mut self,
 tag: &str,
@@ -261,7 +261,7 @@ impl<'a, 'b> KeyStoreTxn<'a, 'b> {
 }
 }
-impl<'a> DbTxn<'a> {
+impl DbTxn<'_> {
 fn get_tagged_hsm_key<K: DeserializeOwned>(
 &mut self,
 tag: &str,
@@ -329,7 +329,7 @@ impl<'a> DbTxn<'a> {
 }
 }
-impl<'a> DbTxn<'a> {
+impl DbTxn<'_> {
 pub fn migrate(&mut self) -> Result<(), CacheError> {
 self.conn.set_prepared_statement_cache_capacity(16);
 self.conn
@@ -897,13 +897,13 @@ impl<'a> DbTxn<'a> {
 }
 }
-impl<'a> fmt::Debug for DbTxn<'a> {
+impl fmt::Debug for DbTxn<'_> {
 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
 write!(f, "DbTxn {{}}")
 }
 }
-impl<'a> Drop for DbTxn<'a> {
+impl Drop for DbTxn<'_> {
 // Abort
 fn drop(&mut self) {
 if !self.committed {

View file

@@ -219,8 +219,8 @@ pub trait IdProvider {
 /// with remote members.
 fn has_map_group(&self, local: &str) -> Option<&Id>;
-/// This is similar to a "domain join" process. What do we actually need to pass here
+// This is similar to a "domain join" process. What do we actually need to pass here
-/// for this to work for kanidm or himmelblau? Should we make it take a generic?
+// for this to work for kanidm or himmelblau? Should we make it take a generic?
 /*
 async fn configure_machine_identity(
 &self,