Mirror of https://github.com/kanidm/kanidm.git, synced 2025-02-23 04:27:02 +01:00

Clippy Lints (#3255)
This commit is contained in:
parent c1ed939c28
commit db101e6d26

@@ -215,7 +215,7 @@ mod tests {
     struct TestBVisitor;
 
-    impl<'de> Visitor<'de> for TestBVisitor {
+    impl Visitor<'_> for TestBVisitor {
         type Value = TestB;
 
         fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {

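Most hunks in this commit apply the same mechanical fix: newer clippy releases warn, via the
needless_lifetimes lint, when an impl header declares a named lifetime that adds no information,
and they suggest the anonymous lifetime '_ instead. A minimal sketch of the pattern, using a
hypothetical type rather than any kanidm code:

    struct Reader<'a> {
        buf: &'a [u8],
    }

    // Before: the named lifetime 'a is declared only to be repeated once.
    // impl<'a> Reader<'a> {
    //     fn remaining(&self) -> usize { self.buf.len() }
    // }

    // After: the anonymous lifetime satisfies clippy::needless_lifetimes.
    impl Reader<'_> {
        fn remaining(&self) -> usize {
            self.buf.len()
        }
    }
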
@@ -385,7 +385,7 @@ pub trait IdlArcSqliteTransaction {
     fn get_id2entry(&self, id: u64) -> Result<(u64, String), OperationError>;
 }
 
-impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> {
+impl IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'_> {
     fn get_identry(
         &mut self,
         idl: &IdList,

@@ -480,7 +480,7 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> {
     }
 }
 
-impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> {
+impl IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'_> {
     fn get_identry(
         &mut self,
         idl: &IdList,

@@ -578,7 +578,7 @@ impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> {
     }
 }
 
-impl<'a> IdlArcSqliteWriteTransaction<'a> {
+impl IdlArcSqliteWriteTransaction<'_> {
     #[cfg(any(test, debug_assertions))]
     #[instrument(level = "debug", name = "idl_arc_sqlite::clear_cache", skip_all)]
     pub fn clear_cache(&mut self) -> Result<(), OperationError> {

@@ -43,7 +43,7 @@ pub trait IdxKeyToRef {
     fn keyref(&self) -> IdxKeyRef<'_>;
 }
 
-impl<'a> IdxKeyToRef for IdxKeyRef<'a> {
+impl IdxKeyToRef for IdxKeyRef<'_> {
     fn keyref(&self) -> IdxKeyRef<'_> {
         // Copy the self.
         *self

@@ -65,15 +65,15 @@ impl<'a> Borrow<dyn IdxKeyToRef + 'a> for IdxKey {
     }
 }
 
-impl<'a> PartialEq for (dyn IdxKeyToRef + 'a) {
+impl PartialEq for (dyn IdxKeyToRef + '_) {
     fn eq(&self, other: &Self) -> bool {
         self.keyref().eq(&other.keyref())
     }
 }
 
-impl<'a> Eq for (dyn IdxKeyToRef + 'a) {}
+impl Eq for (dyn IdxKeyToRef + '_) {}
 
-impl<'a> Hash for (dyn IdxKeyToRef + 'a) {
+impl Hash for (dyn IdxKeyToRef + '_) {
     fn hash<H: Hasher>(&self, state: &mut H) {
         self.keyref().hash(state)
     }

@@ -107,7 +107,7 @@ pub trait IdlCacheKeyToRef {
     fn keyref(&self) -> IdlCacheKeyRef<'_>;
 }
 
-impl<'a> IdlCacheKeyToRef for IdlCacheKeyRef<'a> {
+impl IdlCacheKeyToRef for IdlCacheKeyRef<'_> {
     fn keyref(&self) -> IdlCacheKeyRef<'_> {
         // Copy the self
         *self

@@ -130,27 +130,27 @@ impl<'a> Borrow<dyn IdlCacheKeyToRef + 'a> for IdlCacheKey {
     }
 }
 
-impl<'a> PartialEq for (dyn IdlCacheKeyToRef + 'a) {
+impl PartialEq for (dyn IdlCacheKeyToRef + '_) {
     fn eq(&self, other: &Self) -> bool {
         self.keyref().eq(&other.keyref())
     }
 }
 
-impl<'a> Eq for (dyn IdlCacheKeyToRef + 'a) {}
+impl Eq for (dyn IdlCacheKeyToRef + '_) {}
 
-impl<'a> Hash for (dyn IdlCacheKeyToRef + 'a) {
+impl Hash for (dyn IdlCacheKeyToRef + '_) {
     fn hash<H: Hasher>(&self, state: &mut H) {
         self.keyref().hash(state)
     }
 }
 
-impl<'a> PartialOrd for (dyn IdlCacheKeyToRef + 'a) {
+impl PartialOrd for (dyn IdlCacheKeyToRef + '_) {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
         Some(self.cmp(&other.keyref()))
     }
 }
 
-impl<'a> Ord for (dyn IdlCacheKeyToRef + 'a) {
+impl Ord for (dyn IdlCacheKeyToRef + '_) {
     fn cmp(&self, other: &Self) -> Ordering {
         self.keyref().cmp(&other.keyref())
     }

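The IdxKey and IdlCacheKey hunks above only elide lifetimes, but the code they touch is the
"borrowed key" pattern: an owned key type implements Borrow<dyn ...ToRef>, and the trait-object
impls of PartialEq/Eq/Hash forward to a cheap Ref form, so a map keyed by the owned type can be
queried with a borrowed key and no allocation. A self-contained sketch of that pattern, with
hypothetical Key/KeyRef types rather than kanidm's, written with the elided '_ form this commit
adopts:

    use std::borrow::Borrow;
    use std::collections::HashMap;
    use std::hash::{Hash, Hasher};

    #[derive(Hash, PartialEq, Eq)]
    struct Key {
        name: String,
    }

    #[derive(Hash, PartialEq, Eq, Clone, Copy)]
    struct KeyRef<'a> {
        name: &'a str,
    }

    trait KeyToRef {
        fn keyref(&self) -> KeyRef<'_>;
    }

    impl KeyToRef for Key {
        fn keyref(&self) -> KeyRef<'_> {
            KeyRef { name: &self.name }
        }
    }

    impl KeyToRef for KeyRef<'_> {
        fn keyref(&self) -> KeyRef<'_> {
            // Copy the self.
            *self
        }
    }

    impl<'a> Borrow<dyn KeyToRef + 'a> for Key {
        fn borrow(&self) -> &(dyn KeyToRef + 'a) {
            self
        }
    }

    impl PartialEq for (dyn KeyToRef + '_) {
        fn eq(&self, other: &Self) -> bool {
            self.keyref().eq(&other.keyref())
        }
    }

    impl Eq for (dyn KeyToRef + '_) {}

    impl Hash for (dyn KeyToRef + '_) {
        fn hash<H: Hasher>(&self, state: &mut H) {
            self.keyref().hash(state)
        }
    }

    fn main() {
        let mut map: HashMap<Key, u64> = HashMap::new();
        map.insert(Key { name: "name".to_string() }, 1);
        // Look up with a borrowed KeyRef, without building an owned Key.
        let found = map.get(&KeyRef { name: "name" } as &dyn KeyToRef);
        assert_eq!(found, Some(&1));
    }
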
@@ -33,7 +33,7 @@ pub enum KeyHandle {
     },
 }
 
-impl<'a> BackendWriteTransaction<'a> {
+impl BackendWriteTransaction<'_> {
     /// Retrieve a key stored in the database by it's key handle. This
     /// handle may require further processing for the key to be usable
     /// in higher level contexts as this is simply the storage layer

@@ -55,7 +55,7 @@ impl<'a> BackendWriteTransaction<'a> {
     }
 }
 
-impl<'a> IdlArcSqliteWriteTransaction<'a> {
+impl IdlArcSqliteWriteTransaction<'_> {
     pub(crate) fn get_key_handle(
         &mut self,
         handle: KeyHandleId,

@@ -173,9 +173,9 @@ pub struct BackendReadTransaction<'a> {
     ruv: ReplicationUpdateVectorReadTransaction<'a>,
 }
 
-unsafe impl<'a> Sync for BackendReadTransaction<'a> {}
+unsafe impl Sync for BackendReadTransaction<'_> {}
 
-unsafe impl<'a> Send for BackendReadTransaction<'a> {}
+unsafe impl Send for BackendReadTransaction<'_> {}
 
 pub struct BackendWriteTransaction<'a> {
     idlayer: IdlArcSqliteWriteTransaction<'a>,

@@ -1009,7 +1009,7 @@ impl<'a> BackendTransaction for BackendReadTransaction<'a> {
     }
 }
 
-impl<'a> BackendReadTransaction<'a> {
+impl BackendReadTransaction<'_> {
     pub fn list_indexes(&mut self) -> Result<Vec<String>, OperationError> {
         self.get_idlayer().list_idxs()
     }

@@ -1,62 +1,65 @@
-use std::time::Duration;
-
-/// Represents a temporary denial of the credential to authenticate. This is used
-/// to ratelimit and prevent bruteforcing of accounts. At an initial failure the
-/// SoftLock is created and the count set to 1, with a unlock_at set to 1 second
-/// later, and a reset_count_at: at a maximum time window for a cycle.
-///
-/// If the softlock already exists, and the failure count is 0, then this acts as the
-/// creation where the reset_count_at window is then set.
-///
-/// While current_time < unlock_at, all authentication attempts are denied with a
-/// message regarding the account being temporarily unavailable. Once
-/// unlock_at < current_time, authentication will be processed again. If a subsequent
-/// failure occurs, unlock_at is extended based on policy, and failure_count incremented.
-///
-/// If unlock_at < current_time, and authentication succeeds the login is allowed
-/// and no changes to failure_count or unlock_at are made.
-///
-/// If reset_count_at < current_time, then failure_count is reset to 0 before processing.
-///
-/// This allows handling of max_failure_count, so that when that value from policy is
-/// exceeded then unlock_at is set to reset_count_at to softlock until the cycle
-/// is over (see NIST sp800-63b.). For example, reset_count_at will be 24 hours after
-/// the first failed authentication attempt.
-///
-/// This also works for something like TOTP which allows a 60 second cycle for the
-/// reset_count_at and a max number of attempts in that window (say 5). with short
-/// delays in between (1 second).
-//
-// ┌────────────────────────┐
-// │reset_at < current_time │
-// ─└────────────────────────┘
-// │ │
-// ▼
-// ┌─────┐ .─────. ┌────┐ │
-// │Valid│ ╱ ╲ │Fail│
-// ┌────┴─────┴───────────────────────(count = 0)─────┴────┴┐ │
-// │ `. ,' │
-// │ `───' │ │
-// │ ┌────────────────────────┐▲ │
-// │ │reset_at < current_time │ │ │
-// │ └────────────────────────┘│ │
-// │ ┌ ─ ─ ─ ─ ─ ─ ─ ─ │ │
-// │ │
-// │ ├─────┬───────┬──┐ ▼ │
-// │ │ │ Fail │ │ .─────.
-// │ │ │count++│ │ ,' `. │
-// ▼ .─────. └───────┘ │ ; Locked :
-// ┌────────────┐ ╱ ╲ └─────────▶: count > 0 ;◀─┤
-// │Auth Success│◀─┬─────┬──(Unlocked ) ╲ ╱ │
-// └────────────┘ │Valid│ `. ,' `. ,' │
-// └─────┘ `───' `───' │
-// ▲ │ │
-// │ │ │
-// └─────┬──────────────────────────┬┴┬───────┴──────────────────┐
-// │ expire_at < current_time │ │ current_time < expire_at │
-// └──────────────────────────┘ └──────────────────────────┘
-//
-//
+//! Represents a temporary denial of the credential to authenticate. This is used
+//! to ratelimit and prevent bruteforcing of accounts. At an initial failure the
+//! SoftLock is created and the count set to 1, with a unlock_at set to 1 second
+//! later, and a reset_count_at: at a maximum time window for a cycle.
+//!
+//! If the softlock already exists, and the failure count is 0, then this acts as the
+//! creation where the reset_count_at window is then set.
+//!
+//! While current_time < unlock_at, all authentication attempts are denied with a
+//! message regarding the account being temporarily unavailable. Once
+//! unlock_at < current_time, authentication will be processed again. If a subsequent
+//! failure occurs, unlock_at is extended based on policy, and failure_count incremented.
+//!
+//! If unlock_at < current_time, and authentication succeeds the login is allowed
+//! and no changes to failure_count or unlock_at are made.
+//!
+//! If reset_count_at < current_time, then failure_count is reset to 0 before processing.
+//!
+//! This allows handling of max_failure_count, so that when that value from policy is
+//! exceeded then unlock_at is set to reset_count_at to softlock until the cycle
+//! is over (see NIST sp800-63b.). For example, reset_count_at will be 24 hours after
+//! the first failed authentication attempt.
+//!
+//! This also works for something like TOTP which allows a 60 second cycle for the
+//! reset_count_at and a max number of attempts in that window (say 5). with short
+//! delays in between (1 second).
+//!
+//! ```
+//!
+//! ┌────────────────────────┐
+//! │reset_at < current_time │
+//! ─└────────────────────────┘
+//! │ │
+//! ▼
+//! ┌─────┐ .─────. ┌────┐ │
+//! │Valid│ ╱ ╲ │Fail│
+//! ┌────┴─────┴───────────────────────(count = 0)─────┴────┴┐ │
+//! │ `. ,' │
+//! │ `───' │ │
+//! │ ┌────────────────────────┐▲ │
+//! │ │reset_at < current_time │ │ │
+//! │ └────────────────────────┘│ │
+//! │ ┌ ─ ─ ─ ─ ─ ─ ─ ─ │ │
+//! │ │
+//! │ ├─────┬───────┬──┐ ▼ │
+//! │ │ │ Fail │ │ .─────.
+//! │ │ │count++│ │ ,' `. │
+//! ▼ .─────. └───────┘ │ ; Locked :
+//! ┌────────────┐ ╱ ╲ └─────────▶: count > 0 ;◀─┤
+//! │Auth Success│◀─┬─────┬──(Unlocked ) ╲ ╱ │
+//! └────────────┘ │Valid│ `. ,' `. ,' │
+//! └─────┘ `───' `───' │
+//! ▲ │ │
+//! │ │ │
+//! └─────┬──────────────────────────┬┴┬───────┴──────────────────┐
+//! │ expire_at < current_time │ │ current_time < expire_at │
+//! └──────────────────────────┘ └──────────────────────────┘
+//!
+//! ```
+//!
+
+use std::time::Duration;
 
 const ONEDAY: u64 = 86400;
 

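The block above only relocates the description from misplaced /// doc comments into //! module
docs, but since that text specifies the soft-lock behaviour, a compact sketch may help. This is a
simplified, hypothetical model of the rules it describes; the real kanidm type and its policy
handling differ in detail:

    use std::time::Duration;

    // Times are offsets from an arbitrary epoch, as in the module docs above.
    struct SoftLock {
        failure_count: u32,
        unlock_at: Duration,
        reset_count_at: Duration,
    }

    impl SoftLock {
        // While current_time < unlock_at, authentication attempts are denied.
        fn is_allowed(&self, now: Duration) -> bool {
            self.unlock_at <= now
        }

        // On a failure: reset the count if the window has lapsed, then extend
        // the lockout. The 1 second and 24 hour values are illustrative; the
        // real values come from policy.
        fn record_failure(&mut self, now: Duration) {
            if self.reset_count_at < now {
                self.failure_count = 0;
                self.reset_count_at = now + Duration::from_secs(86400);
            }
            self.failure_count += 1;
            self.unlock_at = now + Duration::from_secs(1);
        }
    }
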
@@ -846,7 +846,7 @@ impl DestroySessionTokenEvent {
     }
 }
 
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
     pub fn account_destroy_session_token(
         &mut self,
         dte: &DestroySessionTokenEvent,

@@ -973,7 +973,7 @@ pub struct ListUserAuthTokenEvent {
     pub target: Uuid,
 }
 
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
     pub fn account_list_user_auth_tokens(
         &mut self,
         lte: &ListUserAuthTokenEvent,

@@ -62,7 +62,7 @@ pub struct LdapApplicationsWriteTransaction<'a> {
     inner: CowCellWriteTxn<'a, LdapApplicationsInner>,
 }
 
-impl<'a> LdapApplicationsWriteTransaction<'a> {
+impl LdapApplicationsWriteTransaction<'_> {
     pub fn reload(&mut self, value: Vec<Arc<EntrySealedCommitted>>) -> Result<(), OperationError> {
         let app_set: Result<HashMap<_, _>, _> = value
             .into_iter()

@@ -2,7 +2,7 @@ use crate::idm::server::IdmServerProxyReadTransaction;
 use crate::prelude::*;
 use kanidm_proto::internal::AppLink;
 
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
     pub fn list_applinks(&mut self, ident: &Identity) -> Result<Vec<AppLink>, OperationError> {
         // From the member-of of the ident.
         let Some(ident_mo) = ident.get_memberof() else {

@@ -520,7 +520,7 @@ impl InitCredentialUpdateEvent {
     }
 }
 
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
     fn validate_init_credential_update(
         &mut self,
         target: Uuid,

@@ -1546,7 +1546,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
     }
 }
 
-impl<'a> IdmServerCredUpdateTransaction<'a> {
+impl IdmServerCredUpdateTransaction<'_> {
     #[cfg(test)]
     pub fn get_origin(&self) -> &Url {
         &self.webauthn.get_allowed_origins()[0]

@@ -54,7 +54,7 @@ impl IdentifyUserSubmitCodeEvent {
     }
 }
 
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
     pub fn handle_identify_user_start(
         &mut self,
         IdentifyUserStartEvent { target, ident }: &IdentifyUserStartEvent,

@@ -461,7 +461,7 @@ impl Oauth2ResourceServers {
     }
 }
 
-impl<'a> Oauth2ResourceServersWriteTransaction<'a> {
+impl Oauth2ResourceServersWriteTransaction<'_> {
     pub fn reload(
         &mut self,
         value: Vec<Arc<EntrySealedCommitted>>,

@@ -801,7 +801,7 @@ impl<'a> Oauth2ResourceServersWriteTransaction<'a> {
     }
 }
 
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
     #[instrument(level = "debug", skip_all)]
     pub fn oauth2_token_revoke(
         &mut self,

@@ -1789,7 +1789,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
     }
 }
 
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
     #[instrument(level = "debug", skip_all)]
     pub fn check_oauth2_authorisation(
         &self,

@@ -114,7 +114,7 @@ impl GenerateScimSyncTokenEvent {
     }
 }
 
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
     pub fn scim_sync_generate_token(
         &mut self,
         gte: &GenerateScimSyncTokenEvent,

@@ -234,7 +234,7 @@ pub struct ScimSyncFinaliseEvent {
     pub target: Uuid,
 }
 
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
     pub fn scim_sync_finalise(
         &mut self,
         sfe: &ScimSyncFinaliseEvent,

@@ -364,7 +364,7 @@ pub struct ScimSyncTerminateEvent {
     pub target: Uuid,
 }
 
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
     pub fn scim_sync_terminate(
         &mut self,
         ste: &ScimSyncTerminateEvent,

@@ -497,7 +497,7 @@ pub struct ScimSyncUpdateEvent {
     pub ident: Identity,
 }
 
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
     #[instrument(level = "info", skip_all)]
     pub fn scim_sync_apply(
         &mut self,

@@ -1464,7 +1464,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
     }
 }
 
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
     pub fn scim_sync_get_state(
         &mut self,
         ident: &Identity,

@@ -1521,7 +1521,7 @@ fn gen_password_upgrade_mod(
     }
 }
 
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
     pub fn jws_public_jwk(&mut self, key_id: &str) -> Result<Jwk, OperationError> {
         self.qs_read
             .get_key_providers()

@@ -1628,7 +1628,7 @@ impl<'a> IdmServerTransaction<'a> for IdmServerProxyWriteTransaction<'a> {
     }
 }
 
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
     pub(crate) fn crypto_policy(&self) -> &CryptoPolicy {
         self.crypto_policy
     }

@@ -4223,7 +4223,7 @@ mod tests {
             .await;
 
         assert!(result.is_ok());
-        if let Some(_) = expected {
+        if expected.is_some() {
             assert!(result.unwrap().is_some());
         } else {
             assert!(result.unwrap().is_none());

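This hunk is a different lint from the lifetime changes: clippy's redundant_pattern_matching.
An `if let Some(_) = x` binds nothing, so `x.is_some()` expresses the same check directly.
A trivial standalone illustration (hypothetical function, not taken from the test above):

    fn describe(expected: Option<u32>) -> &'static str {
        // Before: if let Some(_) = expected { ... }
        // After: the form the hunk above switches to.
        if expected.is_some() {
            "a value was expected"
        } else {
            "no value was expected"
        }
    }
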
@@ -174,7 +174,7 @@ impl DestroyApiTokenEvent {
     }
 }
 
-impl<'a> IdmServerProxyWriteTransaction<'a> {
+impl IdmServerProxyWriteTransaction<'_> {
     pub fn service_account_generate_api_token(
         &mut self,
         gte: &GenerateApiTokenEvent,

@@ -353,7 +353,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
     }
 }
 
-impl<'a> IdmServerProxyReadTransaction<'a> {
+impl IdmServerProxyReadTransaction<'_> {
     pub fn service_account_list_api_token(
         &mut self,
         lte: &ListApiTokenEvent,

@@ -70,7 +70,6 @@ fn generate_domain_cookie_key() -> Value {
 
 impl Domain {
-    /// Generates the cookie key for the domain.
 
     fn modify_inner<T: Clone + std::fmt::Debug>(
         qs: &mut QueryServerWriteTransaction,
         cand: &mut [Entry<EntryInvalid, T>],

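This hunk, together with the gidnumber, softlock and IdProvider hunks elsewhere in the commit,
cleans up doc comments that are not attached to the item they describe. Recent clippy releases
warn about such patterns (for example clippy::empty_line_after_doc_comments), and the fix is
either to delete the stray /// line or demote it to a regular // comment. A small hypothetical
illustration:

    // Before (warns: the blank line detaches the doc comment from the item):
    //
    //     /// Returns the answer.
    //
    //     fn answer() -> u32 { 42 }

    // After: keep the doc comment directly above its item.
    /// Returns the answer.
    fn answer() -> u32 {
        42
    }
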
@@ -9,46 +9,46 @@ use crate::plugins::Plugin;
 use crate::prelude::*;
 use crate::utils::uuid_to_gid_u32;
 
-/// Systemd dynamic units allocate between 61184–65519, most distros allocate
-/// system uids from 0 - 1000, and many others give user ids between 1000 to
-/// 2000. This whole numberspace is cursed, lets assume it's not ours. :(
-///
-/// Per <https://systemd.io/UIDS-GIDS/>, systemd claims a huge chunk of this
-/// space to itself. As a result we can't allocate between 65536 and u32 max
-/// because systemd takes most of the usable range for its own containers,
-/// and half the range is probably going to trigger linux kernel issues.
-///
-/// Seriously, linux's uid/gid model is so fundamentally terrible... Windows
-/// NT got this right with SIDs.
-///
-/// Because of this, we have to ensure that anything we allocate is in the
-/// range 1879048192 (0x70000000) to 2147483647 (0x7fffffff)
+// Systemd dynamic units allocate between 61184–65519, most distros allocate
+// system uids from 0 - 1000, and many others give user ids between 1000 to
+// 2000. This whole numberspace is cursed, lets assume it's not ours. :(
+//
+// Per <https://systemd.io/UIDS-GIDS/>, systemd claims a huge chunk of this
+// space to itself. As a result we can't allocate between 65536 and u32 max
+// because systemd takes most of the usable range for its own containers,
+// and half the range is probably going to trigger linux kernel issues.
+//
+// Seriously, linux's uid/gid model is so fundamentally terrible... Windows
+// NT got this right with SIDs.
+//
+// Because of this, we have to ensure that anything we allocate is in the
+// range 1879048192 (0x70000000) to 2147483647 (0x7fffffff)
 const GID_SYSTEM_NUMBER_PREFIX: u32 = 0x7000_0000;
 const GID_SYSTEM_NUMBER_MASK: u32 = 0x0fff_ffff;
 
-/// Systemd claims so many ranges to itself, we have to check we are in certain bounds.
-
-/// This is the normal system range, we MUST NOT allow it to be allocated.
+// Systemd claims so many ranges to itself, we have to check we are in certain bounds.
+//
+// This is the normal system range, we MUST NOT allow it to be allocated.
 pub const GID_REGULAR_USER_MIN: u32 = 1000;
 pub const GID_REGULAR_USER_MAX: u32 = 60000;
 
-/// Systemd homed claims 60001 through 60577
+// Systemd homed claims 60001 through 60577
 
 pub const GID_UNUSED_A_MIN: u32 = 60578;
 pub const GID_UNUSED_A_MAX: u32 = 61183;
 
-/// Systemd dyn service users 61184 through 65519
+// Systemd dyn service users 61184 through 65519
 
 pub const GID_UNUSED_B_MIN: u32 = 65520;
 pub const GID_UNUSED_B_MAX: u32 = 65533;
 
-/// nobody is 65534
-/// 16bit uid -1 65535
+// nobody is 65534
+// 16bit uid -1 65535
 
 pub const GID_UNUSED_C_MIN: u32 = 65536;
 const GID_UNUSED_C_MAX: u32 = 524287;
 
-/// systemd claims 524288 through 1879048191 for nspawn
+// systemd claims 524288 through 1879048191 for nspawn
 
 const GID_NSPAWN_MIN: u32 = 524288;
 const GID_NSPAWN_MAX: u32 = 1879048191;

@@ -56,8 +56,8 @@ const GID_NSPAWN_MAX: u32 = 1879048191;
 const GID_UNUSED_D_MIN: u32 = 0x7000_0000;
 pub const GID_UNUSED_D_MAX: u32 = 0x7fff_ffff;
 
-/// Anything above 2147483648 can confuse the kernel (so basically half the address space
-/// can't be accessed.
+// Anything above 2147483648 can confuse the kernel (so basically half the address space
+// can't be accessed.
 // const GID_UNSAFE_MAX: u32 = 2147483648;
 
 pub struct GidNumber {}

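The comments above pin generated ids to the 0x7000_0000..=0x7fff_ffff window, and the two
constants suggest how an id is folded into it: keep the low 28 bits and OR in the prefix.
A hedged sketch of that arithmetic (the real derivation lives in uuid_to_gid_u32, which this
diff does not show, and the helper below is hypothetical):

    const GID_SYSTEM_NUMBER_PREFIX: u32 = 0x7000_0000;
    const GID_SYSTEM_NUMBER_MASK: u32 = 0x0fff_ffff;

    // Hypothetical helper: map an arbitrary 32-bit value into the safe range.
    fn to_safe_gid(raw: u32) -> u32 {
        GID_SYSTEM_NUMBER_PREFIX | (raw & GID_SYSTEM_NUMBER_MASK)
    }

    fn main() {
        let gid = to_safe_gid(0xDEAD_BEEF);
        assert!((0x7000_0000..=0x7fff_ffff).contains(&gid));
    }
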
@@ -5,7 +5,7 @@ use crate::server::ChangeFlag;
 use std::collections::{BTreeMap, BTreeSet};
 use std::sync::Arc;
 
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
     // Apply the state changes if they are valid.
 
     fn consumer_incremental_apply_entries(

@@ -236,7 +236,7 @@ pub struct ReplicationUpdateVectorWriteTransaction<'a> {
     ranged: BptreeMapWriteTxn<'a, Uuid, BTreeSet<Duration>>,
 }
 
-impl<'a> fmt::Debug for ReplicationUpdateVectorWriteTransaction<'a> {
+impl fmt::Debug for ReplicationUpdateVectorWriteTransaction<'_> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         writeln!(f, "RUV DATA DUMP")?;
         self.data

@@ -255,7 +255,7 @@ pub struct ReplicationUpdateVectorReadTransaction<'a> {
     ranged: BptreeMapReadTxn<'a, Uuid, BTreeSet<Duration>>,
 }
 
-impl<'a> fmt::Debug for ReplicationUpdateVectorReadTransaction<'a> {
+impl fmt::Debug for ReplicationUpdateVectorReadTransaction<'_> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         writeln!(f, "RUV DATA DUMP")?;
         self.data

@@ -540,7 +540,7 @@ pub trait ReplicationUpdateVectorTransaction {
     }
 }
 
-impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorWriteTransaction<'a> {
+impl ReplicationUpdateVectorTransaction for ReplicationUpdateVectorWriteTransaction<'_> {
     fn ruv_snapshot(&self) -> BptreeMapReadSnapshot<'_, Cid, IDLBitRange> {
         self.data.to_snapshot()
     }

@@ -550,7 +550,7 @@ impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorWriteTran
     }
 }
 
-impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorReadTransaction<'a> {
+impl ReplicationUpdateVectorTransaction for ReplicationUpdateVectorReadTransaction<'_> {
     fn ruv_snapshot(&self) -> BptreeMapReadSnapshot<'_, Cid, IDLBitRange> {
         self.data.to_snapshot()
     }

@@ -560,7 +560,7 @@ impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorReadTrans
     }
 }
 
-impl<'a> ReplicationUpdateVectorWriteTransaction<'a> {
+impl ReplicationUpdateVectorWriteTransaction<'_> {
     pub fn clear(&mut self) {
         self.added = None;
         self.data.clear();

@@ -9,7 +9,7 @@ use crate::be::keystorage::{KeyHandle, KeyHandleId};
 use kanidm_lib_crypto::mtls::build_self_signed_server_and_client_identity;
 use kanidm_lib_crypto::prelude::{PKey, Private, X509};
 
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
     fn supplier_generate_key_cert(
         &mut self,
         domain_name: &str,

@@ -80,7 +80,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
     }
 }
 
-impl<'a> QueryServerReadTransaction<'a> {
+impl QueryServerReadTransaction<'_> {
     // Given a consumers state, calculate the differential of changes they
     // need to be sent to bring them to the equivalent state.
 

@@ -759,7 +759,7 @@ pub trait SchemaTransaction {
     }
 }
 
-impl<'a> SchemaWriteTransaction<'a> {
+impl SchemaWriteTransaction<'_> {
     // Schema probably needs to be part of the backend, so that commits are wholly atomic
     // but in the current design, we need to open be first, then schema, but we have to commit be
     // first, then schema to ensure that the be content matches our schema. Saying this, if your

@@ -2241,7 +2241,7 @@ impl<'a> SchemaWriteTransaction<'a> {
     }
 }
 
-impl<'a> SchemaTransaction for SchemaWriteTransaction<'a> {
+impl SchemaTransaction for SchemaWriteTransaction<'_> {
     fn get_attributes_unique(&self) -> &Vec<Attribute> {
         &self.unique_cache
     }

@@ -891,7 +891,7 @@ pub struct AccessControlsWriteTransaction<'a> {
     acp_resolve_filter_cache: Cell<ResolveFilterCacheReadTxn<'a>>,
 }
 
-impl<'a> AccessControlsWriteTransaction<'a> {
+impl AccessControlsWriteTransaction<'_> {
     // We have a method to update each set, so that if an error
     // occurs we KNOW it's an error, rather than using errors as
     // part of the logic (IE try-parse-fail method).

@@ -983,9 +983,9 @@ pub struct AccessControlsReadTransaction<'a> {
     acp_resolve_filter_cache: Cell<ResolveFilterCacheReadTxn<'a>>,
 }
 
-unsafe impl<'a> Sync for AccessControlsReadTransaction<'a> {}
+unsafe impl Sync for AccessControlsReadTransaction<'_> {}
 
-unsafe impl<'a> Send for AccessControlsReadTransaction<'a> {}
+unsafe impl Send for AccessControlsReadTransaction<'_> {}
 
 impl<'a> AccessControlsTransaction<'a> for AccessControlsReadTransaction<'a> {
     fn get_search(&self) -> &Vec<AccessControlSearch> {

@@ -10,7 +10,7 @@ pub struct BatchModifyEvent {
     pub modset: ModSetValid,
 }
 
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
     /// This function behaves different to modify. Modify applies the same
     /// modification operation en-mass to 1 -> N entries. This takes a set of modifications
     /// that define a precise entry to apply a change to and only modifies that.

@@ -2,7 +2,7 @@ use crate::prelude::*;
 use crate::server::CreateEvent;
 use crate::server::{ChangeFlag, Plugins};
 
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
     #[instrument(level = "debug", skip_all)]
     /// The create event is a raw, read only representation of the request
     /// that was made to us, including information about the identity

@@ -2,7 +2,7 @@ use crate::prelude::*;
 use crate::server::DeleteEvent;
 use crate::server::{ChangeFlag, Plugins};
 
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
     #[allow(clippy::cognitive_complexity)]
     #[instrument(level = "debug", skip_all)]
     pub fn delete(&mut self, de: &DeleteEvent) -> Result<(), OperationError> {

@@ -163,7 +163,7 @@ pub struct KeyProvidersWriteTransaction<'a> {
     inner: CowCellWriteTxn<'a, KeyProvidersInner>,
 }
 
-impl<'a> KeyProvidersTransaction for KeyProvidersWriteTransaction<'a> {
+impl KeyProvidersTransaction for KeyProvidersWriteTransaction<'_> {
     #[cfg(test)]
     fn get_uuid(&self, key_provider_uuid: Uuid) -> Option<&KeyProvider> {
         self.inner

@@ -187,7 +187,7 @@ impl<'a> KeyProvidersTransaction for KeyProvidersWriteTransaction<'a> {
     }
 }
 
-impl<'a> KeyProvidersWriteTransaction<'a> {
+impl KeyProvidersWriteTransaction<'_> {
     #[cfg(test)]
     pub(crate) fn get_default(&self) -> Result<&KeyProvider, OperationError> {
         // In future we will make this configurable, and we'll load the default into

@@ -224,7 +224,7 @@ impl<'a> KeyProvidersWriteTransaction<'a> {
     }
 }
 
-impl<'a> KeyProvidersWriteTransaction<'a> {
+impl KeyProvidersWriteTransaction<'_> {
     pub(crate) fn update_providers(
         &mut self,
         providers: Vec<Arc<KeyProvider>>,

@@ -214,7 +214,7 @@ impl QueryServer {
     }
 }
 
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
     /// Apply a domain migration `to_level`. Panics if `to_level` is not greater than the active
     /// level.
     #[cfg(test)]

@@ -1064,7 +1064,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
     }
 }
 
-impl<'a> QueryServerReadTransaction<'a> {
+impl QueryServerReadTransaction<'_> {
     /// Retrieve the domain info of this server
     pub fn domain_upgrade_check(
         &mut self,

@@ -141,9 +141,9 @@ pub struct QueryServerReadTransaction<'a> {
     trim_cid: Cid,
 }
 
-unsafe impl<'a> Sync for QueryServerReadTransaction<'a> {}
+unsafe impl Sync for QueryServerReadTransaction<'_> {}
 
-unsafe impl<'a> Send for QueryServerReadTransaction<'a> {}
+unsafe impl Send for QueryServerReadTransaction<'_> {}
 
 bitflags::bitflags! {
     #[derive(Copy, Clone, Debug)]

@@ -189,7 +189,7 @@ pub struct QueryServerWriteTransaction<'a> {
     dyngroup_cache: CowCellWriteTxn<'a, DynGroupCache>,
 }
 
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
     pub(crate) fn trim_cid(&self) -> &Cid {
         &self.trim_cid
     }

@@ -1164,7 +1164,7 @@ impl<'a> QueryServerTransaction<'a> for QueryServerReadTransaction<'a> {
     }
 }
 
-impl<'a> QueryServerReadTransaction<'a> {
+impl QueryServerReadTransaction<'_> {
     pub(crate) fn trim_cid(&self) -> &Cid {
         &self.trim_cid
     }

@@ -10,7 +10,7 @@ pub(crate) struct ModifyPartial<'a> {
     pub me: &'a ModifyEvent,
 }
 
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
     #[instrument(level = "debug", skip_all)]
     pub fn modify(&mut self, me: &ModifyEvent) -> Result<(), OperationError> {
         let mp = self.modify_pre_apply(me)?;

@@ -305,7 +305,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
     }
 }
 
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
     /// Used in conjunction with internal_apply_writable, to get a pre/post
     /// pair, where post is pre-configured with metadata to allow
     /// modificiation before submit back to internal_apply_writable

@@ -4,7 +4,7 @@ use crate::prelude::*;
 use crate::server::Plugins;
 use hashbrown::HashMap;
 
-impl<'a> QueryServerWriteTransaction<'a> {
+impl QueryServerWriteTransaction<'_> {
     #[instrument(level = "debug", skip_all)]
     pub fn purge_tombstones(&mut self) -> Result<usize, OperationError> {
         // purge everything that is a tombstone.

@@ -63,18 +63,11 @@ enum Event {
     StackedAuthtok(Option<&'static str>),
 }
 
+#[derive(Default)]
 struct TestHandler {
     response_queue: Mutex<VecDeque<Event>>,
 }
 
-impl Default for TestHandler {
-    fn default() -> Self {
-        TestHandler {
-            response_queue: Default::default(),
-        }
-    }
-}
-
 impl From<Vec<Event>> for TestHandler {
     fn from(v: Vec<Event>) -> Self {
         TestHandler {

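This hunk is clippy's derivable_impls lint: the hand-written Default impl only filled each field
with its own default, so #[derive(Default)] is equivalent. A minimal standalone version of the
same change (hypothetical Handler type, not the test harness above):

    use std::collections::VecDeque;
    use std::sync::Mutex;

    // The derive produces the same value the removed impl built by hand:
    // an empty, unlocked queue.
    #[derive(Default)]
    struct Handler {
        queue: Mutex<VecDeque<u32>>,
    }

    fn main() {
        let h = Handler::default();
        assert!(h.queue.lock().unwrap().is_empty());
    }
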
@@ -240,7 +240,7 @@ impl<'a> DbTxn<'a> {
     }
 }
 
-impl<'a, 'b> KeyStoreTxn<'a, 'b> {
+impl KeyStoreTxn<'_, '_> {
     pub fn get_tagged_hsm_key<K: DeserializeOwned>(
         &mut self,
         tag: &str,

@@ -261,7 +261,7 @@ impl<'a, 'b> KeyStoreTxn<'a, 'b> {
     }
 }
 
-impl<'a> DbTxn<'a> {
+impl DbTxn<'_> {
     fn get_tagged_hsm_key<K: DeserializeOwned>(
         &mut self,
         tag: &str,

@@ -329,7 +329,7 @@ impl<'a> DbTxn<'a> {
     }
 }
 
-impl<'a> DbTxn<'a> {
+impl DbTxn<'_> {
     pub fn migrate(&mut self) -> Result<(), CacheError> {
         self.conn.set_prepared_statement_cache_capacity(16);
         self.conn

@@ -897,13 +897,13 @@ impl<'a> DbTxn<'a> {
     }
 }
 
-impl<'a> fmt::Debug for DbTxn<'a> {
+impl fmt::Debug for DbTxn<'_> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         write!(f, "DbTxn {{}}")
     }
 }
 
-impl<'a> Drop for DbTxn<'a> {
+impl Drop for DbTxn<'_> {
     // Abort
     fn drop(&mut self) {
         if !self.committed {

@@ -219,8 +219,8 @@ pub trait IdProvider {
     /// with remote members.
     fn has_map_group(&self, local: &str) -> Option<&Id>;
 
-    /// This is similar to a "domain join" process. What do we actually need to pass here
-    /// for this to work for kanidm or himmelblau? Should we make it take a generic?
+    // This is similar to a "domain join" process. What do we actually need to pass here
+    // for this to work for kanidm or himmelblau? Should we make it take a generic?
     /*
     async fn configure_machine_identity(
         &self,