From 562f35251632cd2ca350522ea438602835af2093 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alin=20Tr=C4=83istaru?=
Date: Thu, 18 Jul 2024 05:22:20 +0200
Subject: [PATCH] fix typos (#2908)

* fix typos and misspellings

* use proper capitalization

* Apply suggestions from code review

---------

Co-authored-by: James Hodgkinson
---
 RELEASE_NOTES.md                                           | 4 ++--
 book/src/access_control/intro.md                           | 2 +-
 book/src/accounts/intro.md                                 | 2 +-
 book/src/choosing_a_domain_name.md                         | 2 +-
 .../src/developers/designs/access_profiles_rework_2022.md  | 4 ++--
 book/src/developers/designs/account_policy.rst             | 6 +++---
 book/src/developers/designs/auth.md                        | 8 ++++----
 .../developers/designs/auth_proto_rewrite_late_2020.rst    | 2 +-
 book/src/developers/designs/credential-update.rst          | 2 +-
 book/src/developers/designs/cryptography_key_domains.md    | 6 +++---
 book/src/developers/designs/device-authentication.rst      | 2 +-
 book/src/developers/designs/downgrade.rst                  | 2 +-
 book/src/developers/designs/indexing.md                    | 4 ++--
 book/src/developers/designs/kanidm-trust.rst               | 2 +-
 book/src/developers/designs/ldap_gateway.rst               | 4 ++--
 book/src/developers/designs/memberof.rst                   | 2 +-
 book/src/developers/designs/oauth.rst                      | 2 +-
 book/src/developers/designs/oauth2_app_listing.md          | 2 +-
 book/src/developers/designs/oauth2_refresh_tokens.md       | 2 +-
 book/src/developers/designs/password-import.rst            | 2 +-
 book/src/developers/designs/radius.rst                     | 2 +-
 book/src/developers/designs/replication.rst                | 4 ++--
 book/src/developers/designs/replication_coordinator.md     | 8 ++++----
 book/src/developers/designs/resource_limits.rst            | 2 +-
 book/src/developers/designs/scim_migration_planning.md     | 4 ++--
 book/src/developers/designs/sudo.rst                       | 2 +-
 book/src/developers/designs/unixd_homes_task.rst           | 2 +-
 book/src/developers/readme.md                              | 6 +++---
 book/src/integrations/oauth2.md                            | 4 ++--
 book/src/integrations/sssd.md                              | 2 +-
 book/src/security_hardening.md                             | 2 +-
 book/src/server_configuration.md                           | 2 +-
 book/src/support.md                                        | 6 +++---
 book/src/sync/concepts.md                                  | 2 +-
 examples/kanidm-ipa-sync                                   | 2 +-
 examples/kanidm-ldap-sync                                  | 2 +-
 examples/unixd.macos                                       | 4 ++--
 libs/crypto/src/lib.rs                                     | 2 +-
 platform/debian/build_kanidm.sh                            | 2 +-
 platform/opensuse/kanidm-unixd.service                     | 2 +-
 proto/src/oauth2.rs                                        | 2 +-
 proto/src/v1/mod.rs                                        | 2 +-
 server/core/src/actors/internal.rs                         | 2 +-
 server/core/src/https/mod.rs                               | 4 ++--
 server/core/src/https/views/login.rs                       | 2 +-
 server/lib/src/entry.rs                                    | 4 ++--
 server/lib/src/filter.rs                                   | 4 ++--
 server/lib/src/macros.rs                                   | 2 +-
 server/lib/src/plugins/refint.rs                           | 4 ++--
 server/lib/src/repl/consumer.rs                            | 4 ++--
 server/lib/src/repl/ruv.rs                                 | 4 ++--
 server/lib/src/repl/supplier.rs                            | 4 ++--
 server/lib/src/repl/tests.rs                               | 2 +-
 server/lib/src/schema.rs                                   | 2 +-
 server/lib/src/server/access/delete.rs                     | 2 +-
 server/lib/src/server/access/mod.rs                        | 2 +-
 server/lib/src/server/access/modify.rs                     | 2 +-
 server/lib/src/server/access/search.rs                     | 4 ++--
 server/lib/src/server/keys/internal.rs                     | 2 +-
 server/lib/src/server/keys/provider.rs                     | 2 +-
 server/lib/src/server/migrations.rs                        | 2 +-
 server/lib/src/server/mod.rs                               | 2 +-
 server/lib/src/server/recycle.rs                           | 2 +-
 server/lib/src/value.rs                                    | 2 +-
 server/lib/src/valueset/image/jpg.rs                       | 2 +-
 server/lib/src/valueset/image/png.rs                       | 2 +-
 server/lib/src/valueset/session.rs                         | 2 +-
 server/testkit/tests/integration.rs                        | 2 +-
 tools/cli/src/opt/kanidm.rs                                | 2 +-
 tools/iam_migrations/freeipa/src/main.rs                   | 2 +-
 tools/orca/README.md                                       | 2 +-
 tools/orca/src/populate.rs                                 | 2 +-
 72 files changed, 102 insertions(+), 102 deletions(-)

diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md
index 2a9e2a0c8..c7a760264 100644
--- a/RELEASE_NOTES.md
+++ b/RELEASE_NOTES.md
@@ -104,7 +104,7 @@ finish our production components and the stability of the API's for longer term
 - Minimum TLS key length enforcement on server code.
 - Improvements to exit code returns on CLI commands.
 - Credential reset link timeout issues resolved.
-- Removed a lot of uses of `unwrap` and `expect` to improve reliabilty.
+- Removed a lot of uses of `unwrap` and `expect` to improve reliability.
 - Account policy framework is now in place.
 
 ## 2023-05-01 - Kanidm 1.1.0-beta13
@@ -333,7 +333,7 @@ bring the project this far! 🎉 🦀
 - Dynamic menus on CLI for auth factors when choices exist
 - Better handle missing resources for web ui elements at server startup
 - Add WAL checkpointing to improve disk usage
-- Oauth2 user interface flows for simple authorisation scenarioes
+- Oauth2 user interface flows for simple authorisation scenarios
 - Improve entry memory usage based on valueset rewrite
 - Allow online backups to be scheduled and taken
 - Reliability improvements for unixd components with missing sockets
diff --git a/book/src/access_control/intro.md b/book/src/access_control/intro.md
index 7bab02e67..411d587a0 100644
--- a/book/src/access_control/intro.md
+++ b/book/src/access_control/intro.md
@@ -20,7 +20,7 @@ possible harm that an attacker may make if they gain access to these roles.
 Kanidm supports [privilege access mode](../accounts/authentication_and_credentials.md) so that
 high-level permissions can be assigned to users who must reauthenticate before using those
 privileges. The privileges then are only accessible for a short period of time. This can allow you
-to assign high level permissions to regular persions accounts rather than requiring separete
+to assign high level permissions to regular person accounts rather than requiring separate
 privilege access accounts (PAA) or privileged access workstations (PAW).
 
 ## Assigning Permissions to Service Accounts
diff --git a/book/src/accounts/intro.md b/book/src/accounts/intro.md
index ad9f48e01..7dfaf8d82 100644
--- a/book/src/accounts/intro.md
+++ b/book/src/accounts/intro.md
@@ -61,7 +61,7 @@ within a short time window. However, these sessions always retain their _read_
 privileges - meaning that they can still access and view high levels of data at any time without
 reauthentication.
 
-In high risk environments you should still consider assigning seperate administration accounts to
+In high risk environments you should still consider assigning separate administration accounts to
 users if this is considered a risk.
 
 ## Recovering the Initial Admin Accounts
diff --git a/book/src/choosing_a_domain_name.md b/book/src/choosing_a_domain_name.md
index 0dd1d7cc5..a7221b944 100644
--- a/book/src/choosing_a_domain_name.md
+++ b/book/src/choosing_a_domain_name.md
@@ -42,7 +42,7 @@ selected to be the parent (toplevel) domain (`example.com`).
 
 Failure to use a unique subdomain may allow cookies to leak to other entities within your domain,
 and may allow webauthn to be used on entities you did not intend for which may or may not lead to
-some phishing scenarioes.
+some phishing scenarios.
 
 ## Examples
 
diff --git a/book/src/developers/designs/access_profiles_rework_2022.md b/book/src/developers/designs/access_profiles_rework_2022.md
index 2c3344604..262a63a6e 100644
--- a/book/src/developers/designs/access_profiles_rework_2022.md
+++ b/book/src/developers/designs/access_profiles_rework_2022.md
@@ -44,7 +44,7 @@ members can write self" meaning that any member of that group can write to thems
 themself.
 
 In the future we could also create different target/receiver specifiers to allow other extended
-management and delegation scenarioes. This improves the situation making things more flexible from
+management and delegation scenarios. This improves the situation making things more flexible from
 the current filter system. It also may allow filters to be simplified to remove the SELF uuid
 resolve step in some cases.
 
@@ -58,7 +58,7 @@ allowing us to move from filter based access controls to "group" targeted.
 
 A risk of filter based groups is "infinite churn" because of recursion. This can occur if you had a
 rule such a "and not memberof = self" on a dynamic group. Because of this, filters on dynamic groups
 may not use "memberof" unless they are internally provided by the kanidm project so that we can vet
-these rules as correct and without creating infinite recursion scenarioes.
+these rules as correct and without creating infinite recursion scenarios.
 
 ### Access rules extracted to ACI entries on targets
diff --git a/book/src/developers/designs/account_policy.rst b/book/src/developers/designs/account_policy.rst
index a8d1b32e2..09825a6e0 100644
--- a/book/src/developers/designs/account_policy.rst
+++ b/book/src/developers/designs/account_policy.rst
@@ -1,7 +1,7 @@
 Account Policy and Lockouts
 ---------------------------
 
-For accounts we need to be able to define securite constraints and limits to prevent malicious use
+For accounts we need to be able to define security constraints and limits to prevent malicious use
 or attacks from succeeding. While these attacks may have similar sources or goals, the defences to
 them may vary.
 
@@ -100,7 +100,7 @@ Hard Lock + Expiry/Active Time Limits
 
 It must be possible to expire an account so it no longer operates (IE temporary contractor) or
 accounts that can only operate after a known point in time (Student enrollments and their course
-commencment date).
+commencement date).
 
 This expiry must exist at the account level, but also on issued token/API password levels. This allows revocation of
 individual tokens, but also the expiry of the account and all tokens as a whole. This expiry may be
@@ -120,7 +120,7 @@ Application Passwords / Issued Oauth Tokens
 ===========================================
 
 * Relates to claims
-* Need their own expirys
+* Need their own expiries
 * Need ratelimit as above?
 
diff --git a/book/src/developers/designs/auth.md b/book/src/developers/designs/auth.md
index e8863867f..56c2aaf45 100644
--- a/book/src/developers/designs/auth.md
+++ b/book/src/developers/designs/auth.md
@@ -160,7 +160,7 @@ client to be able to construct correct authorisations.
   cookie keys to prevent forgery of writable master cookies)
 
 - cookies can request tokens, tokens are signed cbor that contains the set of group uuids + names
-  derferenced so that a client can make all authorisation decisions from a single datapoint
+  dereferenced so that a client can make all authorisation decisions from a single datapoint
 
 - Groups require the ability to be ephemeral/temporary or permanent.
 
@@ -252,7 +252,7 @@ what reqwest supports).
 For more consideration, see,
 
 ```
 
-To enable legacy cryptograhy (RSA PKCS1-5 SHA256):
+To enable legacy cryptography (RSA PKCS1-5 SHA256):
 
 ```bash
 kanidm system oauth2 warning-enable-legacy-crypto
diff --git a/book/src/integrations/sssd.md b/book/src/integrations/sssd.md
index da88fbc78..53c6745e5 100644
--- a/book/src/integrations/sssd.md
+++ b/book/src/integrations/sssd.md
@@ -125,7 +125,7 @@ override_homedir = /home/%U
 ignore_group_members = True
 
 # Disable caching of credentials by SSSD. SSSD uses less secure local password storage
-# mechanisims, and is a risk for credential disclosure.
+# mechanisms, and is a risk for credential disclosure.
 #
 # ⚠️ NEVER CHANGE THIS VALUE ⚠️
 cache_credentials = False
diff --git a/book/src/security_hardening.md b/book/src/security_hardening.md
index 875e50dc4..0abc9cae3 100644
--- a/book/src/security_hardening.md
+++ b/book/src/security_hardening.md
@@ -154,7 +154,7 @@ docker run --rm -i -t -u 1000:1000 -v kanidmd:/data kanidm/server:latest /sbin/k
 
 ## Minimum TLS key lengths
 
-We enforce a minimum RSA and ECDSA key sizes. If your key is insufficently large, the server will
+We enforce minimum RSA and ECDSA key sizes. If your key is insufficiently large, the server will
 refuse to start and inform you of this.
 
 Currently accepted key sizes are minimum 2048 bit RSA and 224 bit ECDSA.
diff --git a/book/src/server_configuration.md b/book/src/server_configuration.md
index 63ec0857a..44c61ce92 100644
--- a/book/src/server_configuration.md
+++ b/book/src/server_configuration.md
@@ -90,7 +90,7 @@ text=However you choose to run your server, you should document and keep note of
 ### Default Admin Accounts
 
 Now that the server is running, you can initialise the default admin accounts. There are two
-parallel admin accounts that have seperate functions. `admin` which manages Kanidm's configuration,
+parallel admin accounts that have separate functions. `admin` which manages Kanidm's configuration,
 and `idm_admin` which manages accounts and groups in Kanidm.
 
 You should consider these as "break-glass" accounts. They exist to allow the server to be
diff --git a/book/src/support.md b/book/src/support.md
index 28517bb7b..edbbb0b8d 100644
--- a/book/src/support.md
+++ b/book/src/support.md
@@ -10,7 +10,7 @@ missing or if you have a question, please
 
 The version of this document found
 [on the project page](https://github.com/kanidm/kanidm/blob/master/book/src/support.md) is
-considered authoritive and applies to all versions.
+considered authoritative and applies to all versions.
 
 ## Release Schedule and Versioning
 
@@ -87,7 +87,7 @@ before the servers release.
 ### API stability
 
 Kanidm has a number of APIs with different stability guarantees. APIs that are stable will only
-recieve breaking changes in the case of an ethics, security or potential data corruption issue.
+receive breaking changes in the case of an ethics, security or potential data corruption issue.
 
 Stable APIs are:
 
@@ -123,7 +123,7 @@ All code changes will include full type-casting wherever possible.
 
 ### Project Discretion
 
-In the event of an unforseen or extraordinary situation, the project team may make decisions
+In the event of an unforeseen or extraordinary situation, the project team may make decisions
 contradictory to this document at their discretion. In these situation, the project team will make
 every effort to communicate the reason for the decision and will attempt to minimise disruption to
 users.
diff --git a/book/src/sync/concepts.md b/book/src/sync/concepts.md
index 16599be9e..6ee428bf4 100644
--- a/book/src/sync/concepts.md
+++ b/book/src/sync/concepts.md
@@ -94,7 +94,7 @@ By default Kanidm assumes that authority over synchronised entries is retained b
 This means that synchronised entries can not be written to in any capacity outside of a small
 number of internal Kanidm internal attributes.
 
-An adminisrator may wish to allow synchronised entries to have some attributes written by the
+An administrator may wish to allow synchronised entries to have some attributes written by the
 instance locally. An example is allowing passkeys to be created on Kanidm when the external
 synchronisation provider does not supply them.
diff --git a/examples/kanidm-ipa-sync b/examples/kanidm-ipa-sync
index b814f679b..8e122648f 100644
--- a/examples/kanidm-ipa-sync
+++ b/examples/kanidm-ipa-sync
@@ -26,7 +26,7 @@ ipa_sync_pw = "directory manager password"
 # The basedn to examine.
 ipa_sync_base_dn = "dc=ipa,dc=dev,dc=kanidm,dc=com"
 
-# By default Kanidm seperates the primary account password and credentials from
+# By default Kanidm separates the primary account password and credentials from
 # the unix credential. This allows the unix password to be isolated from the
 # account password so that compromise of one doesn't compromise the other. However
 # this can be surprising for new users during a migration. This boolean allows the
diff --git a/examples/kanidm-ldap-sync b/examples/kanidm-ldap-sync
index eaaea1d2b..a0a26720c 100644
--- a/examples/kanidm-ldap-sync
+++ b/examples/kanidm-ldap-sync
@@ -32,7 +32,7 @@ ldap_sync_base_dn = "dc=ldap,dc=dev,dc=kanidm,dc=com"
 ldap_filter = "(|(objectclass=person)(objectclass=posixgroup))"
 # ldap_filter = "(cn=\"my value\")"
 
-# By default Kanidm seperates the primary account password and credentials from
+# By default Kanidm separates the primary account password and credentials from
 # the unix credential. This allows the unix password to be isolated from the
 # account password so that compromise of one doesn't compromise the other. However
 # this can be surprising for new users during a migration. This boolean allows the
diff --git a/examples/unixd.macos b/examples/unixd.macos
index ebbacb04d..c5e2643ab 100644
--- a/examples/unixd.macos
+++ b/examples/unixd.macos
@@ -1,7 +1,7 @@
 # this example configures kanidm-unixd for testing on macos
 db_path = "/tmp/kanidm-unixd"
-sock_path = "/tmp/kanimd_unixd.sock"
-task_sock_path = "/tmp/kanimd_unidx_task.sock"
+sock_path = "/tmp/kanidm_unixd.sock"
+task_sock_path = "/tmp/kanidm_unixd_task.sock"
 # some documentation is here: https://github.com/kanidm/kanidm/blob/master/book/src/pam_and_nsswitch.md
 pam_allowed_login_groups = ["posix_group"]
 # default_shell = "/bin/sh"
diff --git a/libs/crypto/src/lib.rs b/libs/crypto/src/lib.rs
index 6e7b8dc66..bf5605480 100644
--- a/libs/crypto/src/lib.rs
+++ b/libs/crypto/src/lib.rs
@@ -304,7 +304,7 @@ impl CryptoPolicy {
         //
         // We also need to balance this against the fact we are a database, and we do have
         // caches. We also don't want to over-use RAM, especially because in the worst case
-        // every thread will be operationg in argon2id at the same time. That means
+        // every thread will be operating in argon2id at the same time. That means
         // thread x ram will be used. If we had 8 threads at 64mb of ram, that would require
         // 512mb of ram alone just for hashing. This becomes worse as core counts scale, with
         // 24 core xeons easily reaching 1.5GB in these cases.
diff --git a/platform/debian/build_kanidm.sh b/platform/debian/build_kanidm.sh
index d04d25b20..8e0ae9d4c 100755
--- a/platform/debian/build_kanidm.sh
+++ b/platform/debian/build_kanidm.sh
@@ -67,7 +67,7 @@ if [ "$(which cargo | wc -l)" -eq 0 ]; then
 fi
 
 # this assumes the versions are in lock-step, which is fine at the moment.
-# Debian is picky abour dashes in version strings, so a bit of conversion
+# Debian is picky about dashes in version strings, so a bit of conversion
 # is needed for the first one to prevent interference.
 KANIDM_VERSION="$(grep -ioE 'version.*' Cargo.toml | head -n1 | awk '{print $NF}' | tr -d '"' | sed -e 's/-/~/')"
 
diff --git a/platform/opensuse/kanidm-unixd.service b/platform/opensuse/kanidm-unixd.service
index 03adb76fd..3ccb27b97 100644
--- a/platform/opensuse/kanidm-unixd.service
+++ b/platform/opensuse/kanidm-unixd.service
@@ -7,7 +7,7 @@ After=chronyd.service nscd.service ntpd.service network-online.target suspend.ta
 Before=systemd-user-sessions.service sshd.service nss-user-lookup.target
 Wants=nss-user-lookup.target
 # While it seems confusing, we need to be after nscd.service so that the
-# Conflicts will triger and then automatically stop it.
+# Conflicts will trigger and then automatically stop it.
 Conflicts=nscd.service
 
 [Service]
diff --git a/proto/src/oauth2.rs b/proto/src/oauth2.rs
index e5ba2711a..1215d2b0f 100644
--- a/proto/src/oauth2.rs
+++ b/proto/src/oauth2.rs
@@ -290,7 +290,7 @@ pub enum PkceAlg {
 #[serde(rename_all = "UPPERCASE")]
 /// Algorithms supported for token signatures. Prefers `ES256`
 pub enum IdTokenSignAlg {
-    // WE REFUSE TO SUPPORT NONE. DONT EVEN ASK. IT WON'T HAPPEN.
+    // WE REFUSE TO SUPPORT NONE. DON'T EVEN ASK. IT WON'T HAPPEN.
     ES256,
     RS256,
 }
diff --git a/proto/src/v1/mod.rs b/proto/src/v1/mod.rs
index 6a97f5c97..f30c694e2 100644
--- a/proto/src/v1/mod.rs
+++ b/proto/src/v1/mod.rs
@@ -41,7 +41,7 @@ impl Display for AccountType {
 // entry/ava/filter types. These related deeply to schema.
 
 /// The current purpose of a User Auth Token. It may be read-only, read-write
-/// or privilige capable (able to step up to read-write after re-authentication).
+/// or privilege capable (able to step up to read-write after re-authentication).
 #[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
 #[serde(rename_all = "lowercase")]
 pub enum UatPurposeStatus {
diff --git a/server/core/src/actors/internal.rs b/server/core/src/actors/internal.rs
index 22fcd9cc5..40c18777f 100644
--- a/server/core/src/actors/internal.rs
+++ b/server/core/src/actors/internal.rs
@@ -128,7 +128,7 @@ impl QueryServerWriteV1 {
             .await;
 
         if retry {
-            // An error occured, retry each operation one at a time.
+            // An error occurred, retry each operation one at a time.
             for da in da_batch.iter() {
                 let eventid = Uuid::new_v4();
                 let span = span!(Level::INFO, "process_delayed_action_retried", uuid = ?eventid);
diff --git a/server/core/src/https/mod.rs b/server/core/src/https/mod.rs
index bc60b1e60..f3f9b8da0 100644
--- a/server/core/src/https/mod.rs
+++ b/server/core/src/https/mod.rs
@@ -462,7 +462,7 @@ async fn server_loop(
             info!("Loading client certificates from {}", client_ca.display());
 
             let verify = SslVerifyMode::PEER;
-            // In future we may add a "require mTLS option" which would necesitate this.
+            // In future we may add a "require mTLS option" which would necessitate this.
             // verify.insert(SslVerifyMode::FAIL_IF_NO_PEER_CERT);
             tls_builder.set_verify(verify);
 
@@ -494,7 +494,7 @@ async fn server_loop(
                     item.file_name()
                         .to_str()
                         // Hashed certs end in .0
-                        // Hsahed crls are .r0
+                        // Hashed crls are .r0
                         .map(|fname| fname.ends_with(".0"))
                         .unwrap_or_default()
                 }) {
diff --git a/server/core/src/https/views/login.rs b/server/core/src/https/views/login.rs
index 82c93a1ad..cf19c2178 100644
--- a/server/core/src/https/views/login.rs
+++ b/server/core/src/https/views/login.rs
@@ -635,7 +635,7 @@ async fn view_login_step(
             match issue {
                 AuthIssueSession::Token => {
                     error!(
-                        "Impossible state, should not recieve token in a htmx view auth flow"
+                        "Impossible state, should not receive token in a htmx view auth flow"
                     );
                     return Err(OperationError::InvalidState);
                 }
diff --git a/server/lib/src/entry.rs b/server/lib/src/entry.rs
index 1ccdea2b0..d18b28a3c 100644
--- a/server/lib/src/entry.rs
+++ b/server/lib/src/entry.rs
@@ -2972,10 +2972,10 @@ impl Entry {
             .unwrap_or(false)
     }
 
-    // Since EntryValid/Invalid is just about class adherenece, not Value correctness, we
+    // Since EntryValid/Invalid is just about class adherence, not Value correctness, we
     // can now apply filters to invalid entries - why? Because even if they aren't class
     // valid, we still have strict typing checks between the filter -> entry to guarantee
-    // they should be functional. We'll never match something that isn't syntactially valid.
+    // they should be functional. We'll never match something that isn't syntactically valid.
     #[inline(always)]
     #[instrument(level = "trace", name = "entry::entry_match_no_index", skip(self))]
     /// Test if the following filter applies to and matches this entry.
diff --git a/server/lib/src/filter.rs b/server/lib/src/filter.rs
index a106ff296..6c8390dfc 100644
--- a/server/lib/src/filter.rs
+++ b/server/lib/src/filter.rs
@@ -376,7 +376,7 @@ pub enum FilterPlan {
 ///
 /// This `Filter` validation state is in the `STATE` attribute and will be either `FilterInvalid`
 /// or `FilterValid`. The `Filter` must be checked by the schema to move to `FilterValid`. This
-/// helps to prevent errors at compile time to assert `Filters` are secuerly. checked
+/// helps to prevent errors at compile time to assert `Filters` are securely checked
 ///
 /// [`Entry`]: ../entry/struct.Entry.html
 #[derive(Clone, Hash, Ord, Eq, PartialOrd, PartialEq)]
@@ -634,7 +634,7 @@ impl Filter {
         //
         // YOLO.
         // tl;dr - blindly accept that this filter and it's ava's MUST have
-        // been normalised and exist in schema. If they don't things may subtely
+        // been normalised and exist in schema. If they don't things may subtly
        // break, fail, or explode. As subtle as an explosion can be.
         Filter {
             state: FilterValid {
diff --git a/server/lib/src/macros.rs b/server/lib/src/macros.rs
index 2276de470..94c8764b3 100644
--- a/server/lib/src/macros.rs
+++ b/server/lib/src/macros.rs
@@ -520,7 +520,7 @@ macro_rules! vs_utf8 {
 
 #[allow(unused_macros)]
 #[macro_export]
-/// Takes EntryClass objects and makes a VaueSetIutf8
+/// Takes EntryClass objects and makes a ValueSetIutf8
 macro_rules! vs_iutf8 {
     () => (
         compile_error!("ValueSetIutf8 needs at least 1 element")
diff --git a/server/lib/src/plugins/refint.rs b/server/lib/src/plugins/refint.rs
index e9027fac9..7ebfe165a 100644
--- a/server/lib/src/plugins/refint.rs
+++ b/server/lib/src/plugins/refint.rs
@@ -41,7 +41,7 @@ impl ReferentialIntegrity {
         // F_inc(lusion). All items of inner must be 1 or more, or the filter
         // will fail. This will return the union of the inclusion after the
-        // operationn.
+        // operation.
         let filt_in = filter!(f_inc(inner));
         let b = qs.internal_exists(filt_in).map_err(|e| {
             admin_error!(err = ?e, "internal exists failure");
@@ -156,7 +156,7 @@ impl Plugin for ReferentialIntegrity {
     // Yes, this does mean we do more work to add/index/rollback in an error
     // condition, *but* it means we only have developed a single verification
     // so we can assert stronger trust in it's correct operation and interaction
-    // in complex scenarioes - It actually simplifies the check from "could
+    // in complex scenarios - It actually simplifies the check from "could
     // be in cand AND db" to simply "is it in the DB?".
     #[instrument(level = "debug", name = "refint_post_create", skip(qs, cand, _ce))]
     fn post_create(
diff --git a/server/lib/src/repl/consumer.rs b/server/lib/src/repl/consumer.rs
index e31608675..63265b431 100644
--- a/server/lib/src/repl/consumer.rs
+++ b/server/lib/src/repl/consumer.rs
@@ -185,7 +185,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         //
         let (cand, pre_cand): (Vec<_>, Vec<_>) = all_updates_valid
             .into_iter()
-            // We previously excluded this to avoid doing unnecesary work on entries that
+            // We previously excluded this to avoid doing unnecessary work on entries that
             // were moving to a conflict state, and the survivor was staying "as is" on this
             // node. However, this gets messy with dyngroups and memberof, where on a conflict
             // the memberships are deleted across the replication boundary. In these cases
@@ -418,7 +418,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
 
         // Reload the domain version, doing any needed migrations.
         //
-        // While it seems odd that we do the migrations after we recieve the entries,
+        // While it seems odd that we do the migrations after we receive the entries,
         // this is because the supplier will already be sending us everything that
         // was just migrated. As a result, we only need to apply the migrations to entries
         // that were not on the supplier, and therefore need updates here.
diff --git a/server/lib/src/repl/ruv.rs b/server/lib/src/repl/ruv.rs
index 6df87efb6..3b1194d37 100644
--- a/server/lib/src/repl/ruv.rs
+++ b/server/lib/src/repl/ruv.rs
@@ -617,7 +617,7 @@ impl<'a> ReplicationUpdateVectorWriteTransaction<'a> {
         // Since the ctx range comes from the supplier, when we rebuild due to the
         // state machine then some values may not exist since they were replaced
         // or updated. It's also possible that the imported range maximums *may not*
-        // exist especially in three way replication scenarioes where S1:A was the S1
+        // exist especially in three way replication scenarios where S1:A was the S1
         // maximum but is replaced by S2:B. This would make S1:A still it's valid
         // maximum but no entry reflects that in it's change state.
         let mut valid = true;
@@ -874,7 +874,7 @@ impl<'a> ReplicationUpdateVectorWriteTransaction<'a> {
     to allow the comparison here to continue even if it's ruv is cleaned. Or, we need
     to have a delayed trim on the range that is 2x the normal trim range to give a buffer?
 
-    Mostly longer ruv/cid ranges aren't an issue for us, so could we just maek these ranges
+    Mostly longer ruv/cid ranges aren't an issue for us, so could we just make these ranges
     really large?
 
     NOTE: For now we do NOT trim out max CID's of any s_uuid so that we don't have to confront
diff --git a/server/lib/src/repl/supplier.rs b/server/lib/src/repl/supplier.rs
index 93fa3a675..5e9145cd7 100644
--- a/server/lib/src/repl/supplier.rs
+++ b/server/lib/src/repl/supplier.rs
@@ -64,7 +64,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
                 err
             })?;
 
-        // Can you process the keyhande?
+        // Can you process the keyhandle?
         let key_cert = match maybe_key_handle {
             Some(KeyHandle::X509Key { private, x509 }) => (private, x509),
             /*
@@ -172,7 +172,7 @@ impl<'a> QueryServerReadTransaction<'a> {
                 return Ok(ReplIncrementalContext::UnwillingToSupply);
             }
             RangeDiffStatus::NoRUVOverlap => {
-                error!("Replication Critical - Consumers RUV has desynchronsied and diverged! This must be immediately investigated!");
+                error!("Replication Critical - Consumers RUV has desynchronised and diverged! This must be immediately investigated!");
                 debug!(consumer_ranges = ?ctx_ranges);
                 debug!(supplier_ranges = ?our_ranges);
                 return Ok(ReplIncrementalContext::UnwillingToSupply);
diff --git a/server/lib/src/repl/tests.rs b/server/lib/src/repl/tests.rs
index 907a48f9d..c8b1eac4e 100644
--- a/server/lib/src/repl/tests.rs
+++ b/server/lib/src/repl/tests.rs
@@ -98,7 +98,7 @@ fn repl_incremental(
     trace!(?b_ruv_range);
 
     // May need to be "is subset" for future when we are testing
-    // some more complex scenarioes.
+    // some more complex scenarios.
     let valid = match ReplicationUpdateVector::range_diff(&a_ruv_range, &b_ruv_range) {
         RangeDiffStatus::Ok(require) => require.is_empty(),
         _ => false,
diff --git a/server/lib/src/schema.rs b/server/lib/src/schema.rs
index 7560c1213..60fc4c29d 100644
--- a/server/lib/src/schema.rs
+++ b/server/lib/src/schema.rs
@@ -2585,7 +2585,7 @@ mod tests {
             ..Default::default()
         };
 
-        // Since valueset now disallows such shenangians at a type level, this can't occur
+        // Since valueset now disallows such shenanigans at a type level, this can't occur
         /*
         let rvs = unsafe {
             valueset![
diff --git a/server/lib/src/server/access/delete.rs b/server/lib/src/server/access/delete.rs
index 7342e019f..8b49a22b6 100644
--- a/server/lib/src/server/access/delete.rs
+++ b/server/lib/src/server/access/delete.rs
@@ -107,7 +107,7 @@ fn delete_filter_entry<'a>(
                     return false;
                 }
             } else {
-                // Can not satsify.
+                // Can not satisfy.
                 return false;
             }
         }
diff --git a/server/lib/src/server/access/mod.rs b/server/lib/src/server/access/mod.rs
index 6dc7aea2a..4ce0846b9 100644
--- a/server/lib/src/server/access/mod.rs
+++ b/server/lib/src/server/access/mod.rs
@@ -2673,7 +2673,7 @@ mod tests {
     }
 
     #[test]
-    fn test_access_ouath2_dyn_search() {
+    fn test_access_oauth2_dyn_search() {
         sketching::test_init();
         // Test that an account that is granted a scope to an oauth2 rs is granted
         // the ability to search that rs.
diff --git a/server/lib/src/server/access/modify.rs b/server/lib/src/server/access/modify.rs
index 0df426e7b..488fb73cc 100644
--- a/server/lib/src/server/access/modify.rs
+++ b/server/lib/src/server/access/modify.rs
@@ -100,7 +100,7 @@ pub(super) fn apply_modify_access<'a>(
                 return None;
             }
         } else {
-            // Can not satsify.
+            // Can not satisfy.
             return None;
         }
     }
diff --git a/server/lib/src/server/access/search.rs b/server/lib/src/server/access/search.rs
index 47dc2373a..d61e95f1f 100644
--- a/server/lib/src/server/access/search.rs
+++ b/server/lib/src/server/access/search.rs
@@ -137,7 +137,7 @@ fn search_filter_entry<'a>(
                 return None
             }
         } else {
-            // Can not satsify.
+            // Can not satisfy.
             return None
         }
     }
@@ -240,7 +240,7 @@ fn search_sync_account_filter_entry<'a>(
 
     if sync_source_match {
         // We finally got here!
-        security_debug!(entry = ?entry.get_uuid(), ident = ?iuser.entry.get_uuid2rdn(), "ident is a synchronsied account from this sync account");
+        security_debug!(entry = ?entry.get_uuid(), ident = ?iuser.entry.get_uuid2rdn(), "ident is a synchronised account from this sync account");
 
         return AccessResult::Allow(btreeset!(
             Attribute::Class.as_ref(),
diff --git a/server/lib/src/server/keys/internal.rs b/server/lib/src/server/keys/internal.rs
index 1627cc47f..3aad14dc4 100644
--- a/server/lib/src/server/keys/internal.rs
+++ b/server/lib/src/server/keys/internal.rs
@@ -1165,7 +1165,7 @@ mod tests {
         // Scope to limit the key object
     }
 
-    // Will fail to be signed with the former key, since it is now revoked, and the ct preceeds
+    // Will fail to be signed with the former key, since it is now revoked, and the ct precedes
     // the validity of the new key
     {
         let key_object_loaded = write_txn
diff --git a/server/lib/src/server/keys/provider.rs b/server/lib/src/server/keys/provider.rs
index 57fc61165..a87aba1e2 100644
--- a/server/lib/src/server/keys/provider.rs
+++ b/server/lib/src/server/keys/provider.rs
@@ -324,7 +324,7 @@ mod tests {
             .internal_apply_domain_migration(DOMAIN_LEVEL_6)
             .expect("Unable to set domain level to version 6");
 
-        // The internel key provider is created from dl 5 to 6
+        // The internal key provider is created from dl 5 to 6
         let key_provider_object = write_txn
             .internal_search_uuid(UUID_KEY_PROVIDER_INTERNAL)
             .expect("Unable to find key provider entry.");
diff --git a/server/lib/src/server/migrations.rs b/server/lib/src/server/migrations.rs
index 5392ae25e..73a2c0910 100644
--- a/server/lib/src/server/migrations.rs
+++ b/server/lib/src/server/migrations.rs
@@ -34,7 +34,7 @@ impl QueryServer {
         // Remember, that this would normally mean that it's possible for schema
         // to be mis-indexed (IE we index the new schemas here before we read
         // the schema to tell us what's indexed), but because we have the in
-        // mem schema that defines how schema is structuded, and this is all
+        // mem schema that defines how schema is structured, and this is all
         // marked "system", then we won't have an issue here.
         write_txn
             .initialise_schema_core()
diff --git a/server/lib/src/server/mod.rs b/server/lib/src/server/mod.rs
index be1828d32..996cd6103 100644
--- a/server/lib/src/server/mod.rs
+++ b/server/lib/src/server/mod.rs
@@ -1338,7 +1338,7 @@ impl QueryServer {
     }
 
     pub async fn read(&self) -> QueryServerReadTransaction<'_> {
-        // Get a read ticket. Basicly this forces us to queue with other readers, while preventing
+        // Get a read ticket. Basically this forces us to queue with other readers, while preventing
         // us from competing with writers on the db tickets. This tilts us to write prioritising
         // on db operations by always making sure a writer can get a db ticket.
         let read_ticket = if cfg!(test) {
diff --git a/server/lib/src/server/recycle.rs b/server/lib/src/server/recycle.rs
index edb978ff0..18ac5c7e1 100644
--- a/server/lib/src/server/recycle.rs
+++ b/server/lib/src/server/recycle.rs
@@ -676,7 +676,7 @@ mod tests {
             &["22b47373-d123-421f-859e-9ddd8ab14a2a"],
         );
 
-        // Need a user in A -> B -> User, such that A/B are re-adde as MO
+        // Need a user in A -> B -> User, such that A/B are re-added as MO
         let u2 = create_user("u2", "5c19a4a2-b9f0-4429-b130-5782de5fddda");
         let g2a = create_group(
             "g2a",
diff --git a/server/lib/src/value.rs b/server/lib/src/value.rs
index 6bf8300f5..4b4ffb1f9 100644
--- a/server/lib/src/value.rs
+++ b/server/lib/src/value.rs
@@ -2123,7 +2123,7 @@ impl Value {
             if UNICODE_CONTROL_RE.is_match(s) {
                 error!("value contains invalid unicode control character",);
                 // Trace only, could be an injection attack of some kind.
-                trace!(?s, "Invalid Uncode Control");
+                trace!(?s, "Invalid Unicode Control");
                 false
             } else {
                 true
diff --git a/server/lib/src/valueset/image/jpg.rs b/server/lib/src/valueset/image/jpg.rs
index e6196f4ba..63f14f746 100644
--- a/server/lib/src/valueset/image/jpg.rs
+++ b/server/lib/src/valueset/image/jpg.rs
@@ -115,7 +115,7 @@ fn test_jpg_has_trailer() {
         .expect("Failed to read file");
     assert!(!has_trailer(&file_contents).expect("Failed to check for JPEG trailer"));
 
-    // checking a known bad imagee
+    // checking a known bad image
     let file_contents = std::fs::read(format!(
         "{}/src/valueset/image/test_images/windows11_3_cropped.jpg",
         env!("CARGO_MANIFEST_DIR")
diff --git a/server/lib/src/valueset/image/png.rs b/server/lib/src/valueset/image/png.rs
index f655c50eb..685399d53 100644
--- a/server/lib/src/valueset/image/png.rs
+++ b/server/lib/src/valueset/image/png.rs
@@ -5,7 +5,7 @@ static PNG_CHUNK_END: &[u8; 4] = b"IEND";
 
 #[derive(Debug)]
 /// This is used as part of PNG validation to identify if we've seen the end of the file, and if it suffers from
-/// Acropalypyse issues by having trailing data.
+/// Acropalypse issues by having trailing data.
 enum PngChunkStatus {
     SeenEnd { has_trailer: bool },
     MoreChunks,
diff --git a/server/lib/src/valueset/session.rs b/server/lib/src/valueset/session.rs
index 679c19bd8..f04a7b2bd 100644
--- a/server/lib/src/valueset/session.rs
+++ b/server/lib/src/valueset/session.rs
@@ -282,7 +282,7 @@ impl ValueSetT for ValueSetSession {
                 // is replication safe since other replicas will also be performing
                 // the same operation on merge, since we trim by session issuance order.
 
-                // This is a "slow path". This is becase we optimise session storage
+                // This is a "slow path". This is because we optimise session storage
                 // based on fast session lookup, so now we need to actually create an
                 // index based on time. We need to also clone here since we need to mutate
                 // self.map which would violate mut/imut.
diff --git a/server/testkit/tests/integration.rs b/server/testkit/tests/integration.rs
index 0657290ad..29711927b 100644
--- a/server/testkit/tests/integration.rs
+++ b/server/testkit/tests/integration.rs
@@ -124,7 +124,7 @@ async fn test_webdriver_user_login(rsclient: kanidm_client::KanidmClient) {
     let username_form = handle_error!(
         c,
         c.form(Locator::Id("login")).await,
-        "Coudln't find login form"
+        "Couldn't find login form"
     );
     handle_error!(
         c,
diff --git a/tools/cli/src/opt/kanidm.rs b/tools/cli/src/opt/kanidm.rs
index d159d3ff3..7b2de34e3 100644
--- a/tools/cli/src/opt/kanidm.rs
+++ b/tools/cli/src/opt/kanidm.rs
@@ -388,7 +388,7 @@ pub struct AccountNamedTagPkOpt {
 }
 
 #[derive(Debug, Args)]
-/// Command-line options for account credental use-reset-token
+/// Command-line options for account credential use-reset-token
 pub struct UseResetTokenOpt {
     #[clap(flatten)]
     copt: CommonOpt,
diff --git a/tools/iam_migrations/freeipa/src/main.rs b/tools/iam_migrations/freeipa/src/main.rs
index a73080a53..8209ac2f3 100644
--- a/tools/iam_migrations/freeipa/src/main.rs
+++ b/tools/iam_migrations/freeipa/src/main.rs
@@ -1001,7 +1001,7 @@ fn ipa_to_scim_entry(
             .into(),
         ))
     } else if oc.contains("ipatokentotp") {
-        // Skip for now, we don't supporty multiple totp yet.
+        // Skip for now, we don't support multiple totp yet.
         Ok(None)
     } else {
         debug!("Skipping entry {} with oc {:?}", dn, oc);
diff --git a/tools/orca/README.md b/tools/orca/README.md
index f2889dbc6..cadbecb8c 100644
--- a/tools/orca/README.md
+++ b/tools/orca/README.md
@@ -44,7 +44,7 @@ the parameters of the test you wish to perform.
 
 A statefile is the fully generated state of all entries that will be created and then used in the
 load test. The state file can be recreated from a profile and it's seed at anytime. The reason to
-seperate these is that state files may get quite large, when what you really just need is the
+separate these is that state files may get quite large, when what you really just need is the
 ability to recreate them when needed.
 
 This state file also contains all the details about accounts and entries so that during test
diff --git a/tools/orca/src/populate.rs b/tools/orca/src/populate.rs
index 51efc3066..0175e7a3e 100644
--- a/tools/orca/src/populate.rs
+++ b/tools/orca/src/populate.rs
@@ -40,7 +40,7 @@ async fn preflight_person(
         }
     }
 
-    // For each role we are part of, did we have other permissions required to fufil that?
+    // For each role we are part of, did we have other permissions required to fulfil that?
     for role in &person.roles {
         if let Some(need_groups) = role.requires_membership_to() {
             for group_name in need_groups {