diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml new file mode 100644 index 000000000..5fbb7ebe8 --- /dev/null +++ b/.github/workflows/codespell.yml @@ -0,0 +1,27 @@ +--- +name: Spell Check + +"on": + push: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true +jobs: + codespell: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + clean: false + + - name: Install python 3.10 + uses: actions/setup-python@v4 + with: + python-version: "3.10" + + - name: Install and run codespell + run: | + python -m pip install codespell + make codespell diff --git a/.github/workflows/kanidm_book.yml b/.github/workflows/kanidm_book.yml index 63e22dcf0..aea0215b4 100644 --- a/.github/workflows/kanidm_book.yml +++ b/.github/workflows/kanidm_book.yml @@ -25,7 +25,8 @@ jobs: libpam0g-dev - name: Setup deno - uses: denoland/setup-deno@v1 # Documentation: https://github.com/denoland/setup-deno + # Documentation: https://github.com/denoland/setup-deno + uses: denoland/setup-deno@v1 with: deno-version: v1.x @@ -56,6 +57,7 @@ jobs: uses: actions/setup-python@v4 with: python-version: "3.10" + - name: pykanidm docs run: | python -m pip install poetry diff --git a/FAQ.md b/FAQ.md index 566c25815..e36eed102 100644 --- a/FAQ.md +++ b/FAQ.md @@ -11,7 +11,7 @@ projects can come in different forms so I'll answer to a few of them: If it's not in Rust, it's not ellegible for inclusion. There is a single exception today (rlm python) but it's very likely this will also be removed in the future. Keeping a single language -helps with testing, but also makes the project more accesible and consistent to developers. +helps with testing, but also makes the project more accessible and consistent to developers. Additionally, features exist in Rust that help to improve quality of the project from development to production. @@ -40,7 +40,7 @@ communicating to a real server. Many developer choices have already been made to is the most important aspect of the project to ensure that every feature is high quality and reliable. -Additon of extra projects or dependencies, would violate this principle and lead to a situation +Addition of extra projects or dependencies, would violate this principle and lead to a situation where it would not be possible to effectively test for all developers. ## Why don't you use Raft/Etcd/MongoDB/Other to solve replication? @@ -54,11 +54,11 @@ CAP theorem states that in a database you must choose only two of the three poss - Consistency - All servers in a topology see the same data at all times - Availability - All servers in a a topology can accept write operations at all times -- Partitioning - In the case of a network seperation in the topology, all systems can continue to +- Partitioning - In the case of a network separation in the topology, all systems can continue to process read operations Many protocols like Raft or Etcd are databases that provide PC guarantees. They guarantee that they -are always consistent, and can always be read in the face of patitioning, but to accept a write, +are always consistent, and can always be read in the face of partitioning, but to accept a write, they must not be experiencing a partitioning event. Generally this is achieved by the fact that these systems elect a single node to process all operations, and then re-elect a new node in the case of partitioning events. 
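As a rough illustration of the majority rule these consensus systems depend on (toy code only, not any real Raft library API), writes are accepted only while a strict majority of nodes is reachable:

```rust
// A minority partition must refuse writes; only a strict majority
// (quorum) side of a split can elect a leader and keep writing.
fn has_quorum(reachable_nodes: usize, cluster_size: usize) -> bool {
    reachable_nodes > cluster_size / 2
}

fn main() {
    // A 5 node cluster split 3/2: only the 3 node side keeps accepting writes.
    assert!(has_quorum(3, 5));
    assert!(!has_quorum(2, 5));
}
```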
The elections will fail if a quorum is not met disallowing writes @@ -77,12 +77,12 @@ _without_ communication between the nodes. ## Update Resolutionn Many databases do exist that are PA, such as CouchDB or MongoDB. However, they often do not have the -properties required in update resoultion that is required for Kanidm. +properties required in update resolution that is required for Kanidm. An example of this is that CouchDB uses object-level resolution. This means that if two servers update the same entry the "latest write wins". An example of where this won't work for Kanidm is if one server locks the account as an admin is revoking the access of an account, but another account -updates the username. If the username update happenned second, the lock event would be lost creating +updates the username. If the username update happened second, the lock event would be lost creating a security risk. There are certainly cases where this resolution method is valid, but Kanidm is not one. diff --git a/Makefile b/Makefile index 52a1039a1..2f7256d6e 100644 --- a/Makefile +++ b/Makefile @@ -117,6 +117,15 @@ prep: cargo outdated -R cargo audit +.PHONY: codespell +codespell: + codespell -c \ + -L crate,unexpect,Pres,pres,ACI,aci,te,ue \ + --skip='./target,./pykanidm/.venv,./pykanidm/.mypy_cache,./.mypy_cache' \ + --skip='./docs/*,./.git' \ + --skip='./kanidmd_web_ui/src/external,./kanidmd_web_ui/pkg/external' \ + --skip='./kanidmd/lib/src/constants/system_config.rs,./pykanidm/site,./kanidmd/lib/src/constants/*.json' + .PHONY: test/pykanidm/pytest test/pykanidm/pytest: cd pykanidm && \ @@ -142,7 +151,8 @@ test/pykanidm: test/pykanidm/pytest test/pykanidm/mypy test/pykanidm/pylint .PHONY: test/doc/format test/doc/format: ## Format docs and the Kanidm book - find . -type f -name \*.md -exec deno fmt --check $(MARKDOWN_FORMAT_ARGS) "{}" + + find . -type f -not -path './target/*' -name \*.md \ + -exec deno fmt --check $(MARKDOWN_FORMAT_ARGS) "{}" + ######################################################################## diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 190ddd1a0..3aa00453e 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -45,7 +45,7 @@ proxy. You should be ready for this change when you upgrade to the latest versio - Components for account permission elevation modes - Make pam\_unix more robust in high latency environments - Add proc macros for test cases -- Improve authentication requests with cookie/token seperation +- Improve authentication requests with cookie/token separation - Cleanup of expired authentication sessions - Improved administration of password badlists @@ -194,7 +194,7 @@ for a future supported release. - Rate limiting and softlocking of account credentials to prevent bruteforcing. - Foundations of webauthn and multiple credential support. - Rewrite of json authentication protocol components. -- Unixd will cache "non-existant" items to improve nss/pam latency. +- Unixd will cache "non-existent" items to improve nss/pam latency. ## 2020-10-01 - Kanidm 1.1.0-alpha2 diff --git a/designs/account_policy.rst b/designs/account_policy.rst index d86746c80..d7e4d3b19 100644 --- a/designs/account_policy.rst +++ b/designs/account_policy.rst @@ -76,9 +76,9 @@ For accounts with password-only: * After 5 incorrect attempts the account is rate limited by an increasing time window within the API. 
This limit delays the response to the auth (regardless of success) * After X attempts, the account is soft locked on the affected server only for a time window of Y increasing up to Z. -* If the attempts continue, the account is hard locked and signalled to an external system that this has occured. +* If the attempts continue, the account is hard locked and signalled to an external system that this has occurred. -The value of X should be less than 100, so that the NIST guidelines can be met. This is beacuse when there are +The value of X should be less than 100, so that the NIST guidelines can be met. This is because when there are many replicas, each replica maintains its own locking state, so "eventually" as each replica is attempted to be bruteforced, then they will all eventually soft lock the account. In larger environments, we require external signalling to coordinate the locking of the account. diff --git a/designs/architecture.md b/designs/architecture.md index 541e08bbc..e8d23e451 100644 --- a/designs/architecture.md +++ b/designs/architecture.md @@ -23,7 +23,7 @@ abstraction over the REST API. The `kanidm` proto is a set of structures that are used by the REST and raw API's for HTTP communication. These are intended to be a reference implementation of the on-the-wire protocol, but importantly these are also how the server represents its communication. This makes this the -authorative source of protocol layouts with regard to REST or raw communication. +authoritative source of protocol layouts with regard to REST or raw communication. ## Kanidmd (main server) @@ -55,8 +55,8 @@ it is checked by the schema to ensure that the request is valid and can be satis As these workers are in a thread pool, it's important that these are concurrent and do not lock or block - this concurrency is key to high performance and safety. It's also worth noting that this is -the level where read transactions are created and commited - all operations are transactionally -proctected from an early stage to guarantee consistency of the operations. +the level where read transactions are created and committed - all operations are transactionally +protected from an early stage to guarantee consistency of the operations. 3. When the event is known to be consistent, it is then handed to the queryserver - the query server begins a process of steps on the event to apply it and determine the results for the request. @@ -65,7 +65,7 @@ proctected from an early stage to guarantee consistency of the operations. 4. The backend takes the request and begins the low-level processing to actually determine a candidate set. The first step in query optimisation, to ensure we apply the query in the most - effecient manner. Once optimised, we then use the query to query indexes and create a potential + efficient manner. Once optimised, we then use the query to query indexes and create a potential candidate set of identifiers for matching entries (5.). Once we have this candidate id set, we then retrieve the relevant entries as our result candidate set (6.) and return them (7.) to the backend. @@ -76,8 +76,8 @@ proctected from an early stage to guarantee consistency of the operations. 6. The query server now applies access controls over what you can / can't see. This happens in two phases. The first is to determine "which candidate entries you have the rights to query and view" - and the second is to determine "which attributes of each entry you have the right to percieve". 
- This seperation exists so that other parts of the server can _impersonate_ users and conduct + and the second is to determine "which attributes of each entry you have the right to perceive". + This separation exists so that other parts of the server can _impersonate_ users and conduct searches on their behalf, but still internally operate on the full entry without access controls limiting their scope of attributes we can view. @@ -99,7 +99,7 @@ generated into messages. These messages are sent to a single write worker. There write worker due to the use of copy-on-write structures in the server, limiting us to a single writer, but allowing search transaction to proceed without blocking in parallel. -(3) From the worker, the relevent event is created. This may be a "Create", "Modify" or "Delete" +(3) From the worker, the relevant event is created. This may be a "Create", "Modify" or "Delete" event. The query server handles these slightly differently. In the create path, we take the set of entries you wish to create as our candidate set. In modify or delete, we perform an impersonation search, and use the set of entries within your read bounds to generate the candidate set. This diff --git a/designs/auth.rst b/designs/auth.rst index f259c41ca..41a20c44a 100644 --- a/designs/auth.rst +++ b/designs/auth.rst @@ -89,10 +89,10 @@ for this specific host. _W: Probably the main one is if a group/permission is granted always or ephemerally on the session. But that's per group/permission. I want to limit the amount of configuration policy here, because there are lots of ways that over configuration can create -too many scenarios to effective audit and test. +too many scenarios to effectively audit and test. So the permissions would probably come down to something like "always", "request", and "request-approve", where always is you always have that, request means you have to re-auth then the permission lasts for X time, and request-approve -would mean you have to request, reauth, then someone else signs off on the approval to grant. +would mean you have to request, reauth, then someone else signs off on the approval to grant. SSH via a bastion host ====================== @@ -125,7 +125,7 @@ Implementation ideas for use cases * For identification: * Issue "ID tokens" as an api where you lookup name/uuid and get the userentry + sshkeys + group - entries. This allows one-shot caching of relevent types, and groups would not store the member + entries. This allows one-shot caching of relevant types, and groups would not store the member link on the client. Allows the client to "cache" any extra details into the stored record as required. This would be used for linux/mac to get uid/gid details and ssh keys for distribution. * Would inherit search permissions for connection. @@ -172,7 +172,7 @@ that have unique cookie keys to prevent forgery of writable master cookies) of group uuids + names derferenced so that a client can make all authorisation decisions from a single datapoint -* Groups require the ability to be ephemeral/temporary or permament. +* Groups require the ability to be ephemeral/temporary or permanent.
* each token can be unique based on the type of auth (ie 2fa needed to get access to admin groups) @@ -180,7 +180,7 @@ to admin groups) Cookie/Token Auth Considerations -------------------------------- -* Must prevent replay attacks from occuring at any point during the authentication process +* Must prevent replay attacks from occurring at any point during the authentication process * Minimise (but not eliminate) state on the server. This means that an auth process must remain on a single server, but the token granted should be valid on any server. @@ -243,10 +243,10 @@ struct AuthClientStep { Vec } -The server verifies the credential, and marks that type of credential as failed or fufilled. +The server verifies the credential, and marks that type of credential as failed or fulfilled. On failure of a credential, AuthDenied is immediately sent. On success of a credential the server can issue AuthSuccess or AuthResponse with new possible challenges. For example, -consider we initiall send "password". The client provides the password. The server follows +consider we initially send "password". The client provides the password. The server follows by "totp" as the next type. The client fails the totp, and is denied. If the response is AuthSuccess, an auth token is issued. The auth token is a bearer token @@ -281,7 +281,7 @@ We have metadata on each groups generate memberOf (based on group info itself). says what "strength and type" of authentication is required. The auth request would ask for password, then when password is provided (and correct), it then requests totp OR finalise. If you take finalise, you get authSuccess but the issued token -only has the group "low". +only has the group "low". If you take totp, then finalise, you get authSuccess and the group low *and* high. @@ -289,11 +289,11 @@ Method Two ========== Groups define if they are "always issued" or "requestable". All group types define -requirements to be fufilled for the request such as auth strength, connection +requirements to be fulfilled for the request such as auth strength, connection type, auth location etc. In the AuthRequest if you specific no groups, you do the 'minimum' auth required by -the set of your "always" groups. +the set of your "always" groups. If you do AuthRequest and you request "high", this is now extended into the set of your minimum auth required, which causes potentially more auth steps. However @@ -380,8 +380,8 @@ the TLS tunnel? More Brain Dumping ================== -- need a way to just pw check even if mfa is on (for sudo). Perhaps have a seperate sudo password attr? -- ntpassword attr is seperate +- need a way to just pw check even if mfa is on (for sudo). Perhaps have a separate sudo password attr? +- ntpassword attr is separate - a way to check application pw which attaches certain rights (is this just a generalisation of sudo?) - the provided token (bearer etc?) contains the "memberof" for the session. - How to determine what memberof an api provides? Could be policy object that says "api pw of name X @@ -395,7 +395,7 @@ More Brain Dumping - That would make userPassword and webauthn only for webui and api direct access. - All other pw validations would use application pw case. - SSH would just read ssh key - should this have a similar group filter/allow - mechanism like aplication pw? + mechanism like application pw? 
- Groups take a "type" - credentials also have a "type" @@ -405,7 +405,7 @@ More Brain Dumping - Means a change to auth to take an entry as part of auth, or at least, it's group list for the - session. + session. - policy to define if pw types like sudo or radius are linked. diff --git a/designs/auth_proto_rewrite_late_2020.rst b/designs/auth_proto_rewrite_late_2020.rst index 4c4ee2ede..95b0d387c 100644 --- a/designs/auth_proto_rewrite_late_2020.rst +++ b/designs/auth_proto_rewrite_late_2020.rst @@ -52,7 +52,7 @@ change. Currently Credentials can have *any* combination of factors. -This should be changed to reperesent the valid set of factors. +This should be changed to represent the valid set of factors. * Password (only) * GeneratedPassword diff --git a/designs/credential-display.rst b/designs/credential-display.rst index 50f058d46..95fba5fa6 100644 --- a/designs/credential-display.rst +++ b/designs/credential-display.rst @@ -47,7 +47,7 @@ perform the correct transforms over the credential types to prevent data leaks. The ability to view credentials is bound by the standard search access control rules. The API would return a list of credential details, which is an enum of the possible classes supported -by the server. This ensures during addition of new credetial types or changes we update these protocol +by the server. This ensures during addition of new credential types or changes we update these protocol types. This also helps to support future webui elements for credentials. diff --git a/designs/credential-update.rst b/designs/credential-update.rst index db9a365c9..dbaddaad2 100644 --- a/designs/credential-update.rst +++ b/designs/credential-update.rst @@ -45,10 +45,10 @@ If the access exists, a intent token is created into a link which can be provide Exchange of this intent token, creates the time limited credential update session token. -This allows the intent token to have a seperate time window, to the credential update session token. +This allows the intent token to have a separate time window, to the credential update session token. If the intent token creates a credential update session, and the credential update session is *not* -commited, it can be re-started by the intent token. +committed, it can be re-started by the intent token. If the credential update session has been committed, then the intent token can NOT create new credential update sessions (it is once-use). @@ -103,10 +103,10 @@ As a result, the built set of changes *is* persisted on the server in the creden as the user interacts with and builds the set of changes. This allows the server to enforce that the update session *must* represent a valid and complete set of compliant credentials before commit. -The user may cancel the session at anytime, discarding any set of changes they had inflight. This allows +The user may cancel the session at anytime, discarding any set of changes they had in-flight. This allows another session to now begin. -If the user chooses to commit the changes, the server will assemble the changes into a modification +If the user chooses to commit the changes, the server will assemble the changes into a modification and apply it. The write is applied with server internal permissions - since we checked the permissions during the create of the update session we can trust that the origin of this update has been validated. Additionally since this is not an arbitrary write interface, this constrains potential risk. 
@@ -140,7 +140,7 @@ so that the server that receives the token can enforce the credential adheres to If the client successfully enrolls, a new entry for the enrollment is created in the database. This allows replication of the new credential to occur. -The main session of the credential update can then check for the existance of this stub uuid in the +The main session of the credential update can then check for the existence of this stub uuid in the db and wait for it to replicate in. This can be checked by the "polling" action. When it has been replicated in, and polling has found the credential, the credentials are added to the session. The credential diff --git a/designs/device-authentication.rst b/designs/device-authentication.rst index 6c1daed08..1c3285242 100644 --- a/designs/device-authentication.rst +++ b/designs/device-authentication.rst @@ -7,7 +7,7 @@ devices vary from desktops, laptops, tablets, mobile phones and more. Each of th different security and trust levels, as well as a variety of input methods. Historically authentication providers have *not* factored in multiple device classes to -authentication leading to processes that are inconvinent to insecure for humans to handle when they +authentication leading to processes that are inconvenient or insecure for humans to handle when they want to use their account between devices. Example of a Bad Workflow @@ -51,7 +51,7 @@ Roaming vs Platform Authenticators ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ In our example our laptop and phone both have platform authenticators, which are security devices -bound to the platform (they are inseperable). Rather than using a platform authenticator we *may* +bound to the platform (they are inseparable). Rather than using a platform authenticator we *may* allow a roaming authenticator to be used to bootstrap the phone's platform authenticator. An example of a roaming authenticator is a yubikey, which can be plugged into the laptop, and then disconnected and connected to the phone. This changes the steps of the process to be. diff --git a/designs/downgrade.rst b/designs/downgrade.rst index b6dbd0844..3f1628515 100644 --- a/designs/downgrade.rst +++ b/designs/downgrade.rst @@ -27,7 +27,7 @@ rather than events that are fully resolved. This way within the changelog trim window, a server can be downgraded, and it's RUV move backwards, but the missing updates will be "replayed" backwards to it. Second, it means we have to consider making replication either version (typed) -data agnostic *or* have CSN's reperesent a dataset version from the server which gates or blocks replication events from newer to older instances until *they* are upgraded. +data agnostic *or* have CSN's represent a dataset version from the server which gates or blocks replication events from newer to older instances until *they* are upgraded. Having the version gate does have a good benefit. Imagine we have three servers A, B, C. We upgrade A and B, and they migrate UTF8STRING to XDATA. Server C has @@ -37,8 +37,8 @@ This means that *all changes* from A and B post upgrade will NOT be sent to C. C may accept changes and will continue to provide them to A and B (provided all other update resolution steps uphold). If we now revert B, the changes from A will not flow to B which has been downgraded, but C's changes that were accepted WILL -continue to be acceptted by B. Similar with A. This means in a downgrade scenario -that any data writen on upgraded nodes that are downgraded will be lost, but +continue to be accepted by B.
Similar with A. This means in a downgrade scenario +that any data written on upgraded nodes that are downgraded will be lost, but that all replication as a whole will still be valid. This is good! It does mean we need to consider that we have to upgrade data as it comes in from diff --git a/designs/idm_rest_layout.rst b/designs/idm_rest_layout.rst index 65f3bad34..59dcd5de7 100644 --- a/designs/idm_rest_layout.rst +++ b/designs/idm_rest_layout.rst @@ -136,7 +136,7 @@ account GET -> list the credentials DELETE -> /v1/account/{id}/_credential/{id}/_lock - POST -> lock this credential until time (or null for permament) + POST -> lock this credential until time (or null for permanent) DELETE -> unlock this account /v1/account/{id}/_radius GET -> get the accounts radius credentials diff --git a/designs/indexing.md b/designs/indexing.md index 4dfcae530..22d03027e 100644 --- a/designs/indexing.md +++ b/designs/indexing.md @@ -5,7 +5,7 @@ search term (filter) faster. ## World without indexing -Almost all databases are built ontop of a key-value storage engine of some nature. In our case we +Almost all databases are built on top of a key-value storage engine of some nature. In our case we are using (feb 2019) sqlite and hopefully SLED in the future. So our entries that contain sets of avas, these are serialised into a byte format (feb 2019, json @@ -98,7 +98,7 @@ containing 250,000 ids. Even with idl compression, this is still a lot of data! There tend to be two types of searches against a directory like Kanidm. - Broad searches -- Targetted single entry searches +- Targeted single entry searches For broad searches, filter optimising does little - we just have to load those large idls, and use them. (Yes, loading the large idl and using it is still better than full table scan though!) @@ -141,13 +141,13 @@ We load the single idl value for name, and then as we are below the test-thresho and apply the filter to entry ID 1 - yielding a match or no match. Notice in the second, by promoting the "smaller" idl, we were able to save the work of the idl load -and intersection as our first equality of "name" was more targetted? +and intersection as our first equality of "name" was more targeted? Filter optimisation is about re-arranging these filters in the server using our insight to data to provide faster searches and avoid indexes that are costly unless they are needed. In this case, we would _demote_ any filter where Eq(class, ...) to the _end_ of the And, because it -is highly likely to be less targetted than the other Eq types. Another example would be promotion of +is highly likely to be less targeted than the other Eq types. Another example would be promotion of Eq filters to the front of an And over a Sub term, wherh Sub indexes tend to be larger and have longer IDLs. @@ -182,7 +182,7 @@ the tables as: They will be structured as string, string for both - where the uuid and name column matches the correct direction, and is the primary key. We could use a single table, but if we change to sled we -need to split this, so we pre-empt this change and duplicate the data here. +need to split this, so we preempt this change and duplicate the data here. 
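As a toy model of the pair of tables just described (real Kanidm keeps these in its key-value backend; the names follow the text above), both directions are written together so they stay consistent:

```rust
use std::collections::BTreeMap;

// string -> string in both directions, with the looked-up column as the key.
struct ExtIdIndex {
    name2uuid: BTreeMap<String, String>,
    uuid2name: BTreeMap<String, String>,
}

impl ExtIdIndex {
    fn new() -> Self {
        ExtIdIndex { name2uuid: BTreeMap::new(), uuid2name: BTreeMap::new() }
    }

    fn insert(&mut self, name: &str, uuid: &str) {
        // A single logical write updates both tables.
        self.name2uuid.insert(name.to_string(), uuid.to_string());
        self.uuid2name.insert(uuid.to_string(), name.to_string());
    }

    // An external name (e.g. from a filter) resolves to the uuid that
    // anchors all other index lookups.
    fn resolve(&self, name: &str) -> Option<&str> {
        self.name2uuid.get(name).map(String::as_str)
    }
}

fn main() {
    let mut idx = ExtIdIndex::new();
    idx.insert("claire", "00000000-0000-0000-0000-000000000003");
    assert_eq!(idx.resolve("claire"), Some("00000000-0000-0000-0000-000000000003"));
    assert_eq!(
        idx.uuid2name.get("00000000-0000-0000-0000-000000000003").map(String::as_str),
        Some("claire")
    );
}
```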
# Indexing States diff --git a/designs/kanidm-trust.rst b/designs/kanidm-trust.rst index 425a5a8f6..e5a1c51e0 100644 --- a/designs/kanidm-trust.rst +++ b/designs/kanidm-trust.rst @@ -1,7 +1,7 @@ Trust Design and Thoughts ------------------------- -Trust is a process where users and groups of a seperate kanidm instance may be granted access +Trust is a process where users and groups of a separate kanidm instance may be granted access to resources through this system. Trust is a one way concept, but of course, could be implemented twice in each direction to achieve bidirectional trust. @@ -9,9 +9,9 @@ Why? ---- There are a number of reasons why a trust configuration may be desired. You may have -a seperate business to customer instance, where business users should be able to authenticate +a separate business to customer instance, where business users should be able to authenticate to customer resources, but not the inverse. You may have two businesses merge or cooperate and -require resource sharing. It allows seperation of high value credentials onto different infrastructure. +require resource sharing. It allows separation of high value credentials onto different infrastructure. You could also potentially use trust as a method of sync between between a different IDM project and this. @@ -50,7 +50,7 @@ There are different ways we can scope a trust out, each with pros-cons. Here are a whitelist. * Fractional Replication - similar to the GC in AD, replicate in a subset of your data, but then ask for redirects or other information. This is used with 389 and RO servers where you may only - replicate a subset of accounts to branch offices or a seperate backend. + replicate a subset of accounts to branch offices or a separate backend. Each of these has pros and cons, good bad, and different models. They each achieve different things. For example, the Kerberos style trust creates silos where the accounts credential material is stored (in the home @@ -84,7 +84,7 @@ So with a lot of though, I'm going to go with fractional replication. * Forwarding - I don't want credentials to be forwarded, or sso to be forwarded. * Cred Silo - I want this because it means you have defined boundaries of where security material is stored by who. * PII limit - I want this as you can control who-has-what PII on the system side. -* Group Mgmt - I want this as it enables rbac and familar group management locally for remote and local entries. +* Group Mgmt - I want this as it enables rbac and familiar group management locally for remote and local entries. * Invite Ext - On the fence - cool idea, but not sure how it fits into kanidm with trusts. * Distributed - I don't want this because it's model is really different to what kani is trying to be * Client Switched - I don't want this because clients should only know they trust an IDM silo, and that does the rest. 
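A sketch of what attribute-level fractional replication could look like (the visibility policy and names here are hypothetical illustrations, not the implemented design):

```rust
use std::collections::BTreeMap;

// Each attribute is marked with whether it may leave the local instance,
// and outbound entries are filtered before being sent to the trusting domain.
enum TrustVisibility {
    Replicated, // e.g. name, public ssh keys
    LocalOnly,  // e.g. credentials and PII kept in the silo
}

fn fractional_view(
    entry: &BTreeMap<String, String>,
    policy: &BTreeMap<String, TrustVisibility>,
) -> BTreeMap<String, String> {
    entry
        .iter()
        .filter(|(attr, _)| matches!(policy.get(*attr), Some(TrustVisibility::Replicated)))
        .map(|(k, v)| (k.clone(), v.clone()))
        .collect()
}

fn main() {
    let mut entry = BTreeMap::new();
    entry.insert("name".to_string(), "claire".to_string());
    entry.insert("primary_credential".to_string(), "<secret>".to_string());

    let mut policy = BTreeMap::new();
    policy.insert("name".to_string(), TrustVisibility::Replicated);
    policy.insert("primary_credential".to_string(), TrustVisibility::LocalOnly);

    let view = fractional_view(&entry, &policy);
    assert!(view.contains_key("name"));
    assert!(!view.contains_key("primary_credential")); // stays in the silo
}
```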
@@ -113,7 +113,7 @@ With the fractional case in mind, this means we have sets of use cases that exis * RADIUS authentication to a different network infra in the trusting domain (but the Radius creds are local to the site) * Limiting presence of credentials in cloud (but making public key credentials avail) * Limiting distribution of personal information to untrusted sites -* Creating administration domains or other business hierachies that may exist in some complex scenarios +* Creating administration domains or other business hierarchies that may exist in some complex scenarios We need to consider how to support these use cases of course :) @@ -196,7 +196,7 @@ if multiple urls exist in the trustanchor, we should choose randomly which to co authentications. If a URL is not available, we move to the next URL (failover) We could consider in-memory caching these values, but then we have to consider the cache expiry -and management of this data. Additionally types like TOTP aren't cachable. I think we should +and management of this data. Additionally types like TOTP aren't cacheable. I think we should avoid caching in these cases. Auth Scenarios @@ -257,7 +257,7 @@ Excluding items from Domain B from replicating back In a situation where domain A trusts B, and inverse B trusts A, then A will contain trust stubs to entries in B. -Due to the use of spn's we can replicate only our entries for domain to the trust reciever. +Due to the use of spn's we can replicate only our entries for domain to the trust receiver. :: @@ -280,7 +280,7 @@ How do we get the domain at setup time for spn? We already require domain for we we write this into the system_info? This means we need to determine a difference between a localgroup and a group that will -be synced for trust. This may require a seperate class or label? +be synced for trust. This may require a separate class or label? We need to make name -> SPN on groups/accounts that can be sent across a trust boundary. @@ -304,7 +304,7 @@ is a requirement for replication anyway, and SID regeneration is not a complex t unlikely that we would ever see duplicates anyway as this is a 32bit field. An alternate option is to have the stub objects generate ids, but to have a trusted_uuid field -that is used for replication checking, and a seperate CSN for trust replication. +that is used for replication checking, and a separate CSN for trust replication. Webauthn diff --git a/designs/ldap_gateway.rst b/designs/ldap_gateway.rst index 0cd0a5f8e..e9643d817 100644 --- a/designs/ldap_gateway.rst +++ b/designs/ldap_gateway.rst @@ -90,7 +90,7 @@ beyond the attribute name: We will accept (and prefer) that Kanidm attribute names are provided in the LDAP filter for applications that can be customised. -Compatability Attributes +Compatibility Attributes ======================== Some attributes exist in LDAP that have no direct equivalent in Kanidm. These are often from existing @@ -101,7 +101,7 @@ two are: * EntryUUID These should be provided through an ldapCompat class in kanidm, and require no other transformation. They -may require generation from the server, as legacy applications expect their existance and kanidm created +may require generation from the server, as legacy applications expect their existence and kanidm created accounts would need the attributes to exist to work with these. 
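A sketch of generating these two compat attributes on the fly (the spn-based DN shape shown is an assumption for illustration, not the gateway's confirmed mapping):

```rust
// entryuuid can simply mirror the kanidm entry uuid, while entrydn is
// synthesised from the spn and a suffix derived from the domain.
fn ldap_compat_attrs(uuid: &str, spn: &str, basedn: &str) -> Vec<(String, String)> {
    vec![
        ("entryuuid".to_string(), uuid.to_string()),
        ("entrydn".to_string(), format!("spn={},{}", spn, basedn)),
    ]
}

fn main() {
    for (attr, value) in ldap_compat_attrs(
        "00000000-0000-0000-0000-000000000004",
        "claire@example.com",
        "dc=example,dc=com",
    ) {
        println!("{}: {}", attr, value);
    }
}
```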
Entry and Attribute Transformations diff --git a/designs/logging.rst b/designs/logging.rst index 6945594a0..8e51cc80b 100644 --- a/designs/logging.rst +++ b/designs/logging.rst @@ -69,7 +69,7 @@ This leads to the following log categories: * The unique event ID is provided in any operation success or failure. * Security (aka audit) * Filtering of security sensitive attributes (via debug/display features) - * Display of sufficent information to establish a security picture of connected actions via the user's uuid/session id. + * Display of sufficient information to establish a security picture of connected actions via the user's uuid/session id. * Tracking of who-changed-what-when-why * Replication * TODO @@ -78,7 +78,7 @@ It can be seen pretty quickly that multiple message types are useful across cate example, the unique event id for all messages, how hard errors affect operation errors or how an operation error can come from a security denial. -Logging must also remain a seperate thread and async for performance. +Logging must also remain a separate thread and async for performance. This means that the best way to declare these logs is a unified log which can be filtered based on the admins or consumers needs. diff --git a/designs/memberof.rst b/designs/memberof.rst index 917be8fbe..5e75d39f1 100644 --- a/designs/memberof.rst +++ b/designs/memberof.rst @@ -42,7 +42,7 @@ where the inverse look up becomes N operations to resolve the full structure. Design ------ -Due to the nature of this plugin, there is a single attribute - 'member' - whos content is examined +Due to the nature of this plugin, there is a single attribute - 'member' - whose content is examined to build the relationship to others - 'memberOf'. We will examine a single group and user situation without nesting. We assume the user already exists, as the situation where the group exists and we add the user can't occur due to refint. diff --git a/designs/mfa-device-enrollment-process.rst b/designs/mfa-device-enrollment-process.rst index 3b246418a..ea8b8fa7b 100644 --- a/designs/mfa-device-enrollment-process.rst +++ b/designs/mfa-device-enrollment-process.rst @@ -16,9 +16,9 @@ Situation We have a user with a device E(nrolled), and a device N(ew) that they wish to be able to use. -Each device contains a unique webauthn device that is inseperable from the device. +Each device contains a unique webauthn device that is inseparable from the device. -Each device may be connected to a seperate Kanidm instance - IE we can not assume that +Each device may be connected to a separate Kanidm instance - IE we can not assume that the data in the system may be point-in-time consistent due to replication as an asynchronous process. @@ -91,7 +91,7 @@ Device N may have to wait for replication back for the WebauthnCredential to app Possible Changes ================ -Do not require the approval step, as an OTP has already been provided, which is evidence of possesion -of an account which has sufficent permissions. +Do not require the approval step, as an OTP has already been provided, which is evidence of possession +of an account which has sufficient permissions. 
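A sketch of the single-use, time-boxed check this implies (hypothetical types; a real OTP would be derived from a shared secret rather than stored raw):

```rust
use std::time::{Duration, SystemTime};

// The OTP is evidence of possession of the enrolled account: it can be
// redeemed once, within its validity window, and never again.
struct EnrollmentOtp {
    code: u32,
    expires_at: SystemTime,
    used: bool,
}

impl EnrollmentOtp {
    fn redeem(&mut self, presented: u32, now: SystemTime) -> bool {
        if self.used || now > self.expires_at || presented != self.code {
            return false;
        }
        self.used = true; // single use
        true
    }
}

fn main() {
    let now = SystemTime::now();
    let mut otp = EnrollmentOtp { code: 123_456, expires_at: now + Duration::from_secs(120), used: false };
    assert!(otp.redeem(123_456, now));
    assert!(!otp.redeem(123_456, now)); // replay is rejected
}
```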
diff --git a/designs/mfa_backup_code.rst b/designs/mfa_backup_code.rst index 5a8243ed7..2607f9194 100644 --- a/designs/mfa_backup_code.rst +++ b/designs/mfa_backup_code.rst @@ -35,13 +35,13 @@ A user can generate one batch of Backup codes at a time, which will be saved in - Each code in the batch can be used only once - If the user generates a new batch of Backup codes, the last batch of codes become invalid. -To invalidate the Backup code after usage, we should send an async message to the async task queue. Since the auth process is currently in a single transaction lock that is NOT writeable, using an async action allows the auth process to proceed in parallel and prevent opening nested transactions. **Note** that there is a small window between "use of the backup code" and the async action being processed to actually cause the invalidation, but this window is short enough that it's an acceptable compromise. +To invalidate the Backup code after usage, we should send an async message to the async task queue. Since the auth process is currently in a single transaction lock that is NOT writeable, using an async action allows the auth process to proceed in parallel and prevent opening nested transactions. **Note** that there is a small window between "use of the backup code" and the async action being processed to actually cause the invalidation, but this window is short enough that it's an acceptable compromise. To prevent attackers from bruteforcing these Backup code at a high rate, we need a rate limiting mechanism similar to what exists for passwords: * After 5 incorrect attempts the account is rate limited by an increasing time window within the API. This limit delays the response to the auth (regardless of success) * After X attempts, the account is soft locked on the affected server only for a time window of Y increasing up to Z. - * If the attempts continue, the account is hard locked and signalled to an external system that this has occured. + * If the attempts continue, the account is hard locked and signalled to an external system that this has occurred. (See designs/account_policy.rst#rate-limiting for details) Access Control @@ -49,7 +49,7 @@ Access Control With the existing access profile infrastructure, we can decide which users/groups can self-admin the Backup code via a membership to a ``idm_account_mfa_backup_code_self_priv`` group. -If users want to admin Backup codes for others, they have to be members of the ``idm_account_mfa_backup_code_manage_priv`` group. +If users want to admin Backup codes for others, they have to be members of the ``idm_account_mfa_backup_code_manage_priv`` group. (See kanidmd/src/lib/constants/entries.rs for examples of existing groups for privileges) diff --git a/designs/oauth.rst b/designs/oauth.rst index d53f315a5..5953c9930 100644 --- a/designs/oauth.rst +++ b/designs/oauth.rst @@ -32,7 +32,7 @@ code and exchanges it for a valid token that may be provided to the client. The resource server may optionally contact the token introspection endpoint about the provided oauth token, which yields extra metadata about the identity that holds the token and completed the authorisation. This metadata may include identity information, -but also may include extended metadata, sometimes refered to as "claims". Claims are +but also may include extended metadata, sometimes referred to as "claims". 
Claims are information bound to a token based on properties of the session that may allow the resource server to make extended authorisation decisions without the need to contact the authorisation server to arbitrate. @@ -42,7 +42,7 @@ In this model, Kanidm will function as the authorisation server. Kanidm UAT Claims ----------------- -To ensure that we can filter and make certain autorisation decisions, the Kanidm UAT +To ensure that we can filter and make certain authorisation decisions, the Kanidm UAT needs to be extended with extra claims similar to the token claims. Since we have the ability to strongly type these, we can add these to the UAT. These should include. @@ -154,7 +154,7 @@ pkce: https://tools.ietf.org/html/rfc7636 token introspection: https://tools.ietf.org/html/rfc7662 bearer: https://tools.ietf.org/html/rfc6750 device authorisation grant: https://datatracker.ietf.org/doc/html/rfc8628 -claims ad krb: https://syfuhs.net/2017/07/29/active-directory-claims-and-kerberos-net/ +claims ad krb: https://syfuhs.net/2017/07/29/active-directory-claims-and-kerberos-net/ openid connect: https://openid.net/developers/specs/ diff --git a/designs/password-import.rst b/designs/password-import.rst index 88107d139..f52c1f458 100644 --- a/designs/password-import.rst +++ b/designs/password-import.rst @@ -55,7 +55,7 @@ provided to be able to take over high privilege kanidm accounts. For this reason, the ability to import passwords must be limited to: * A service account with strong credentials -* high_privilige accounts may NOT have their passwords set in this manner +* high_privilege accounts may NOT have their passwords set in this manner Once kanidm implements password badlist checks in the auth path, passwords that have been synced into kanidm via this route may not function as they are found in the badlist, causing the account diff --git a/designs/radius.rst b/designs/radius.rst index 5cb136b74..ccb6b6601 100644 --- a/designs/radius.rst +++ b/designs/radius.rst @@ -39,8 +39,8 @@ of a positive user experience, having MSCHAPv2 is essential. Nice To Have ------------ -To limit the scope of damage in an attack, RADIUS passwords should be seperate from the main -account password due to their weak storage. Because these are seperate and shared between devices +To limit the scope of damage in an attack, RADIUS passwords should be separate from the main +account password due to their weak storage. Because these are separate and shared between devices this does lead to some interesting behaviours we can use. Storing the RADIUS password in plaintext now becomes an option, meaning that we can have autoconfiguration @@ -61,7 +61,7 @@ With the above in mind, this leads to the following conclusions: * There is only a single RADIUS configuration profile per-kanidm topology * A user only requires a single RADIUS infrastructure password as the network is considered a single entity and resources are arbitrated elsewhere. * Groups define what vlan a users belongs to (and possibly other ip resources). -* The users RADIUS password is seperate from their main account, and has no other function than RADIUS authentication. +* The users RADIUS password is separate from their main account, and has no other function than RADIUS authentication. 
* The users RADIUS password can be server-side generated, and have pathways to distribute it to devices that remove the need for human interaction Design Details diff --git a/designs/recycle_bin.rst b/designs/recycle_bin.rst index 9fa27babc..5995f9843 100644 --- a/designs/recycle_bin.rst +++ b/designs/recycle_bin.rst @@ -39,7 +39,7 @@ potentially. This is an argument for the filter-scan method, that checks if any the class, deleted, and if it does, we do not wrap with the AndNot term. -The best solution is a whole seperate interface (/search/recycle/) that has it's own access controls +The best solution is a whole separate interface (/search/recycle/) that has its own access controls that is used. By default searches don't look at recycled items (but internal do). This interface would remove that limitation, but would require access controls to prevent read/changes. diff --git a/designs/repl_future_considerations.rst b/designs/repl_future_considerations.rst index 8f31bff6a..52788db35 100644 --- a/designs/repl_future_considerations.rst +++ b/designs/repl_future_considerations.rst @@ -10,7 +10,7 @@ At first glance it may seem correct to no-op a change where the state is: with a "purge name; add name william". -However, this doesn't express the full possibities of the replication topology +However, this doesn't express the full possibilities of the replication topology in the system. The follow events could occur: :: @@ -22,9 +22,9 @@ in the system. The follow events could occur: del: name n: w -The events of DB 1 seem correct in isolation, to no-op the del and re-add, however +The events of DB 1 seem correct in isolation, to no-op the delete and re-add, however when the changelogs will be replayed, they will then cause the events of DB2 to -be the final state - whet the timing of events on DB 1 should actually be the +be the final state - whereas the timing of events on DB 1 should actually be the final state. To contrast if you no-oped the purge name: @@ -166,7 +166,7 @@ To achieve this, we store a list of CID's and what entries were affected within One can imagine a situation where two servers change the entry, but between those changes the read-only is supplied the CID. We don't care in what order they did change, -only that a change *must* have occured. +only that a change *must* have occurred. So example: let's take entry A with server A and B, and read-only R. @@ -300,7 +300,7 @@ to prevent this situation such as: A 0/3 A 0/1 A 0/3 B 0/1 B 0/4 B 0/1 -In this case, one can imagine B would then supply data, and when A recieved B's changes, it would again +In this case, one can imagine B would then supply data, and when A received B's changes, it would again supply to R. However, this can be easily avoided by adhering to the following: * A server can only supply to a read-only if all of the suppling server's RUV CSN MAX are contained @@ -367,7 +367,7 @@ the following: GRUV A: R (A: 0/0, ) -So A has connected to R and polled the RUV and recieved a 0/0. We now can supply our changes to +So A has connected to R and polled the RUV and received a 0/0. We now can supply our changes to R: :: @@ -380,7 +380,7 @@ R: As R is a read-only it has no concept of the changelog, so it sets MIN to MAX. -Now, we then poll the RUV again. Protocol wise RUV polling should be seperate to suppling of data! +Now, we then poll the RUV again. Protocol wise RUV polling should be separate to supplying of data!
:: diff --git a/designs/replication.rst b/designs/replication.rst index cc1dd8cc6..ce95533be 100644 --- a/designs/replication.rst +++ b/designs/replication.rst @@ -19,21 +19,21 @@ has a number of negative cultural connotations, and is not used by this project. * Read-Write server This is a server that is fully writable. It accepts external client writes, and these -writes are propogated to the topology. Many read-write servers can be in a topology +writes are propagated to the topology. Many read-write servers can be in a topology and written to in parallel. * Transport Hub This is a server that is not writeable to clients, but can accept incoming replicated -writes, and then propogates these to other servers. All servers that are directly after -this server inthe topology must not be a read-write, as writes may not propogate back +writes, and then propagates these to other servers. All servers that are directly after +this server in the topology must not be a read-write, as writes may not propagate back from the transport hub. IE the following is invalid :: RW 1 ---> HUB <--- RW 2 -Note the replication direction in this, and that changes into HUB will not propogate +Note the replication direction in this, and that changes into HUB will not propagate back to RW 1 or RW 2. * Read-Only server @@ -43,7 +43,7 @@ incoming replicated changes, and has no outbound replication agreements. Replication systems are dictated by CAP theorem. This is a theory that states from -"consistency, availability and paritition tolerance" you may only have two of the +"consistency, availability and partition tolerance" you may only have two of the three at any time. * Consistency @@ -55,12 +55,12 @@ see the latest data. * Availability -This is the property that every request will recieve a non-error response without +This is the property that every request will receive a non-error response without the guarantee that the data is "up to date". * Partition Tolerance -This is the property that your topology in the face of patition tolerance will +This is the property that your topology in the face of partition tolerance will continue to provide functional services (generally reads). Almost all systems expect partition tolerance, so the choice becomes between consistency @@ -82,7 +82,7 @@ at in a system like Kanidm. Object Level inconsistency occurs when two read-write servers who are partitioned, both allocate the same entry UUID to an entry. Since the uuid is the "primary key" -which anchors all other changes, and can not be duplicated, when the paritioning +which anchors all other changes, and can not be duplicated, when the partitioning is resolved, the replication will occur, and one of the two items must be discarded as inconsistent. @@ -121,7 +121,7 @@ assertions in a change. The possible entry states are: -* NonExistant +* NonExistent * Live * Recycled * Tombstone @@ -153,12 +153,12 @@ a CID. :: - create + NonExistant -> Live + create + NonExistent -> Live modify + Live -> Live recycle + Live -> Recycled revive + Recycled -> Live tombstoned + Recycled -> Tombstone - purge + Tombstone -> NonExistant + purge + Tombstone -> NonExistent .. image:: diagrams/object-lifecycle-states.png :width: 800 @@ -171,12 +171,12 @@ Entry Change Log Within Kanidm id2entry is the primary store of active entry state representation. However the content of id2entry is a reflection of the series of modifications and changes that -have applied to create that entitiy. 
As a result id2entry can be considered as an entry +have applied to create that entity. As a result id2entry can be considered as an entry state cache. -The true stable storage and representation for an entry will exist in a seperate Entry +The true stable storage and representation for an entry will exist in a separate Entry Change Log type. Each entry will have it's own internal changelog that represents the -changes that have occured in the entries lifetime and it's relevant state at that time. +changes that have occurred in the entries lifetime and it's relevant state at that time. The reason for making a per-entry change log is to allow fine grained testing of the conflict resolution state machine on a per-entry scale, and then to be able to test @@ -216,7 +216,7 @@ structure of how we will code this within Kanidm. │├───────────────────────────────┤│ │ │ │ ││CID 3 ││ │ │ │ │├───────────────────────────────┤│ │ └─────────────────────────┘ - ││CID 4 ││ │ + ││CID 4 ││ │ │├───────────────────────────────┤│ │ ┌─────────────────────────┐ ││CID 5 ││ │ │ e2 - entry change log │ │├───────────────────────────────┤│ │ │ ┌─────────────────────┐ │ @@ -228,13 +228,13 @@ structure of how we will code this within Kanidm. │├───────────────────────────────┤│ │ ... │ ││CID 9 ││ │ │ │├───────────────────────────────┤│ └─────────────────────────┘ - ││CID 10 ││ - │├───────────────────────────────┤│ - ││CID 11 ││ - │├───────────────────────────────┤│ - ││CID 12 ││ - │└───────────────────────────────┘│ - └─────────────────────────────────┘ + ││CID 10 ││ + │├───────────────────────────────┤│ + ││CID 11 ││ + │├───────────────────────────────┤│ + ││CID 12 ││ + │└───────────────────────────────┘│ + └─────────────────────────────────┘ This allows expression of both: @@ -265,7 +265,7 @@ to be able to replay those events. For example: │├───────────────────────────────┤│ │ │ │ ││CID 3 ││ │ │ │ │├───────────────────────────────┤│ │ └─────────────────────────┘ - ││CID 4 ││ │ + ││CID 4 ││ │ │├───────────────────────────────┤│ │ ┌─────────────────────────┐ ││CID 5 ││ │ │ e2 - entry change log │ │├───────────────────────────────┤│ │ │ ┌─────────────────────┐ │ @@ -277,13 +277,13 @@ to be able to replay those events. For example: │├───────────────────────────────┤│ │ ... │ ││CID 9 ││ │ │ │├───────────────────────────────┤│ └─────────────────────────┘ - ││CID 10 ││ - │├───────────────────────────────┤│ - ││CID 11 ││ - │├───────────────────────────────┤│ - ││CID 12 ││ - │└───────────────────────────────┘│ - └─────────────────────────────────┘ + ││CID 10 ││ + │├───────────────────────────────┤│ + ││CID 11 ││ + │├───────────────────────────────┤│ + ││CID 12 ││ + │└───────────────────────────────┘│ + └─────────────────────────────────┘ Since CID 1 has been inserted previous to CID 2 we need to "undo" the changes of CID 2 in @@ -324,7 +324,7 @@ snapshot that describes the entry as the sum of previous changes. │ │ └─────────────────────────┘ -In our example here we would find the snapshot preceeding our newely inserted CID (in this case +In our example here we would find the snapshot preceding our newely inserted CID (in this case our Anchor) and from that we would then replay all subsequent changes to ensure they apply correctly (or are rejected as conflicts). @@ -362,7 +362,7 @@ it's simpler and correct to continue to consider them. 
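To make that ordering property concrete, here is a toy model of a per-entry changelog keyed by CID (simplified CID and change types, not the real server structures): a late-arriving remote change with an earlier CID is automatically placed before later local changes, and replay from the preceding snapshot rebuilds the correct state.

```rust
use std::collections::BTreeMap;

// Simplified CID: (timestamp, server id). BTreeMap keeps changes in CID order.
type Cid = (u64, u8);

enum Change {
    Snapshot(String), // a full-state anchor
    SetName(String),
}

// Replay the ordered log to derive the current entry state.
fn replay(log: &BTreeMap<Cid, Change>) -> Option<String> {
    let mut state = None;
    for change in log.values() {
        match change {
            Change::Snapshot(s) => state = Some(s.clone()),
            Change::SetName(n) => state = Some(n.clone()),
        }
    }
    state
}

fn main() {
    let mut log = BTreeMap::new();
    log.insert((1, 0), Change::Snapshot("w".to_string()));
    log.insert((3, 0), Change::SetName("claire".to_string()));
    // A remote change with an earlier CID arrives late; ordering by CID
    // means replay applies it *before* the t=3 change, so the t=3 change
    // still wins.
    log.insert((2, 1), Change::SetName("william".to_string()));
    assert_eq!(replay(&log), Some("claire".to_string()));
}
```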
Changelog Comparison - Replication Update Vector (RUV) ====================================================== -A changelog is a single servers knowledge of all changes that have occured in history +A changelog is a single servers knowledge of all changes that have occurred in history of a topology. Of course, the point of replication is that multiple servers are exchanging their changes, and potentially that a server must proxy changes to other servers. For this to occur we need a method of comparing changelog states, and then allowing fractional @@ -432,7 +432,7 @@ As a more graphical representation, we could consider our ruv as follows: ││ max: CID 12────┼┼──┤ │ │ │├───────────────────────────────┤│ │ │ │ │└───────────────────┘│ ├───┼───────────▶│CID 3 ││ │ │ │ └─────────────────────┘ │ │ │ │├───────────────────────────────┤│ │ └─────────────────────────┘ - │ └───────────▶│CID 4 ││ │ + │ └───────────▶│CID 4 ││ │ │ │ │├───────────────────────────────┤│ │ ┌─────────────────────────┐ │ ││CID 5 ││ │ │ e2 - entry change log │ │ │ │├───────────────────────────────┤│ │ │ ┌─────────────────────┐ │ @@ -444,13 +444,13 @@ As a more graphical representation, we could consider our ruv as follows: │ │├───────────────────────────────┤│ │ ... │ │ ││CID 9 ││ │ │ │ │├───────────────────────────────┤│ └─────────────────────────┘ - │ ││CID 10 ││ - │ │├───────────────────────────────┤│ - │ ││CID 11 ││ - │ │├───────────────────────────────┤│ - └───────────────▶│CID 12 ││ - │└───────────────────────────────┘│ - └─────────────────────────────────┘ + │ ││CID 10 ││ + │ │├───────────────────────────────┤│ + │ ││CID 11 ││ + │ │├───────────────────────────────┤│ + └───────────────▶│CID 12 ││ + │└───────────────────────────────┘│ + └─────────────────────────────────┘ It may be that we also add a RUV index that allows the association of exact set of CID's to a server's cl, or if during CL replay we just iterate through the CL index finding all values that are @@ -518,12 +518,12 @@ re-apply these changes, discarding changes that would be invalid for those state :: - create + NonExistant -> Live + create + NonExistent -> Live modify + Live -> Live recycle + Live -> Recycled revive + Recycled -> Live tombstoned + Recycled -> Tombstone - purge(*) + Tombstone -> NonExistant + purge(*) + Tombstone -> NonExistent Lets now show a conflict case: @@ -538,18 +538,18 @@ Lets now show a conflict case: Notice that both servers create E1. In order to resolve this conflict, we use the only synchronisation mechanism that we possess - time. On Server B at T3 when the changelog -of Server A is recieved, the events are replayed, and linearised to: +of Server A is Received, the events are replayed, and linearised to: :: - T0: NonExistant E1 # For illustration only + T0: NonExistent E1 # For illustration only T1: Create E1 (from A) T2: Create E1 (from B) As the event at T2 can not be valid, the change at T2 is *skipped* - E1 from B is turned into a conflict + recycled entry. See conflict UUID generation above. -Infact, having this state machine means we can see exactly what can and can not be resolved +In fact, having this state machine means we can see exactly what can and can not be resolved correctly as combinations. Here is the complete list of valid combinations. 
:: diff --git a/designs/resource_limits.rst b/designs/resource_limits.rst index 373f7e71e..e8a218dad 100644 --- a/designs/resource_limits.rst +++ b/designs/resource_limits.rst @@ -3,7 +3,7 @@ Resource Limits As security sensitive software, kanidm must be "available" (as defined by confidentiality, integrity, and availability). This means that as a service we must -be able to handle a large volume of potentially malicous traffic, and still able +be able to handle a large volume of potentially malicious traffic, and still be able to serve legitimate requests without fault or failure. To achieve this, the resources of the server must be managed and distributed to allow @@ -11,7 +11,7 @@ potentially thousands of operations per second, while preventing exhaustion of t resources. Kanidm is structured as a database, where each request requires a process -to resolve that query into an answer. This could be a request for authetication +to resolve that query into an answer. This could be a request for authentication which is a true/false response, or a request for an identity so that we can determine their groups for authorisation, or even just a request to find someone's email address in a corporate directory context. @@ -19,7 +19,7 @@ someone's email address in a corporate directory context. Each operation requires resources to complete individually, but many operations can be processed in parallel. -Resource exhaustion occurs when input from a client consumes more resources +Resource exhaustion occurs when input from a client consumes more resources than the server can provide. This means the attack surface is any possible input and how the server interprets and processes that input. In kanidm this could be a search filter for example - the query may be small, but it could be expensive diff --git a/designs/schema_reference_types.rst b/designs/schema_reference_types.rst index 7e0af6520..56320e38e 100644 --- a/designs/schema_reference_types.rst +++ b/designs/schema_reference_types.rst @@ -16,7 +16,7 @@ during ProtoEntry <-> Entry transformations. This means that renames of objects, references, but does mean they continue to render their linkage correctly. * We can implement native referential integrity for the types rather than relying on admin and plugin configuration to match the internal types. -* User defined classes will inherit referential behavious by using +* User defined classes will inherit referential behaviour by using the correct schema attribute types. Implementation diff --git a/designs/session_logout.rst b/designs/session_logout.rst index c7d0e8011..50e7f6ea2 100644 --- a/designs/session_logout.rst +++ b/designs/session_logout.rst @@ -44,7 +44,7 @@ metadata of the "creation" of the session, this is why we use the stub form that its expiry. On a replication attribute conflict, an expired state will always "overrule" an active state, even -if the CID of expiry preceeds that of the active state. We merge the expiry into the metadata in +if the CID of expiry precedes that of the active state. We merge the expiry into the metadata in this case. Token Usage / Revocation @@ -58,8 +58,8 @@ Both are described, but we have chosen to use positive validation with limited i Positive Validation ------------------- -This is a positive validation of the validity of a session. The abscence of a positive session -existance, is what implies revocation. +This is a positive validation of the validity of a session. The absence of a positive session +existence is what implies revocation.
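A minimal sketch of the positive model (hypothetical names; replication delay is what the grace window described next accounts for):

```rust
use std::collections::BTreeMap;
use std::time::{Duration, SystemTime};

struct SessionRecord {
    expires_at: SystemTime,
}

// The session is looked *up*: a present, unexpired record is valid, and
// the absence of any record implies the session was revoked.
fn validate(sessions: &BTreeMap<String, SessionRecord>, id: &str, now: SystemTime) -> bool {
    match sessions.get(id) {
        Some(rec) => now < rec.expires_at,
        None => false,
    }
}

fn main() {
    let now = SystemTime::now();
    let mut sessions = BTreeMap::new();
    sessions.insert(
        "session-a".to_string(),
        SessionRecord { expires_at: now + Duration::from_secs(3600) },
    );
    assert!(validate(&sessions, "session-a", now));
    assert!(!validate(&sessions, "session-b", now)); // absent -> revoked
}
```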
The session will have a "grace window", to account for replication delay. This is so that if the session is used on another kanidm server which has not yet received the latest revocation list @@ -87,7 +87,7 @@ Clean Up ^^^^^^^^ Sessions can only be cleaned up once a sufficient replication window has passed, and the session is in an expired state, -since the abscence of the session also implies revocation has occured. +since the absence of the session also implies revocation has occurred. This way once the changelog window is passed, we assume the specific session in question can be removed. An active session *should never* be deleted, it *must* pass through the expired state first. This is so that @@ -107,13 +107,13 @@ When a session is invalidated, it's session id is added to a "site-wide" revocat the maximum time of use of that session id. When a session is check as part of a standard UAT check, or an OAuth 2.0 refresh, if the session -id is present in the revocation list, it is denied access. Abscence from the revocation list implies +id is present in the revocation list, it is denied access. Absence from the revocation list implies the session remains valid. This method requires no gracewindow, since the replication of the revocation list will be bound to the performance of replication and it's distribution. -The risk is that all sessions *must* have a maximum life, so that their existance in the revocation +The risk is that all sessions *must* have a maximum life, so that their existence in the revocation list is not unbounded. This version may have a greater risk of disk/memory usage due to the size of the list that may exist in large deployments. @@ -137,7 +137,7 @@ and intervention. As a result, it is safer and more thorough for us to provide a system, which accurately describes the exact state of what is valid at a point in time. The specific restore scenario is that a token is issued at time A. A backup is taken now at time B. -Next the user revokes the token at time C, and replication has not yet occured. At this point the backup +Next the user revokes the token at time C, and replication has not yet occurred. At this point the backup from time B was restored. In this scenario, without access to the token itself, or without scouring logs to find the session @@ -187,9 +187,9 @@ A "worst case" scenario is when we involve system failure along with an attempte have three kanidm servers in replication. * Refresh Token A is stolen, but not used used. -* Token A expires. The refesh is sent to Server 1. Token B is issued. +* Token A expires. The refresh is sent to Server 1. Token B is issued. * Before replication can occur, Server 1 goes down. -* Stolen refesh Token A is exchanged on Server 3. +* Stolen refresh Token A is exchanged on Server 3. * Token B is used on Server 2. * Replication between server 2 and 3 occurs. @@ -199,9 +199,9 @@ legitimate token B can continue to be used. To achieve this we need to determine an order of the events. Let's assume a better scenario first. * Refresh Token A is stolen, but not used used. -* Token A expires. The refesh is sent to Server 1. Token B is issued. +* Token A expires. The refresh is sent to Server 1. Token B is issued. * Token B is used on Server 1. -* Stolen refesh Token A is exchanged on Server 1. +* Stolen refresh Token A is exchanged on Server 1. We store a "refresh id" in the refresh token, and a issued-by id in the access token. Additionally we store an issued-at timestamp (from the replication CID) in both. 
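Put concretely, the validity rule these hunks describe can be sketched in a few lines. This is a minimal illustration only; the struct and field names are assumptions made for the sketch, not Kanidm's actual types, and the real design carries more metadata than shown here.

```rust
use std::time::Duration;

/// Illustrative only -- not Kanidm's real structures.
struct AccessToken {
    issued_by: u64,      // id of the refresh exchange that minted this token
    issued_at: Duration, // derived from the replication CID
}

struct SessionRecord {
    current_issued_by: u64, // latest refresh exchange this replica knows about
}

/// Accept a token if it came from the current refresh chain, or if it is
/// recent enough that replication may simply not have converged yet.
fn token_is_valid(
    tok: &AccessToken,
    rec: &SessionRecord,
    now: Duration,
    gracewindow: Duration,
) -> bool {
    tok.issued_by == rec.current_issued_by || now < tok.issued_at + gracewindow
}

fn main() {
    let rec = SessionRecord { current_issued_by: 2 };
    let stale = AccessToken { issued_by: 1, issued_at: Duration::from_secs(0) };
    // Outside the gracewindow and not the current issued-by id: rejected.
    assert!(!token_is_valid(
        &stale,
        &rec,
        Duration::from_secs(600),
        Duration::from_secs(300)
    ));
}
```

The key design choice is that the gracewindow bounds how long a superseded issued-by id remains acceptable, which is exactly the window in which the conflict resolution below must operate.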
@@ -219,16 +219,16 @@ gracewindow to assume that our issuance was *valid*. In this design we can see the following would occur. * Refresh Token A is stolen, but not used used. -* Token A expires. The refesh is sent to Server 1. Token B is issued. (This updates the issued-by id) +* Token A expires. The refresh is sent to Server 1. Token B is issued. (This updates the issued-by id) * Token B is used on Server 1. (valid, issued-by id matches) -* Stolen refesh Token A is exchanged on Server 1. (invalid, not the currently defined refresh token) +* Stolen refresh Token A is exchanged on Server 1. (invalid, not the currently defined refresh token) In the first case. * Refresh Token A is stolen, but not used used. -* Token A expires. The refesh is sent to Server 1. Token B is issued. (updates the issued-by id) +* Token A expires. The refresh is sent to Server 1. Token B is issued. (updates the issued-by id) * Before replication can occur, Server 1 goes down. -* Stolen refesh Token A is exchanged on Server 3. Token C is issued (updates the issued-by id) +* Stolen refresh Token A is exchanged on Server 3. Token C is issued (updates the issued-by id) * Token B is used on Server 2. (valid, matches the current defined issued-by id) * Token B is used on Server 3. (valid, within gracewindow even though issued-by is incorrect) * Replication between server 2 and 3 occurs. (Conflict occurs in session. Second issued-by is revoked, meaning token C is now invalid) diff --git a/designs/sudo.rst b/designs/sudo.rst index 74490eb2f..e229a006b 100644 --- a/designs/sudo.rst +++ b/designs/sudo.rst @@ -21,7 +21,7 @@ This will allow filtering on sudo=true, meaning that certain default access cont altered to enforce that they require sudo mode. Some accounts by default represent a high level of privilege. These should have implicit sudo -granted when they are autheticated. This will be based on a group membership idm_hp_implicit_sudo +granted when they are authenticated. This will be based on a group membership idm_hp_implicit_sudo and should only apply to admin/idm_admin by default. This will pin the sudo expiry to the expiry time of the session (rather than a shorter time). diff --git a/designs/system_protected_objects.rst b/designs/system_protected_objects.rst index 0273bfec1..d158d5482 100644 --- a/designs/system_protected_objects.rst +++ b/designs/system_protected_objects.rst @@ -54,7 +54,7 @@ It was considered to provide default ACP's that would protect system items. This * it would require a "deny" acp type, and I do not wish to create this, as people could then create their own deny rules (always incorrect!) * There would be a lot of acp's involved in this protection (but acp's are expressive enough to provide it!) * The acp's would need a self-referencing acp to protect themselves from modification. -* Having a seperate plugin to protect this will be faster than acp processing because we check less filters (But this is not a strong argument) +* Having a separate plugin to protect this will be faster than acp processing because we check fewer filters (But this is not a strong argument) * the plugin can provide targeted error messages about why they were denied, rather than a generic acp denied message. * the plugin can provide detailed testing of edge cases in a confined manner diff --git a/designs/uid_gid_generation.rst b/designs/uid_gid_generation.rst index 37b7916c7..fb9754b7a 100644 --- a/designs/uid_gid_generation.rst +++ b/designs/uid_gid_generation.rst @@ -15,7 +15,7 @@ by extracting the last 32 bits.
Why only gid number? -------------------- -It's a common misconception that uid is the only seperation on linux that matters. When a user +It's a common misconception that uid is the only separation on linux that matters. When a user account exists, it has a primary user id AND a primary group id. Default umask grants rw to any member of the same primary group id, which leads to misconfigurations where an admin in the intent of saying "all users belong to default_users" ends up granting all users the right to read and write @@ -28,7 +28,7 @@ SSSD's dynamic gid allocation from AD and FreeIPA) make effort to assign a user- to combat this issue. Instead of creating a group per account, we instead *imply* that the gidnumber *is* the uidnumber, -and that a posixaccount *implies* the existance of a user private group that the pam/nsswitch +and that a posixaccount *implies* the existence of a user private group that the pam/nsswitch tools will generate on the client. This also guarantees that posixgroups will never conflict or overlap with the uid namespace with weth attr uniqueness plugin. diff --git a/examples/server.toml b/examples/server.toml index 81216a5b3..4f8d14fbe 100644 --- a/examples/server.toml +++ b/examples/server.toml @@ -72,7 +72,7 @@ origin = "https://idm.example.com:8443" # Defaults to "" (no path set) # path = "/var/lib/kanidm/backups/" # -# The schedule to run online backups. All times are interpretted in UTC. +# The schedule to run online backups. All times are interpreted in UTC. # The format of the cron expression is: # # sec min hour day of month month day of week year diff --git a/examples/server_container.toml b/examples/server_container.toml index 2bfd3c8b9..e8c58e48b 100644 --- a/examples/server_container.toml +++ b/examples/server_container.toml @@ -14,7 +14,7 @@ bindaddress = "[::]:8443" # To preserve the original IP of the caller, these systems # will often add a header such as "Forwarded" or # "X-Forwarded-For". If set to true, then this header is -# respected as the "authoritive" source of the IP of the +# respected as the "authoritative" source of the IP of the # connected client. If you are not using a load balancer # then you should leave this value as default. # Defaults to false diff --git a/insecure_generate_tls.ps1 b/insecure_generate_tls.ps1 index 2cc4ef0a7..6b8ed477d 100644 --- a/insecure_generate_tls.ps1 +++ b/insecure_generate_tls.ps1 @@ -90,7 +90,7 @@ if ( $LastExitCode -ne 0 ){ exit 1 } -Write-Output "Generating the certficate signing request" +Write-Output "Generating the certificate signing request" openssl req -sha256 -config "${ALTNAME_FILE}" -days 31 -new -extensions v3_req -key "${KEYFILE}" -out "${CSRFILE}" if ( $LastExitCode -ne 0 ){ exit 1 diff --git a/insecure_generate_tls.sh b/insecure_generate_tls.sh index d352ed04d..aba20fb34 100755 --- a/insecure_generate_tls.sh +++ b/insecure_generate_tls.sh @@ -162,7 +162,7 @@ openssl req -batch -config "${CANAME_FILE}" \ echo "Generating the server private key..." openssl ecparam -genkey -name prime256v1 -noout -out "${KEYFILE}" -echo "Generating the certficate signing request..." +echo "Generating the certificate signing request..." openssl req -sha256 -new \ -batch \ -config "${ALTNAME_FILE}" -extensions v3_req \ diff --git a/kanidm_book/src/DEVELOPER_README.md b/kanidm_book/src/DEVELOPER_README.md index a2e674c34..481662a02 100644 --- a/kanidm_book/src/DEVELOPER_README.md +++ b/kanidm_book/src/DEVELOPER_README.md @@ -145,7 +145,7 @@ git commit -m 'Commit message' change_file.rs ... 
git push ``` -If you receive advice or make further changes, just keep commiting to the branch, and pushing to +If you receive advice or make further changes, just keep committing to the branch, and pushing to your branch. When we are happy with the code, we'll merge in GitHub, meaning you can now clean up your branch. @@ -307,7 +307,7 @@ To speed up testing across platforms, we're leveraging GitHub actions to build c use. Whenever code is merged with the `master` branch of Kanidm, containers are automatically built for -`kanidmd` and `radius`. Sometimes they fail to build, but we'll try to keep them avilable. +`kanidmd` and `radius`. Sometimes they fail to build, but we'll try to keep them available. To find information on the packages, [visit the Kanidm packages page](https://github.com/orgs/kanidm/packages?repo_name=kanidm). diff --git a/kanidm_book/src/accounts_and_groups.md b/kanidm_book/src/accounts_and_groups.md index f0c832a05..489e41410 100644 --- a/kanidm_book/src/accounts_and_groups.md +++ b/kanidm_book/src/accounts_and_groups.md @@ -5,7 +5,7 @@ for these data. As a result, there are many concepts and important details to un ## Service Accounts vs Person Accounts -Kanidm seperates accounts into two types. Person accounts (or persons) are intended for use by +Kanidm separates accounts into two types. Person accounts (or persons) are intended for use by humans that will access the system in an interactive way. Service accounts are intended for use by computers or services that need to identify themself to Kanidm. Generally a person or group of persons will be responsible for and will manage service accounts. Because of this distinction these @@ -32,7 +32,7 @@ There are two builtin system administration accounts. `admin` is the default service account which has privileges to configure and administer kanidm as a whole. This account can manage access controls, schema, integrations and more. However the `admin` -can not manage persons by default to seperate the priviliges. As this is a service account is is +can not manage persons by default to separate the privileges. As this is a service account it is intended for limited use. `idm_admin` is the default service account which has privileges to create persons and to manage @@ -42,7 +42,7 @@ Both the `admin` and the `idm_admin` user should _NOT_ be used for daily activit initial system configuration, and for disaster recovery scenarios. You should delegate permissions as required to named user accounts instead. -The majority of the builtin groups are privilige groups that provide rights over Kanidm +The majority of the builtin groups are privilege groups that provide rights over Kanidm administrative actions. These include groups for account management, person management (personal and sensitive data), group management, and more. diff --git a/kanidm_book/src/developers/designs/access_profiles_and_security.md b/kanidm_book/src/developers/designs/access_profiles_and_security.md index ca6b373c7..648f80fcd 100644 --- a/kanidm_book/src/developers/designs/access_profiles_and_security.md +++ b/kanidm_book/src/developers/designs/access_profiles_and_security.md @@ -10,7 +10,7 @@ override even if applicable. They should only be created by system access profil changes must be denied. Access profiles are stored as entries and are dynamically loaded into a structure that is more -efficent for use at runtime. `Schema` and its transactions are a similar implementation. +efficient for use at runtime.
`Schema` and its transactions are a similar implementation. ## Search Requirements @@ -28,7 +28,7 @@ An example: > `legalName`), and their public `email`. Worded a bit differently. You need permission over the scope of entries, you need to be able to read -the attribute to filter on it, and you need to be able to read the attribute to recieve it in the +the attribute to filter on it, and you need to be able to read the attribute to receive it in the result entry. If Alice searches for `(&(name=william)(secretdata=x))`, we should not allow this to proceed because @@ -74,7 +74,7 @@ acp class user: Pres(name) allow, Pres(desc) deny. Invert and Append So the filter now is: -``` +```text And: { AndNot: { Eq("class", "user") @@ -90,7 +90,7 @@ This would now only allow access to the `name` and `description` of the class `g If we extend this to a third, this would work. A more complex example: -``` +```text search { action: allow targetscope: Eq("class", "group") @@ -153,7 +153,7 @@ An example: ## Create Requirements A `create` profile defines the following limits to what objects can be created, through the -combination of filters and atttributes. +combination of filters and attributes. An example: @@ -211,7 +211,7 @@ CHANGE: Receiver should be a group, and should be single value/multivalue? Can _ Example profiles: -``` +```text search { action: allow receiver: Eq("memberof", "admins") @@ -344,7 +344,7 @@ exist! However, each one must still list their respective actions to allow prope The set of access controls is checked, and the set where receiver matches the current identified user is collected. These then are added to the users requested search as: -``` +```text And(, Or(, #[serde(skip_serializing_if = "Option::is_none")] - /// Space seperated list of scopes that were approved, if this differs from the + /// Space separated list of scopes that were approved, if this differs from the /// original request. pub scope: Option, #[serde(skip_serializing_if = "Option::is_none")] @@ -219,7 +219,7 @@ pub enum SubjectType { #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] #[serde(rename_all = "UPPERCASE")] -// WE REFUSE TO SUPPORT NONE. DONT EVEN ASK. IT WONT HAPPEN. +// WE REFUSE TO SUPPORT NONE. DONT EVEN ASK. IT WON'T HAPPEN. 
pub enum IdTokenSignAlg { ES256, RS256, diff --git a/kanidm_proto/src/scim_v1.rs b/kanidm_proto/src/scim_v1.rs index 998551b03..2a1c5659c 100644 --- a/kanidm_proto/src/scim_v1.rs +++ b/kanidm_proto/src/scim_v1.rs @@ -53,7 +53,7 @@ pub struct ScimSyncPerson { } // Need to allow this because clippy is broken and doesn't realise scimentry is out of crate -// so this can't be fufilled +// so this can't be fulfilled #[allow(clippy::from_over_into)] impl Into for ScimSyncPerson { fn into(self) -> ScimEntry { @@ -107,7 +107,7 @@ pub struct ScimExternalMember { } // Need to allow this because clippy is broken and doesn't realise scimentry is out of crate -// so this can't be fufilled +// so this can't be fulfilled #[allow(clippy::from_over_into)] impl Into for ScimExternalMember { fn into(self) -> ScimComplexAttr { @@ -135,7 +135,7 @@ pub struct ScimSyncGroup { } // Need to allow this because clippy is broken and doesn't realise scimentry is out of crate -// so this can't be fufilled +// so this can't be fulfilled #[allow(clippy::from_over_into)] impl Into for ScimSyncGroup { fn into(self) -> ScimEntry { diff --git a/kanidm_proto/src/v1.rs b/kanidm_proto/src/v1.rs index 156b9091c..11becec01 100644 --- a/kanidm_proto/src/v1.rs +++ b/kanidm_proto/src/v1.rs @@ -261,7 +261,7 @@ impl PartialEq for OperationError { } /* ===== higher level types ===== */ -// These are all types that are conceptually layers ontop of entry and +// These are all types that are conceptually layers on top of entry and // friends. They allow us to process more complex requests and provide // domain specific fields for the purposes of IDM, over the normal // entry/ava/filter types. These related deeply to schema. @@ -733,7 +733,7 @@ pub struct BackupCodesView { /* ===== low level proto types ===== */ // ProtoEntry vs Entry -// There is a good future reason for this seperation. It allows changing +// There is a good future reason for this separation. It allows changing // the in memory server core entry type, without affecting the protoEntry type // diff --git a/kanidm_rlm_python/mods-available/eap b/kanidm_rlm_python/mods-available/eap index 768cab822..0a17fe0de 100644 --- a/kanidm_rlm_python/mods-available/eap +++ b/kanidm_rlm_python/mods-available/eap @@ -339,7 +339,7 @@ eap { # # You can selectively disable TLS versions for - # compatability with old client devices. + # compatibility with old client devices. # # If your system has OpenSSL 1.1.0 or greater, do NOT # use these. 
Instead, set tls_min_version and diff --git a/kanidm_tools/src/cli/person.rs b/kanidm_tools/src/cli/person.rs index 63ee39ab5..a831a83c7 100644 --- a/kanidm_tools/src/cli/person.rs +++ b/kanidm_tools/src/cli/person.rs @@ -663,11 +663,11 @@ async fn totp_enroll_prompt(session_token: &CUSessionToken, client: &KanidmClien }) => totp_secret, Ok(status) => { debug!(?status); - eprintln!("An error occured -> InvalidState"); + eprintln!("An error occurred -> InvalidState"); return; } Err(e) => { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); return; } }; @@ -728,7 +728,7 @@ async fn totp_enroll_prompt(session_token: &CUSessionToken, client: &KanidmClien .idm_account_credential_update_cancel_mfareg(session_token) .await { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); } else { println!("success"); } @@ -781,7 +781,7 @@ async fn totp_enroll_prompt(session_token: &CUSessionToken, client: &KanidmClien .idm_account_credential_update_accept_sha1_totp(session_token) .await { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); } else { println!("success"); } @@ -792,7 +792,7 @@ async fn totp_enroll_prompt(session_token: &CUSessionToken, client: &KanidmClien .idm_account_credential_update_cancel_mfareg(session_token) .await { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); } else { println!("success"); } @@ -802,11 +802,11 @@ async fn totp_enroll_prompt(session_token: &CUSessionToken, client: &KanidmClien } Ok(status) => { debug!(?status); - eprintln!("An error occured -> InvalidState"); + eprintln!("An error occurred -> InvalidState"); return; } Err(e) => { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); return; } } @@ -825,11 +825,11 @@ async fn passkey_enroll_prompt(session_token: &CUSessionToken, client: &KanidmCl }) => pk_reg, Ok(status) => { debug!(?status); - eprintln!("An error occured -> InvalidState"); + eprintln!("An error occurred -> InvalidState"); return; } Err(e) => { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); return; } }; @@ -860,7 +860,7 @@ async fn passkey_enroll_prompt(session_token: &CUSessionToken, client: &KanidmCl { Ok(_) => println!("success"), Err(e) => { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); } }; } @@ -943,7 +943,7 @@ async fn credential_update_exec( { Ok(status) => display_status(status), Err(e) => { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); } } } @@ -970,7 +970,7 @@ async fn credential_update_exec( eprintln!(" - {}", fb_item) } } - _ => eprintln!("An error occured -> {:?}", e), + _ => eprintln!("An error occurred -> {:?}", e), } } else { println!("Successfully reset password."); @@ -987,7 +987,7 @@ async fn credential_update_exec( .idm_account_credential_update_remove_totp(&session_token) .await { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); } else { println!("success"); } @@ -1012,10 +1012,10 @@ async fn credential_update_exec( } Ok(status) => { debug!(?status); - eprintln!("An error occured -> InvalidState"); + eprintln!("An error occurred -> InvalidState"); } Err(e) => { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); } } } @@ -1029,7 +1029,7 @@ async fn credential_update_exec( .idm_account_credential_update_primary_remove(&session_token) .await { - 
eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); } else { println!("success"); } @@ -1055,7 +1055,7 @@ async fn credential_update_exec( } } Err(e) => { - eprintln!("An error occured pulling existing credentials -> {:?}", e); + eprintln!("An error occurred pulling existing credentials -> {:?}", e); } } let uuid_s: String = Input::new() @@ -1071,13 +1071,13 @@ async fn credential_update_exec( .interact_text() .expect("Failed to interact with interactive session"); - // Remeber, if it's NOT a valid uuid, it must have been empty as a termination. + // Remember, if it's NOT a valid uuid, it must have been empty as a termination. if let Ok(uuid) = Uuid::parse_str(&uuid_s) { if let Err(e) = client .idm_account_credential_update_passkey_remove(&session_token, uuid) .await { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); } else { println!("success"); } @@ -1099,7 +1099,7 @@ async fn credential_update_exec( .idm_account_credential_update_commit(&session_token) .await { - eprintln!("An error occured -> {:?}", e); + eprintln!("An error occurred -> {:?}", e); } else { println!("success"); } diff --git a/kanidm_tools/src/cli/synch.rs b/kanidm_tools/src/cli/synch.rs index 665d75762..e53543924 100644 --- a/kanidm_tools/src/cli/synch.rs +++ b/kanidm_tools/src/cli/synch.rs @@ -79,7 +79,7 @@ impl SynchOpt { .default(false) .with_prompt("Do you want to continue? This operation can NOT be undone.") .interact() - .unwrap() + .expect("Failed to get a valid response!") { info!("No changes were made"); return; @@ -96,7 +96,7 @@ impl SynchOpt { .default(false) .with_prompt("Do you want to continue? This operation can NOT be undone.") .interact() - .unwrap() + .expect("Failed to get a valid response!") { info!("No changes were made"); return; diff --git a/kanidm_unix_int/src/daemon.rs b/kanidm_unix_int/src/daemon.rs index f6d05fa70..9a60bba58 100644 --- a/kanidm_unix_int/src/daemon.rs +++ b/kanidm_unix_int/src/daemon.rs @@ -704,7 +704,7 @@ async fn main() { if let Err(e) = handle_task_client(socket, &task_channel_tx, &mut task_channel_rx).await { - error!("Task client error occured; error = {:?}", e); + error!("Task client error occurred; error = {:?}", e); } // If they DC we go back to accept. } @@ -727,7 +727,7 @@ async fn main() { tokio::spawn(async move { if let Err(e) = handle_client(socket, cachelayer_ref.clone(), &tc_tx).await { - error!("handle_client error occured; error = {:?}", e); + error!("handle_client error occurred; error = {:?}", e); } }); } diff --git a/kanidm_unix_int/src/db.rs b/kanidm_unix_int/src/db.rs index 2e354479a..fd69e716e 100644 --- a/kanidm_unix_int/src/db.rs +++ b/kanidm_unix_int/src/db.rs @@ -171,9 +171,9 @@ impl<'a> DbTxn<'a> { } pub fn commit(mut self) -> Result<(), ()> { - // debug!("Commiting BE txn"); + // debug!("Committing BE txn"); if self.committed { - error!("Invalid state, SQL transaction was already commited!"); + error!("Invalid state, SQL transaction was already committed!"); return Err(()); } self.committed = true; diff --git a/kanidmd/core/src/actors/mod.rs b/kanidmd/core/src/actors/mod.rs index 64d5b59a1..4b5e762ed 100644 --- a/kanidmd/core/src/actors/mod.rs +++ b/kanidmd/core/src/actors/mod.rs @@ -1,5 +1,5 @@ //! This module contains the server's async tasks that are called from the various frontend -//! components to conduct operations. These are seperated based on protocol versions and +//! components to conduct operations. These are separated based on protocol versions and //! 
if they are read or write transactions internally. pub mod v1_read; diff --git a/kanidmd/core/src/actors/v1_read.rs b/kanidmd/core/src/actors/v1_read.rs index 797779329..c8077340f 100644 --- a/kanidmd/core/src/actors/v1_read.rs +++ b/kanidmd/core/src/actors/v1_read.rs @@ -55,7 +55,7 @@ impl QueryServerReadV1 { &(*x_ref) } - // The server only recieves "Message" structures, which + // The server only receives "Message" structures, which // are whole self contained DB operations with all parsing // required complete. We still need to do certain validation steps, but // at this point our just is just to route to do_ diff --git a/kanidmd/core/src/https/middleware.rs b/kanidmd/core/src/https/middleware.rs index 3cda10069..f85a2d93b 100644 --- a/kanidmd/core/src/https/middleware.rs +++ b/kanidmd/core/src/https/middleware.rs @@ -87,7 +87,7 @@ impl tide::Middleware for StaticCon } #[derive(Default)] -/// Adds the folloing headers to responses +/// Adds the following headers to responses /// - x-frame-options /// - x-content-type-options /// - cross-origin-resource-policy diff --git a/kanidmd/core/src/https/routemaps.rs b/kanidmd/core/src/https/routemaps.rs index 8befd96c5..7803a3432 100644 --- a/kanidmd/core/src/https/routemaps.rs +++ b/kanidmd/core/src/https/routemaps.rs @@ -1,6 +1,6 @@ ///! Route-mapping magic for tide /// -/// Instead of adding routes with (for example) the .post method you add them with .mapped_post, pasing an instance of [RouteMap] and it'll do the rest... +/// Instead of adding routes with (for example) the .post method you add them with .mapped_post, passing an instance of [RouteMap] and it'll do the rest... use serde::{Deserialize, Serialize}; use tide::{Endpoint, Route}; diff --git a/kanidmd/core/src/https/v1.rs b/kanidmd/core/src/https/v1.rs index 32550cf4a..dc5263b70 100644 --- a/kanidmd/core/src/https/v1.rs +++ b/kanidmd/core/src/https/v1.rs @@ -265,7 +265,7 @@ pub async fn json_rest_event_delete_attr( mut req: tide::Request, filter: Filter, uuid_or_name: String, - // Seperate for account_delete_id_radius + // Separate for account_delete_id_radius attr: String, ) -> tide::Result { let uat = req.get_current_uat(); diff --git a/kanidmd/core/src/interval.rs b/kanidmd/core/src/interval.rs index 57d205d03..73cb90e34 100644 --- a/kanidmd/core/src/interval.rs +++ b/kanidmd/core/src/interval.rs @@ -128,7 +128,7 @@ impl IntervalActor { ) .await { - error!(?e, "An online backup error occured."); + error!(?e, "An online backup error occurred."); } } } diff --git a/kanidmd/core/src/ldaps.rs b/kanidmd/core/src/ldaps.rs index c60075bbd..6d78aefd9 100644 --- a/kanidmd/core/src/ldaps.rs +++ b/kanidmd/core/src/ldaps.rs @@ -123,7 +123,7 @@ async fn tls_acceptor( match accept_result { Ok((tcpstream, client_socket_addr)) => { // Start the event - // From the parms we need to create an SslContext. + // From the parameters we need to create an SslContext. 
let mut tlsstream = match Ssl::new(tls_parms.context()) .and_then(|tls_obj| SslStream::new(tls_obj, tcpstream)) { diff --git a/kanidmd/core/src/lib.rs b/kanidmd/core/src/lib.rs index 9eaaa1612..66edaa221 100644 --- a/kanidmd/core/src/lib.rs +++ b/kanidmd/core/src/lib.rs @@ -95,7 +95,7 @@ fn setup_backend_vacuum( // TODO #54: We could move most of the be/schema/qs setup and startup // outside of this call, then pass in "what we need" in a cloneable -// form, this way we could have seperate Idm vs Qs threads, and dedicated +// form, this way we could have separate Idm vs Qs threads, and dedicated // threads for write vs read async fn setup_qs_idms( be: Backend, @@ -456,7 +456,7 @@ pub async fn domain_rename_core(config: &Configuration) { match r { Ok(_) => info!("Domain Rename Success!"), Err(e) => { - error!("Domain Rename Failed - Rollback has occured: {:?}", e); + error!("Domain Rename Failed - Rollback has occurred: {:?}", e); std::process::exit(1); } }; @@ -529,7 +529,7 @@ pub async fn recover_account_core(config: &Configuration, name: &str) { Ok(new_pw) => match idms_prox_write.commit() { Ok(_) => new_pw, Err(e) => { - error!("A critical error during commit occured {:?}", e); + error!("A critical error during commit occurred {:?}", e); std::process::exit(1); } }, @@ -587,7 +587,7 @@ impl CoreHandle { impl Drop for CoreHandle { fn drop(&mut self) { if !self.clean_shutdown { - eprintln!("⚠️ UNCLEAN SHUTDOWN OCCURED ⚠️ "); + eprintln!("⚠️ UNCLEAN SHUTDOWN OCCURRED ⚠️ "); } // Can't enable yet until we clean up unix_int cache layer test // debug_assert!(self.clean_shutdown); diff --git a/kanidmd/lib/Cargo.toml b/kanidmd/lib/Cargo.toml index de959d0f1..85c9c9cd2 100644 --- a/kanidmd/lib/Cargo.toml +++ b/kanidmd/lib/Cargo.toml @@ -41,7 +41,7 @@ ldap3_proto.workspace = true libc.workspace = true libsqlite3-sys.workspace = true num_enum.workspace = true -# We need to explicitly ask for openssl-sys so that we get the version propogated +# We need to explicitly ask for openssl-sys so that we get the version propagated # into the build.rs for legacy feature checks. openssl-sys.workspace = true openssl.workspace = true diff --git a/kanidmd/lib/src/be/idl_arc_sqlite.rs b/kanidmd/lib/src/be/idl_arc_sqlite.rs index 43f9f7e28..bce955a92 100644 --- a/kanidmd/lib/src/be/idl_arc_sqlite.rs +++ b/kanidmd/lib/src/be/idl_arc_sqlite.rs @@ -742,7 +742,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { /// Index Slope Analysis. For the purpose of external modules you can consider this as a /// module that generates "weights" for each index that we have. Smaller values are faster /// indexes - larger values are more costly ones. This is not intended to yield perfect - /// weights. The intent is to seperate over obviously more effective indexes rather than + /// weights. The intent is to separate over obviously more effective indexes rather than /// to min-max the fine tuning of these. Consider name=foo vs class=*. name=foo will always /// be better than class=*, but comparing name=foo to spn=foo is "much over muchness" since /// both are really fast. @@ -755,7 +755,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { * * Since we have the filter2idl threshold, we want to find "what is the smallest * and most unique index asap so we can exit faster". This allows us to avoid - * loading larger most costly indexs that either have large idls, high variation + * loading larger most costly indexes that either have large idls, high variation * or few keys and are likely to miss and have to go out to disk. 
* * A few methods were proposed, but thanks to advice from Perri Boulton (psychology @@ -874,7 +874,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { * the "slopeyness" aka the jank of the line, or more precisely, the angle. * * Now we need a way to numerically compare these lines. Since the points could be - * anywere on our graph: + * anywhere on our graph: * * | * 4 + * @@ -905,7 +905,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { * ───────────┼ * nkeys * - * Since this is right angled we can use arctan to work out the degress of the line. This + * Since this is right angled we can use arctan to work out the degrees of the line. This * gives us a value from 1.0 to 90.0 (We clamp to a minimum of 1.0, because we use 0 as "None" * in the NonZeroU8 type in filter.rs, which allows ZST optimisation) * @@ -914,7 +914,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { * to minimise this loss and then we convert. * * And there we have it! A slope factor of the index! A way to compare these sets quickly - * at query optimisation time to minimse index access. + * at query optimisation time to minimise index access. */ let slopes: HashMap<_, _> = data .into_iter() @@ -938,7 +938,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { let l: u32 = data.len().try_into().unwrap_or(u32::MAX); let c = f64::from(l); let mean = data.iter().take(u32::MAX as usize).sum::() / c; - let varience: f64 = data + let variance: f64 = data .iter() .take(u32::MAX as usize) .map(|len| { @@ -948,7 +948,7 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { .sum::() / (c - 1.0); - let sd = varience.sqrt(); + let sd = variance.sqrt(); // This is saying ~85% of values will be at least this len or less. let sd_1 = mean + sd; @@ -956,14 +956,14 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> { } else if data.len() == 1 { (1.0, data[0]) } else { - // Cant resolve. + // Can't resolve. return IdxSlope::MAX; }; // Now we know sd_1 and number of keys. We can use this as a triangle to work out // the angle along the hypotenuse. We use this angle - or slope - to show which // elements have the smallest sd_1 and most keys available. Then because this - // is bound between 0.0 -> 90.0, we "unfurl" this around a half circle by multipling + // is bound between 0.0 -> 90.0, we "unfurl" this around a half circle by multiplying // by 2. This gives us a little more precision when we drop the decimal point. let sf = (sd_1 / n_keys).atan().to_degrees() * 2.8; diff --git a/kanidmd/lib/src/be/mod.rs b/kanidmd/lib/src/be/mod.rs index c958189f8..a85414b96 100644 --- a/kanidmd/lib/src/be/mod.rs +++ b/kanidmd/lib/src/be/mod.rs @@ -513,7 +513,7 @@ pub trait BackendTransaction { FilterResolved::Inclusion(l, _) => { // For inclusion to be valid, every term must have *at least* one element present. // This really relies on indexing, and so it's internal only - generally only - // for fully indexed existance queries, such as from refint. + // for fully indexed existence queries, such as from refint. // This has a lot in common with an And and Or but not really quite either. let mut plan = Vec::new(); @@ -787,7 +787,7 @@ pub trait BackendTransaction { // Check the other entry:attr indexes are valid // - // This is acutally pretty hard to check, because we can check a value *should* + // This is actually pretty hard to check, because we can check a value *should* // exist, but not that a value should NOT be present in the index. Thought needed ... // Got here? Ok! 
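The slope arithmetic walked through in the idl_arc_sqlite comments above reduces to a few lines. Here is a standalone sketch for orientation; it is illustrative only, omitting the empty and single-value special cases, the clamping, and the NonZeroU8 conversion the real code performs, and the real code applies its own scaling constant to the angle.

```rust
/// Slope factor of an index: mean plus one standard deviation of the idl
/// lengths, taken as the rise over the number of keys, expressed as an angle
/// in degrees. Smaller angles indicate a tighter, more selective index.
fn slope_factor(idl_lens: &[f64], n_keys: f64) -> f64 {
    let c = idl_lens.len() as f64;
    let mean = idl_lens.iter().sum::<f64>() / c;
    // Sample variance, dividing by c - 1 as in the hunk above.
    let variance = idl_lens
        .iter()
        .map(|l| (l - mean).powi(2))
        .sum::<f64>()
        / (c - 1.0);
    // ~85% of idls are at most this long.
    let sd_1 = mean + variance.sqrt();
    // Right-angled triangle: arctan of rise over run gives the angle.
    (sd_1 / n_keys).atan().to_degrees()
}

fn main() {
    // A "name"-like index: many keys, tiny idls -> small slope.
    println!("{:.2}", slope_factor(&[1.0, 1.0, 2.0], 1000.0));
    // A "class"-like index: few keys, huge idls -> large slope.
    println!("{:.2}", slope_factor(&[5000.0, 8000.0], 2.0));
}
```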
@@ -1101,7 +1101,7 @@ impl<'a> BackendWriteTransaction<'a> { let id_list: IDLBitRange = tombstones.iter().map(|e| e.get_id()).collect(); // Ensure nothing here exists in the RUV index, else it means - // we didn't trim properly, or some other state violation has occured. + // we didn't trim properly, or some other state violation has occurred. if !((&ruv_idls & &id_list).is_empty()) { admin_error!("RUV still contains entries that are going to be removed."); return Err(OperationError::ReplInvalidRUVState); @@ -1770,7 +1770,7 @@ impl Backend { */ } -// What are the possible actions we'll recieve here? +// What are the possible actions we'll receive here? #[cfg(test)] mod tests { @@ -2150,7 +2150,7 @@ mod tests { match result { Err(e) => { - // if the error is the file is not found, thats what we want so continue, + // if the error is the file is not found, that's what we want so continue, // otherwise return the error match e.kind() { std::io::ErrorKind::NotFound => {} @@ -2205,7 +2205,7 @@ mod tests { match result { Err(e) => { - // if the error is the file is not found, thats what we want so continue, + // if the error is the file is not found, that's what we want so continue, // otherwise return the error match e.kind() { std::io::ErrorKind::NotFound => {} diff --git a/kanidmd/lib/src/constants/acp.rs b/kanidmd/lib/src/constants/acp.rs index 7d27435eb..64fd3c1ef 100644 --- a/kanidmd/lib/src/constants/acp.rs +++ b/kanidmd/lib/src/constants/acp.rs @@ -259,7 +259,7 @@ pub const JSON_IDM_ACP_PEOPLE_MANAGE_PRIV_V1: &str = r#"{ // 31 - password import modification priv // right now, create requires you to have access to every attribute in a single snapshot, // so people will need to two step (create then import pw). Later we could add another -// acp that allows the create here too? Should it be seperate? +// acp that allows the create here too? Should it be separate? pub const JSON_IDM_ACP_PEOPLE_ACCOUNT_PASSWORD_IMPORT_PRIV_V1: &str = r#"{ "attrs": { "class": [ diff --git a/kanidmd/lib/src/constants/entries.rs b/kanidmd/lib/src/constants/entries.rs index 0dd28f08c..ed4cb141e 100644 --- a/kanidmd/lib/src/constants/entries.rs +++ b/kanidmd/lib/src/constants/entries.rs @@ -445,7 +445,7 @@ pub const JSON_IDM_HP_SYNC_ACCOUNT_MANAGE_PRIV: &str = r#"{ "class": ["group", "object"], "name": ["idm_hp_sync_account_manage_priv"], "uuid": ["00000000-0000-0000-0000-000000000037"], - "description": ["Builtin IDM Group for managing sychronisation from external identity sources"], + "description": ["Builtin IDM Group for managing synchronisation from external identity sources"], "member": [ "00000000-0000-0000-0000-000000000019" ] diff --git a/kanidmd/lib/src/constants/system_config.rs b/kanidmd/lib/src/constants/system_config.rs index 6c155854f..22b822ce6 100644 --- a/kanidmd/lib/src/constants/system_config.rs +++ b/kanidmd/lib/src/constants/system_config.rs @@ -1,5 +1,5 @@ /// Default entries for system_config -/// This is seperated because the password badlist section may become very long +/// This is separated because the password badlist section may become very long pub const JSON_SYSTEM_CONFIG_V1: &str = r####"{ "attrs": { "class": ["object", "system_config", "system"], diff --git a/kanidmd/lib/src/credential/mod.rs b/kanidmd/lib/src/credential/mod.rs index 4022b17d6..4d3168237 100644 --- a/kanidmd/lib/src/credential/mod.rs +++ b/kanidmd/lib/src/credential/mod.rs @@ -128,7 +128,7 @@ impl TryFrom<&str> for Password { // As we may add more algos, we keep the match algo single for later. 
#[allow(clippy::single_match)] fn try_from(value: &str) -> Result { - // There is probably a more efficent way to try this given different types? + // There is probably a more efficient way to try this given different types? // test django - algo$salt$hash let django_pbkdf: Vec<&str> = value.split('$').collect(); @@ -1201,7 +1201,7 @@ mod tests { */ /* - * wbrown - 20221104 - I tried to programatically enable the legacy provider, but + * wbrown - 20221104 - I tried to programmatically enable the legacy provider, but * it consistently "did nothing at all", meaning we have to rely on users to enable * this for this test. */ diff --git a/kanidmd/lib/src/entry.rs b/kanidmd/lib/src/entry.rs index 7b01d1d14..1e93aad75 100644 --- a/kanidmd/lib/src/entry.rs +++ b/kanidmd/lib/src/entry.rs @@ -267,7 +267,7 @@ impl Default for Entry { impl Entry { pub fn new() -> Self { Entry { - // This means NEVER COMMITED + // This means NEVER COMMITTED valid: EntryInit, state: EntryNew, attrs: Map::new(), @@ -418,7 +418,7 @@ impl Entry { } "index" => { valueset::from_value_iter( - vs.into_iter().map(|v| Value::new_indexs(v.as_str()) + vs.into_iter().map(|v| Value::new_indexes(v.as_str()) .unwrap_or_else(|| { warn!("WARNING: Allowing syntax incorrect attribute to be presented UTF8 string"); Value::new_utf8(v) @@ -474,7 +474,7 @@ impl Entry { ) } ia => { - warn!("WARNING: Allowing invalid attribute {} to be interpretted as UTF8 string. YOU MAY ENCOUNTER ODD BEHAVIOUR!!!", ia); + warn!("WARNING: Allowing invalid attribute {} to be interpreted as UTF8 string. YOU MAY ENCOUNTER ODD BEHAVIOUR!!!", ia); valueset::from_value_iter( vs.into_iter().map(|v| Value::new_utf8(v)) ) @@ -811,7 +811,7 @@ impl Entry { // be in the may/must set, and would FAIL our normal checks anyway. // The set of "may" is a combination of may and must, since we have already - // asserted that all must requirements are fufilled. This allows us to + // asserted that all must requirements are fulfilled. This allows us to // perform extended attribute checking in a single pass. let may: Result, _> = classes .iter() @@ -1048,7 +1048,7 @@ type IdxDiff<'a> = Vec>; impl Entry { - /// If this entry has ever been commited to disk, retrieve it's database id number. + /// If this entry has ever been committed to disk, retrieve its database id number. pub fn get_id(&self) -> u64 { self.state.id } @@ -1147,7 +1147,7 @@ impl Entry { } #[inline] - /// Given this entry, determine it's relative distinguished named for LDAP compatability. + /// Given this entry, determine its relative distinguished name for LDAP compatibility. pub(crate) fn get_uuid2rdn(&self) -> String { self.attrs .get("spn") @@ -1420,7 +1420,7 @@ impl Entry { changes } (Some(pre_vs), Some(post_vs)) => { - // it exists in both, we need to work out the differents within the attr. + // it exists in both, we need to work out the difference within the attr. let mut pre_idx_keys = pre_vs.generate_idx_eq_keys(); pre_idx_keys.sort_unstable(); @@ -1973,7 +1973,7 @@ impl Entry { /// multivalue in schema - IE this will *not* fail if the attribute is /// empty, yielding and empty array instead. /// - /// However, the converstion to IndexType is fallaible, so in case of a failure + /// However, the conversion to IndexType is fallible, so in case of a failure /// to convert, an Err is returned. #[inline(always)] pub(crate) fn get_ava_opt_index(&self, attr: &str) -> Option> { @@ -2374,7 +2374,7 @@ where } /// Remove an attribute-value pair from this entry.
If the ava doesn't exist, we - /// don't do anything else since we are asserting the abscence of a value. + /// don't do anything else since we are asserting the absence of a value. pub(crate) fn remove_ava(&mut self, attr: &str, value: &PartialValue) { self.valid .eclog diff --git a/kanidmd/lib/src/event.rs b/kanidmd/lib/src/event.rs index e4afaf54b..7d6389536 100644 --- a/kanidmd/lib/src/event.rs +++ b/kanidmd/lib/src/event.rs @@ -11,7 +11,7 @@ //! //! An "event" is generally then passed to the `QueryServer` for processing. //! By making these fully self contained units, it means that we can assert -//! at event creation time we have all the correct data requried to proceed +//! at event creation time we have all the correct data required to proceed //! with the operation, and a clear path to know how to transform events between //! various types. diff --git a/kanidmd/lib/src/filter.rs b/kanidmd/lib/src/filter.rs index 005461d80..bb7f3e3d3 100644 --- a/kanidmd/lib/src/filter.rs +++ b/kanidmd/lib/src/filter.rs @@ -607,7 +607,7 @@ impl FilterComp { // This probably needs some rework // Getting this each recursion could be slow. Maybe - // we need an inner functon that passes the reference? + // we need an inner function that passes the reference? let schema_attributes = schema.get_attributes(); // We used to check the attr_name by normalising it (lowercasing) // but should we? I think we actually should just call a special @@ -1110,7 +1110,7 @@ impl FilterResolved { } // We set the compound filters slope factor to "None" here, because when we do // optimise we'll actually fill in the correct slope factors after we sort those - // inner terms in a more optimial way. + // inner terms in a more optimal way. FilterComp::Or(vs) => { let fi: Option> = vs .into_iter() diff --git a/kanidmd/lib/src/idm/account.rs b/kanidmd/lib/src/idm/account.rs index de5b4640c..71e4bc097 100644 --- a/kanidmd/lib/src/idm/account.rs +++ b/kanidmd/lib/src/idm/account.rs @@ -237,7 +237,7 @@ impl Account { let cot = OffsetDateTime::unix_epoch() + ct; let vmin = if let Some(vft) = valid_from { - // If current time greater than strat time window + // If current time greater than start time window vft <= &cot } else { // We have no time, not expired. @@ -428,7 +428,7 @@ impl Account { pub(crate) fn existing_credential_id_list(&self) -> Option> { // TODO!!! - // Used in registrations only for disallowing exsiting credentials. + // Used in registrations only for disallowing existing credentials. None } @@ -493,7 +493,7 @@ impl Account { pub struct DestroySessionTokenEvent { // Who initiated this? pub ident: Identity, - // Who is it targetting? + // Who is it targeting? pub target: Uuid, // Which token id. pub token_id: Uuid, @@ -617,7 +617,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { pub struct ListUserAuthTokenEvent { // Who initiated this? pub ident: Identity, - // Who is it targetting? + // Who is it targeting? pub target: Uuid, } diff --git a/kanidmd/lib/src/idm/authsession.rs b/kanidmd/lib/src/idm/authsession.rs index 056e9bb6a..fab1b4a2c 100644 --- a/kanidmd/lib/src/idm/authsession.rs +++ b/kanidmd/lib/src/idm/authsession.rs @@ -32,7 +32,7 @@ use crate::idm::AuthState; use crate::prelude::*; // Each CredHandler takes one or more credentials and determines if the -// handlers requirements can be 100% fufilled. This is where MFA or other +// handlers requirements can be 100% fulfilled. This is where MFA or other // auth policies would exist, but each credHandler has to be a whole // encapsulated unit of function. 
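The "encapsulated unit" idea in that CredHandler comment can be pictured with a toy model: a handler that tracks its outstanding factors and only reports success once every one of them has been satisfied. The names and shapes below are invented for the sketch and are not taken from the codebase.

```rust
/// Illustrative toy model of a credential handler.
#[derive(PartialEq)]
enum Factor {
    Password,
    Totp,
}

enum HandlerState {
    Pending(Vec<Factor>), // factors still required
    Success,
    Denied,
}

/// Apply one presented factor. Success is only reachable when the set of
/// required factors has been fully satisfied; any failure is terminal.
fn validate(state: HandlerState, presented: Factor, ok: bool) -> HandlerState {
    match state {
        HandlerState::Pending(mut remaining) if ok => {
            remaining.retain(|f| *f != presented);
            if remaining.is_empty() {
                HandlerState::Success
            } else {
                HandlerState::Pending(remaining)
            }
        }
        // A failed factor, or input in a terminal state, denies the attempt.
        _ => HandlerState::Denied,
    }
}

fn main() {
    let s = HandlerState::Pending(vec![Factor::Password, Factor::Totp]);
    let s = validate(s, Factor::Totp, true);
    let s = validate(s, Factor::Password, true);
    assert!(matches!(s, HandlerState::Success));
}
```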
@@ -534,6 +534,7 @@ impl CredHandler { } } +#[allow(clippy::large_enum_variant)] #[derive(Clone)] /// This interleaves with the client auth step. The client sends an "init" /// and we go to the init state, sending back the list of what can proceed. @@ -672,7 +673,7 @@ impl AuthSession { // time: &Duration, // webauthn: &WebauthnCore, ) -> Result { - // Given some auth mech, select which credential(s) are apropriate + // Given some auth mech, select which credential(s) are appropriate // and attempt to use them. // Today we only select one, but later we could have *multiple* that @@ -702,7 +703,7 @@ impl AuthSession { ( None, Err(OperationError::InvalidAuthState( - "unable to negotitate credentials".to_string(), + "unable to negotiate credentials".to_string(), )), ) } else { @@ -860,7 +861,7 @@ impl AuthSession { // // The lockouts could also be an in-memory concept too? - // If this suceeds audit? + // If this succeeds audit? // If success, to authtoken? response diff --git a/kanidmd/lib/src/idm/credupdatesession.rs b/kanidmd/lib/src/idm/credupdatesession.rs index 777966403..e25407529 100644 --- a/kanidmd/lib/src/idm/credupdatesession.rs +++ b/kanidmd/lib/src/idm/credupdatesession.rs @@ -212,7 +212,7 @@ pub(crate) type CredentialUpdateSessionMutex = Arc, @@ -418,7 +418,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { // Mark that we have created an intent token on the user. // ⚠️ -- remember, there is a risk, very low, but still a risk of collision of the intent_id. - // instead of enforcing unique, which would divulge that the collision occured, we + // instead of enforcing unique, which would divulge that the collision occurred, we // write anyway, and instead on the intent access path we invalidate IF the collision // occurs. let mut modlist = ModifyList::new_append( @@ -589,7 +589,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { } } None => { - admin_error!("Corruption may have occured - index yielded an entry for intent_id, but the entry does not contain that intent_id"); + admin_error!("Corruption may have occurred - index yielded an entry for intent_id, but the entry does not contain that intent_id"); return Err(OperationError::InvalidState); } }; @@ -1970,7 +1970,7 @@ mod tests { let cutxn = idms.cred_update_transaction(); - // Now fake going back in time .... allows the tokne to decrypt, but the sesion + // Now fake going back in time .... allows the token to decrypt, but the session // is gone anyway! let c_status = cutxn .credential_update_status(&cust, ct) @@ -2264,7 +2264,7 @@ mod tests { )); // Now good to go, we need to now add our backup codes. - // Whats the right way to get these back? + // What's the right way to get these back? let c_status = cutxn .credential_primary_init_backup_codes(&cust, ct) .expect("Failed to update the primary cred password"); @@ -2386,7 +2386,7 @@ mod tests { let c_status = cutxn .credential_update_cancel_mfareg(&cust, ct) - .expect("Failed to cancel inflight totp change"); + .expect("Failed to cancel in-flight totp change"); assert!(matches!(c_status.mfaregstate, MfaRegStateStatus::None)); assert!(c_status.can_commit); @@ -2404,7 +2404,7 @@ mod tests { // - setup webauthn // - remove webauthn - // - test mulitple webauthn token. + // - test multiple webauthn tokens. #[idm_test] async fn test_idm_credential_update_onboarding_create_new_passkey( @@ -2445,7 +2445,7 @@ mod tests { assert!(matches!(c_status.mfaregstate, MfaRegStateStatus::None)); assert!(matches!( - // Shuld be none. + // Should be none.
c_status.primary.as_ref(), None )); diff --git a/kanidmd/lib/src/idm/group.rs b/kanidmd/lib/src/idm/group.rs index 2f63496ed..21737caaa 100644 --- a/kanidmd/lib/src/idm/group.rs +++ b/kanidmd/lib/src/idm/group.rs @@ -50,12 +50,12 @@ macro_rules! try_from_account_e { let f = filter!(f_or( riter.map(|u| f_eq("uuid", PartialValue::Uuid(u))).collect() )); - let ges: Vec<_> = $qs.internal_search(f).map_err(|e| { + let group_entries: Vec<_> = $qs.internal_search(f).map_err(|e| { admin_error!(?e, "internal search failed"); e })?; // Now convert the group entries to groups. - let groups: Result, _> = ges + let groups: Result, _> = group_entries .iter() .map(|e| Group::try_from_entry(e.as_ref())) .collect(); diff --git a/kanidmd/lib/src/idm/ldap.rs b/kanidmd/lib/src/idm/ldap.rs index 227c8e3a8..b742d1553 100644 --- a/kanidmd/lib/src/idm/ldap.rs +++ b/kanidmd/lib/src/idm/ldap.rs @@ -720,7 +720,7 @@ mod tests { .unwrap() .is_none()); - // Non-existant and invalid DNs + // Non-existent and invalid DNs assert!(task::block_on(ldaps.do_bind( idms, "spn=admin@example.com,dc=clownshoes,dc=example,dc=com", diff --git a/kanidmd/lib/src/idm/mod.rs b/kanidmd/lib/src/idm/mod.rs index 1baa0eb7f..7a8ec0628 100644 --- a/kanidmd/lib/src/idm/mod.rs +++ b/kanidmd/lib/src/idm/mod.rs @@ -1,4 +1,4 @@ -//! The Identity Management components that are layered ontop of the [QueryServer](crate::server::QueryServer). These allow +//! The Identity Management components that are layered on top of the [QueryServer](crate::server::QueryServer). These allow //! rich and expressive events and transformations that are lowered into the correct/relevant //! actions in the [QueryServer](crate::server::QueryServer). Generally this is where "Identity Management" policy and code //! is implemented. diff --git a/kanidmd/lib/src/idm/oauth2.rs b/kanidmd/lib/src/idm/oauth2.rs index 5d5360a2c..6817380a0 100644 --- a/kanidmd/lib/src/idm/oauth2.rs +++ b/kanidmd/lib/src/idm/oauth2.rs @@ -789,7 +789,7 @@ impl<'a> IdmServerProxyReadTransaction<'a> { Vec::with_capacity(0) }; - // Subseqent we then return an encrypted session handle which allows + // Subsequent we then return an encrypted session handle which allows // the user to indicate their consent to this authorisation. // // This session handle is what we use in "permit" to generate the redirect. @@ -1566,7 +1566,7 @@ fn parse_basic_authz(client_authz: &str) -> Result<(String, String), Oauth2Error Oauth2Error::AuthenticationRequired })?; let secret = split_iter.next().ok_or_else(|| { - admin_error!("Basic authz invalid format (missing ':' seperator?)"); + admin_error!("Basic authz invalid format (missing ':' separator?)"); Oauth2Error::AuthenticationRequired })?; @@ -2514,7 +2514,7 @@ mod tests { assert!(matches!(e, Oauth2Error::AuthenticationRequired)); assert!(idms_prox_write.commit().is_ok()); - // Now submit a non-existant/invalid token. Does not affect our tokens validity. + // Now submit a non-existent/invalid token. Does not affect our tokens validity. 
let mut idms_prox_write = task::block_on(idms.proxy_write(ct)); let revoke_request = TokenRevokeRequest { token: "this is an invalid token, nothing will happen!".to_string(), diff --git a/kanidmd/lib/src/idm/radius.rs b/kanidmd/lib/src/idm/radius.rs index 525e97f7f..36952a9de 100644 --- a/kanidmd/lib/src/idm/radius.rs +++ b/kanidmd/lib/src/idm/radius.rs @@ -74,7 +74,7 @@ impl RadiusAccount { let cot = OffsetDateTime::unix_epoch() + ct; let vmin = if let Some(vft) = &self.valid_from { - // If current time greater than strat time window + // If current time greater than start time window vft < &cot } else { // We have no time, not expired. diff --git a/kanidmd/lib/src/idm/scim.rs b/kanidmd/lib/src/idm/scim.rs index 6e27ea6fd..87382134d 100644 --- a/kanidmd/lib/src/idm/scim.rs +++ b/kanidmd/lib/src/idm/scim.rs @@ -98,7 +98,7 @@ impl SyncAccount { pub struct GenerateScimSyncTokenEvent { // Who initiated this? pub ident: Identity, - // Who is it targetting? + // Who is it targeting? pub target: Uuid, // The label pub label: String, @@ -247,7 +247,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { })?; let sync_account = SyncAccount::try_from_entry_rw(&entry).map_err(|e| { - admin_error!(?e, "Failed to covert sync account"); + admin_error!(?e, "Failed to convert sync account"); e })?; let sync_uuid = sync_account.uuid; @@ -290,7 +290,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { // Importantly, we have to do this for items that are in the recycle bin! // First, get the set of uuids that exist. We need this so we have the set of uuids we'll - // be deleteing *at the end*. + // be deleting *at the end*. let f_all_sync = filter_all!(f_and!([ f_eq("class", PVCLASS_SYNC_OBJECT.clone()), f_eq("sync_parent_uuid", PartialValue::Refer(sync_uuid)) @@ -370,7 +370,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { })?; let sync_account = SyncAccount::try_from_entry_rw(&entry).map_err(|e| { - admin_error!(?e, "Failed to covert sync account"); + admin_error!(?e, "Failed to convert sync account"); e })?; let sync_uuid = sync_account.uuid; @@ -413,7 +413,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { // Importantly, we have to do this for items that are in the recycle bin! // First, get the set of uuids that exist. We need this so we have the set of uuids we'll - // be deleteing *at the end*. + // be deleting *at the end*. let f_all_sync = filter_all!(f_and!([ f_eq("class", PVCLASS_SYNC_OBJECT.clone()), f_eq("sync_parent_uuid", PartialValue::Refer(sync_uuid)) @@ -649,7 +649,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { if fail { return Err(OperationError::InvalidEntryState); } - // From that set of entries, parition to entries that exist and are + // From that set of entries, partition to entries that exist and are // present, and entries that do not yet exist. // // We can't easily parititon here because we need to iterate over the @@ -690,7 +690,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { // // For entries that do exist, mod their external_id // - // Basicly we just set this up as a batch modify and submit it. + // Basically we just set this up as a batch modify and submit it. 
self.qs_write .internal_batch_modify(change_entries.iter().filter_map(|(u, scim_ent)| { // If the entry has an external id diff --git a/kanidmd/lib/src/idm/server.rs b/kanidmd/lib/src/idm/server.rs index 7778d3ea3..7467e5485 100644 --- a/kanidmd/lib/src/idm/server.rs +++ b/kanidmd/lib/src/idm/server.rs @@ -902,7 +902,7 @@ impl<'a> IdmServerAuthTransaction<'a> { ae: &AuthEvent, ct: Duration, ) -> Result { - trace!(?ae, "Recieved"); + trace!(?ae, "Received"); // Match on the auth event, to see what we need to do. match &ae.step { @@ -1654,7 +1654,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { })? }; - // If we got here, then pre-apply succedded, and that means access control + // If we got here, then pre-apply succeeded, and that means access control // passed. Now we can do the extra checks. // Check the password quality. @@ -1733,7 +1733,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> { })? }; - // If we got here, then pre-apply succedded, and that means access control + // If we got here, then pre-apply succeeded, and that means access control // passed. Now we can do the extra checks. self.check_password_quality(pce.cleartext.as_str(), account.related_inputs().as_slice()) @@ -2291,7 +2291,7 @@ mod tests { } _ => { error!( - "A critical error has occured! We have a non-continue result!" + "A critical error has occurred! We have a non-continue result!" ); panic!(); } @@ -2301,7 +2301,7 @@ } Err(e) => { // Should not occur! - error!("A critical error has occured! {:?}", e); + error!("A critical error has occurred! {:?}", e); panic!(); } }; @@ -2338,14 +2338,14 @@ } _ => { error!( - "A critical error has occured! We have a non-continue result!" + "A critical error has occurred! We have a non-continue result!" ); panic!(); } } } Err(e) => { - error!("A critical error has occured! {:?}", e); + error!("A critical error has occurred! {:?}", e); // Should not occur! panic!(); } @@ -2379,14 +2379,14 @@ } _ => { error!( - "A critical error has occured! We have a non-succcess result!" + "A critical error has occurred! We have a non-success result!" ); panic!(); } } } Err(e) => { - error!("A critical error has occured! {:?}", e); + error!("A critical error has occurred! {:?}", e); // Should not occur! panic!(); } @@ -2518,13 +2518,13 @@ token } _ => { - error!("A critical error has occured! We have a non-succcess result!"); + error!("A critical error has occurred! We have a non-success result!"); panic!(); } } } Err(e) => { - error!("A critical error has occured! {:?}", e); + error!("A critical error has occurred! {:?}", e); // Should not occur! panic!(); } @@ -2587,14 +2587,14 @@ } _ => { error!( - "A critical error has occured! We have a non-succcess result!" + "A critical error has occurred! We have a non-success result!" ); panic!(); } } } Err(e) => { - error!("A critical error has occured! {:?}", e); + error!("A critical error has occurred! {:?}", e); // Should not occur! panic!(); } @@ -2644,14 +2644,14 @@ } _ => { error!( - "A critical error has occured! We have a non-denied result!" + "A critical error has occurred! We have a non-denied result!" ); panic!(); } } } Err(e) => { - error!("A critical error has occured! {:?}", e); + error!("A critical error has occurred! {:?}", e); // Should not occur! panic!(); } @@ -2958,7 +2958,7 @@ assert!(idms_prox_write.commit().is_ok()); // And auth should now fail due to the lack of PW material (note that - softlocking WONT kick in because the cred_uuid is gone!)
+        // softlocking WON'T kick in because the cred_uuid is gone!)
         let mut idms_auth = idms.auth();
         let a3 = task::block_on(
             idms_auth.auth_unix(&uuae_good, Duration::from_secs(TEST_CURRENT_TIME)),
         );
@@ -3119,7 +3119,7 @@ fn test_idm_account_valid_from_expire() {
         run_idm_test!(
             |qs: &QueryServer, idms: &IdmServer, _idms_delayed: &mut IdmServerDelayed| {
-                // Any account taht is not yet valrid / expired can't auth.
+                // Any account that is not yet valid / expired can't auth.
                 task::block_on(init_admin_w_password(qs, TEST_PASSWORD))
                     .expect("Failed to setup admin account");
@@ -3327,14 +3327,14 @@ mod tests {
                     }
                     _ => {
                         error!(
-                            "A critical error has occured! We have a non-denied result!"
+                            "A critical error has occurred! We have a non-denied result!"
                         );
                         panic!();
                     }
                 }
             }
             Err(e) => {
-                error!("A critical error has occured! {:?}", e);
+                error!("A critical error has occurred! {:?}", e);
                 panic!();
             }
         };
@@ -3416,14 +3416,14 @@ mod tests {
                     }
                     _ => {
                         error!(
-                            "A critical error has occured! We have a non-succcess result!"
+                            "A critical error has occurred! We have a non-success result!"
                         );
                         panic!();
                     }
                 }
             }
             Err(e) => {
-                error!("A critical error has occured! {:?}", e);
+                error!("A critical error has occurred! {:?}", e);
                 // Should not occur!
                 panic!();
             }
@@ -3489,14 +3489,14 @@ mod tests {
                     }
                     _ => {
                         error!(
-                            "A critical error has occured! We have a non-denied result!"
+                            "A critical error has occurred! We have a non-denied result!"
                         );
                         panic!();
                     }
                 }
             }
             Err(e) => {
-                error!("A critical error has occured! {:?}", e);
+                error!("A critical error has occurred! {:?}", e);
                 panic!();
             }
         };
@@ -3525,14 +3525,14 @@ mod tests {
                     }
                     _ => {
                         error!(
-                            "A critical error has occured! We have a non-denied result!"
+                            "A critical error has occurred! We have a non-denied result!"
                         );
                         panic!();
                     }
                 }
             }
             Err(e) => {
-                error!("A critical error has occured! {:?}", e);
+                error!("A critical error has occurred! {:?}", e);
                 panic!();
             }
         };
diff --git a/kanidmd/lib/src/idm/serviceaccount.rs b/kanidmd/lib/src/idm/serviceaccount.rs
index a41cc3a21..0aeb4fe22 100644
--- a/kanidmd/lib/src/idm/serviceaccount.rs
+++ b/kanidmd/lib/src/idm/serviceaccount.rs
@@ -135,14 +135,14 @@ impl ServiceAccount {
 pub struct ListApiTokenEvent {
     // Who initiated this?
     pub ident: Identity,
-    // Who is it targetting?
+    // Who is it targeting?
     pub target: Uuid,
 }

 pub struct GenerateApiTokenEvent {
     // Who initiated this?
     pub ident: Identity,
-    // Who is it targetting?
+    // Who is it targeting?
     pub target: Uuid,
     // The label
     pub label: String,
@@ -169,7 +169,7 @@ impl GenerateApiTokenEvent {
 pub struct DestroyApiTokenEvent {
     // Who initiated this?
     pub ident: Identity,
-    // Who is it targetting?
+    // Who is it targeting?
     pub target: Uuid,
     // Which token id.
     pub token_id: Uuid,
@@ -204,7 +204,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
         let session_id = Uuid::new_v4();
         let issued_at = time::OffsetDateTime::unix_epoch() + ct;

-        // Normalise to UTC incase it was provided as something else.
+        // Normalise to UTC in case it was provided as something else.
         let expiry = gte.expiry.map(|odt| odt.to_offset(time::UtcOffset::UTC));

         let purpose = if gte.read_write {
diff --git a/kanidmd/lib/src/idm/unix.rs b/kanidmd/lib/src/idm/unix.rs
index 81f2e4229..be00393dc 100644
--- a/kanidmd/lib/src/idm/unix.rs
+++ b/kanidmd/lib/src/idm/unix.rs
@@ -372,9 +372,13 @@ macro_rules! try_from_account_group_e {
             f_eq("class", PVCLASS_GROUP.clone()),
             f_or(riter.map(|u| f_eq("uuid", PartialValue::Uuid(u))).collect())
         ]));
-        let ges: Vec<_> = $qs.internal_search(f)?;
+        let group_entries: Vec<_> = $qs.internal_search(f)?;
         let groups: Result<Vec<_>, _> = iter::once(Ok(upg))
-            .chain(ges.iter().map(|e| UnixGroup::try_from_entry(e.as_ref())))
+            .chain(
+                group_entries
+                    .iter()
+                    .map(|e| UnixGroup::try_from_entry(e.as_ref())),
+            )
             .collect();
         groups
     }
diff --git a/kanidmd/lib/src/plugins/base.rs b/kanidmd/lib/src/plugins/base.rs
index 09da8a374..0c93adfb4 100644
--- a/kanidmd/lib/src/plugins/base.rs
+++ b/kanidmd/lib/src/plugins/base.rs
@@ -88,8 +88,8 @@ impl Plugin for Base {
         // an internal operation.
         if !ce.ident.is_internal() {
             // TODO: We can't lazy const this as you can't borrow the type down to what
-            // range and contains on btreeset need, but can we possibly make these constly
-            // part of the struct somehow at init. rather than needing to parse a lot?
+            // range and contains on btreeset need, but can we possibly make these
+            // part of the struct at init, rather than needing to parse a lot?
             // The internal set is bounded by: UUID_ADMIN -> UUID_ANONYMOUS
             // Sadly we need to allocate these to strings to make references, sigh.
             let overlap: usize = cand_uuid.range(UUID_ADMIN..UUID_ANONYMOUS).count();
@@ -143,7 +143,7 @@ impl Plugin for Base {
                 }
             }
             Err(e) => {
-                admin_error!("Error occured checking UUID existance. {:?}", e);
+                admin_error!("Error occurred checking UUID existence. {:?}", e);
                 return Err(e);
             }
         }
@@ -335,7 +335,7 @@ mod tests {
         );
     }

-    // check unparseable uuid
+    // check unparsable uuid
     #[test]
    fn test_pre_create_uuid_invalid() {
        let preload: Vec<Entry<EntryInit, EntryNew>> = Vec::new();
diff --git a/kanidmd/lib/src/plugins/domain.rs b/kanidmd/lib/src/plugins/domain.rs
index 769dc7bbb..281ddeddc 100644
--- a/kanidmd/lib/src/plugins/domain.rs
+++ b/kanidmd/lib/src/plugins/domain.rs
@@ -58,7 +58,7 @@ impl Domain {
             if e.attribute_equality("class", &PVCLASS_DOMAIN_INFO)
                 && e.attribute_equality("uuid", &PVUUID_DOMAIN_INFO)
             {
-                // We always set this, because the DB uuid is authorative.
+                // We always set this, because the DB uuid is authoritative.
                 let u = Value::Uuid(qs.get_domain_uuid());
                 e.set_ava("domain_uuid", once(u));
                 trace!("plugin_domain: Applying uuid transform");
diff --git a/kanidmd/lib/src/plugins/memberof.rs b/kanidmd/lib/src/plugins/memberof.rs
index db2c3ec31..2ce43639e 100644
--- a/kanidmd/lib/src/plugins/memberof.rs
+++ b/kanidmd/lib/src/plugins/memberof.rs
@@ -98,7 +98,7 @@ fn do_memberof(
 fn apply_memberof(
     qs: &mut QueryServerWriteTransaction,
     // TODO: Experiment with HashSet/BTreeSet here instead of vec.
-    // May require https://github.com/rust-lang/rust/issues/62924 to allow poping
+    // May require https://github.com/rust-lang/rust/issues/62924 to allow popping
     mut group_affect: Vec<Uuid>,
 ) -> Result<(), OperationError> {
     trace!(" => entering apply_memberof");
@@ -189,7 +189,7 @@ fn apply_memberof(
             trace!("=> processing affected uuid {:?}", auuid);
             debug_assert!(!tgte.attribute_equality("class", &PVCLASS_GROUP));
             do_memberof(qs, auuid, &mut tgte)?;
-            // Only write if a change occured.
+            // Only write if a change occurred.
             if pre.get_ava_set("memberof") != tgte.get_ava_set("memberof")
                 || pre.get_ava_set("directmemberof") != tgte.get_ava_set("directmemberof")
             {
diff --git a/kanidmd/lib/src/plugins/mod.rs b/kanidmd/lib/src/plugins/mod.rs
index 024270fdc..72ff93072 100644
--- a/kanidmd/lib/src/plugins/mod.rs
+++ b/kanidmd/lib/src/plugins/mod.rs
@@ -51,7 +51,7 @@ trait Plugin {

     fn post_create(
         _qs: &mut QueryServerWriteTransaction,
-        // List of what we commited that was valid?
+        // List of what we committed that was valid?
         _cand: &[EntrySealedCommitted],
         _ce: &CreateEvent,
     ) -> Result<(), OperationError> {
diff --git a/kanidmd/lib/src/plugins/refint.rs b/kanidmd/lib/src/plugins/refint.rs
index 5dc0b27df..36fcc18c1 100644
--- a/kanidmd/lib/src/plugins/refint.rs
+++ b/kanidmd/lib/src/plugins/refint.rs
@@ -47,7 +47,7 @@ impl ReferentialIntegrity {
                 e
             })?;

-        // Is the existance of all id's confirmed?
+        // Is the existence of all id's confirmed?
         if b {
             Ok(())
         } else {
@@ -70,7 +70,7 @@ impl Plugin for ReferentialIntegrity {
     //
     // There is a situation to account for which is that a create or mod
     // may introduce the entry which is also to be referenced in the same
-    // transaction. Rather than have seperate verification paths - one to
+    // transaction. Rather than have separate verification paths - one to
     // check the UUID is in the cand set, and one to check the UUID exists
     // in the DB, we do the "correct" thing, write to the DB, and then assert
     // that the DB content is complete and valid instead.
diff --git a/kanidmd/lib/src/plugins/spn.rs b/kanidmd/lib/src/plugins/spn.rs
index d6072b39b..be7c2be2d 100644
--- a/kanidmd/lib/src/plugins/spn.rs
+++ b/kanidmd/lib/src/plugins/spn.rs
@@ -191,7 +191,7 @@ impl Spn {
         );

         // All we do is purge spn, and allow the plugin to recreate. Neat! It's also all still
-        // within the transaction, just incase!
+        // within the transaction, just in case!
         qs.internal_modify(
             &filter!(f_or!([
                 f_eq("class", PVCLASS_GROUP.clone()),
diff --git a/kanidmd/lib/src/repl/entry.rs b/kanidmd/lib/src/repl/entry.rs
index c8a5a68d6..fa9f80af4 100644
--- a/kanidmd/lib/src/repl/entry.rs
+++ b/kanidmd/lib/src/repl/entry.rs
@@ -20,7 +20,7 @@ pub struct EntryChangelog {
     /// A subtle and important piece of information is that an anchor can be considered
     /// as the "state as existing between two Cid's". This means for Cid X, this state is
     /// the "moment before X". This is important, as for a create we define the initial anchor
-    /// as "nothing". It's means for the anchor at time X, that changes that occured at time
+    /// as "nothing". This means, for the anchor at time X, that changes that occurred at time
     /// X have NOT been replayed and applied!
     anchors: BTreeMap<Cid, State>,
     changes: BTreeMap<Cid, Change>,
@@ -34,7 +34,7 @@ impl fmt::Display for EntryChangelog {
     }
 }
 */

-/// A change defines the transitions that occured within this Cid (transaction). A change is applied
+/// A change defines the transitions that occurred within this Cid (transaction). A change is applied
 /// as a whole, or rejected during the replay process.
 #[derive(Debug, Clone)]
 pub struct Change {
@@ -512,7 +512,7 @@ impl EntryChangelog {
     /*
     fn insert_anchor(&mut self, cid: Cid, entry_state: State) {
         // When we insert an anchor, we have to remove all subsequent anchors (but not
-        // the preceeding ones.)
+        // the preceding ones.)
         let _ = self.anchors.split_off(&cid);
         self.anchors.insert(cid.clone(), entry_state);
     }
@@ -521,7 +521,7 @@ impl EntryChangelog {
     pub fn trim_up_to(&mut self, cid: &Cid) -> Result<(), OperationError> {
         // Build a new anchor that is equal or less than this cid.
         // In other words, the cid we are trimming to, should be remaining
-        // in the CL, and we should have an anchor that preceeds it.
+        // in the CL, and we should have an anchor that precedes it.
         let (entry_state, rejected) = self.replay(Unbounded, Excluded(cid)).map_err(|e| {
             error!(?e);
             e
diff --git a/kanidmd/lib/src/repl/ruv.rs b/kanidmd/lib/src/repl/ruv.rs
index f02ad3efc..d66769d6f 100644
--- a/kanidmd/lib/src/repl/ruv.rs
+++ b/kanidmd/lib/src/repl/ruv.rs
@@ -155,7 +155,7 @@ impl<'a> ReplicationUpdateVectorTransaction for ReplicationUpdateVectorReadTrans
 impl<'a> ReplicationUpdateVectorWriteTransaction<'a> {
     pub fn rebuild(&mut self, entries: &[Arc<EntrySealedCommitted>]) -> Result<(), OperationError> {
         // Entries and their internal changelogs are the "source of truth" for all changes
-        // that have ever occured and are stored on this server. So we use them to rebuild our RUV
+        // that have ever occurred and are stored on this server. So we use them to rebuild our RUV
         // here!
         let mut rebuild_ruv: BTreeMap<Cid, IDLBitRange> = BTreeMap::new();
diff --git a/kanidmd/lib/src/schema.rs b/kanidmd/lib/src/schema.rs
index b2bff30d1..f569019ce 100644
--- a/kanidmd/lib/src/schema.rs
+++ b/kanidmd/lib/src/schema.rs
@@ -71,7 +71,7 @@ pub struct SchemaReadTransaction {
     ref_cache: CowCellReadTxn>,
 }

-/// An item reperesenting an attribute and the rules that enforce it. These rules enforce if an
+/// An item representing an attribute and the rules that enforce it. These rules enforce if an
 /// attribute on an [`Entry`] may be single or multi value, must be unique amongst all other types
 /// of this attribute, if the attribute should be [`indexed`], and what type of data [`syntax`] it may hold.
 ///
@@ -287,7 +287,7 @@ impl SchemaAttribute {
     }
 }

-/// An item reperesenting a class and the rules for that class. These rules enforce that an
+/// An item representing a class and the rules for that class. These rules enforce that an
 /// [`Entry`]'s avas conform to a set of requirements, giving structure to an entry about
 /// what avas must or may exist. The kanidm project provides attributes in `systemmust` and
 /// `systemmay`, which can not be altered. An administrator may extend these in the `must`
An administrator may extend these in the `must` @@ -1026,7 +1026,7 @@ impl<'a> SchemaWriteTransaction<'a> { name: AttrString::from("acp_receiver_group"), uuid: UUID_SCHEMA_ATTR_ACP_RECEIVER_GROUP, description: String::from( - "The group that recieves this access control to allow access", + "The group that receives this access control to allow access", ), multivalue: false, unique: false, @@ -1059,7 +1059,7 @@ impl<'a> SchemaWriteTransaction<'a> { name: AttrString::from("acp_search_attr"), uuid: UUID_SCHEMA_ATTR_ACP_SEARCH_ATTR, description: String::from( - "The attributes that may be viewed or searched by the reciever on targetscope.", + "The attributes that may be viewed or searched by the receiver on targetscope.", ), multivalue: true, unique: false, @@ -1558,7 +1558,7 @@ impl<'a> SchemaWriteTransaction<'a> { name: AttrString::from("memberof"), uuid: UUID_SCHEMA_CLASS_MEMBEROF, description: String::from( - "Class that is dynamically added to recepients of memberof or directmemberof", + "Class that is dynamically added to recipients of memberof or directmemberof", ), systemmay: vec![ AttrString::from("memberof"), @@ -2448,7 +2448,7 @@ mod tests { fn test_schema_filter_validation() { let schema_outer = Schema::new().expect("failed to create schema"); let schema = schema_outer.read(); - // Test non existant attr name + // Test non existent attr name let f_mixed = filter_all!(f_eq("nonClAsS", PartialValue::new_class("attributetype"))); assert_eq!( f_mixed.validate(&schema), diff --git a/kanidmd/lib/src/server/access.rs b/kanidmd/lib/src/server/access.rs index ca20bd198..7dae0ce69 100644 --- a/kanidmd/lib/src/server/access.rs +++ b/kanidmd/lib/src/server/access.rs @@ -832,7 +832,7 @@ pub trait AccessControlsTransaction<'a> { // Here we have an option<&str> which could mean there is a risk of // a malicious entity attempting to trick us by masking class mods // in non-iutf8 types. However, the server first won't respect their - // existance, and second, we would have failed the mod at schema checking + // existence, and second, we would have failed the mod at schema checking // earlier in the process as these were not correctly type. As a result // we can trust these to be correct here and not to be "None". v.to_str() @@ -1221,7 +1221,7 @@ pub trait AccessControlsTransaction<'a> { ) -> Result, OperationError> { // I think we need a structure like " CheckResult, which is in the order of the // entries, but also stashes the uuid. Then it has search, mod, create, delete, - // as seperate attrs to describe what is capable. + // as separate attrs to describe what is capable. // Does create make sense here? I don't think it does. Create requires you to // have an entry template. I think james was right about the create being @@ -1243,7 +1243,7 @@ pub trait AccessControlsTransaction<'a> { }; trace!(ident = %ident, "Effective permission check"); - // I think we seperate this to multiple checks ...? + // I think we separate this to multiple checks ...? // == search == // Get the relevant acps for this receiver. @@ -1556,7 +1556,7 @@ impl Default for AccessControls { acps_modify: Vec::new(), acps_delete: Vec::new(), }), - // Allow the expect, if this fails it reperesents a programming/development + // Allow the expect, if this fails it represents a programming/development // failure. 
            acp_resolve_filter_cache: ARCacheBuilder::new()
                .set_size(ACP_RESOLVE_FILTER_CACHE_MAX, ACP_RESOLVE_FILTER_CACHE_LOCAL)
@@ -2632,7 +2632,7 @@ mod tests {
         // In this case, we can make the create event with an empty entry
         // set because we only reference the entries in r_set in the test.
         //
-        // In the realy server code, the entry set is derived from and checked
+        // In the real server code, the entry set is derived from and checked
         // against the create event, so we have some level of trust in it.

         let ce_admin = CreateEvent::new_impersonate_identity(
diff --git a/kanidmd/lib/src/server/identity.rs b/kanidmd/lib/src/server/identity.rs
index c98f8eae0..bbf9ace2e 100644
--- a/kanidmd/lib/src/server/identity.rs
+++ b/kanidmd/lib/src/server/identity.rs
@@ -101,7 +101,7 @@ pub enum IdentType {
 /// caching components.
 pub enum IdentityId {
     // Time stamp of the originating event.
-    // The uuid of the originiating user
+    // The uuid of the originating user
     User(Uuid),
     Synch(Uuid),
     Internal,
@@ -204,7 +204,7 @@ impl Identity {
     pub fn from_impersonate(ident: &Self) -> Self {
         // TODO #64 ?: In the future, we could change some of this data
-        // to reflect the fact we are infact impersonating the action
+        // to reflect the fact we are in fact impersonating the action
         // rather than the user explicitly requesting it. Could matter
         // to audits and logs to determine what happened.
         ident.clone()
diff --git a/kanidmd/lib/src/server/migrations.rs b/kanidmd/lib/src/server/migrations.rs
index 34a51c13c..4e3281a07 100644
--- a/kanidmd/lib/src/server/migrations.rs
+++ b/kanidmd/lib/src/server/migrations.rs
@@ -196,7 +196,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
                 e
             })?;

-        // If there is nothing, we donn't need to do anything.
+        // If there is nothing, we don't need to do anything.
         if pre_candidates.is_empty() {
             admin_info!("migrate_2_to_3 no entries to migrate, complete");
             return Ok(());
@@ -342,7 +342,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
                 e
             })?;

-        // If there is nothing, we donn't need to do anything.
+        // If there is nothing, we don't need to do anything.
         if pre_candidates.is_empty() {
             admin_info!("migrate_8_to_9 no entries to migrate, complete");
             return Ok(());
diff --git a/kanidmd/lib/src/server/mod.rs b/kanidmd/lib/src/server/mod.rs
index 1e6718c2a..ffe1786de 100644
--- a/kanidmd/lib/src/server/mod.rs
+++ b/kanidmd/lib/src/server/mod.rs
@@ -452,7 +452,7 @@ pub trait QueryServerTransaction<'a> {
                 .ok_or_else(|| OperationError::InvalidAttribute("Invalid boolean syntax".to_string())),
             SyntaxType::SyntaxId => Value::new_syntaxs(value)
                 .ok_or_else(|| OperationError::InvalidAttribute("Invalid Syntax syntax".to_string())),
-            SyntaxType::IndexId => Value::new_indexs(value)
+            SyntaxType::IndexId => Value::new_indexes(value)
                 .ok_or_else(|| OperationError::InvalidAttribute("Invalid Index syntax".to_string())),
             SyntaxType::Uuid => {
                 // Attempt to resolve this name to a uuid. If it's already a uuid, then
@@ -531,7 +531,7 @@ pub trait QueryServerTransaction<'a> {
             SyntaxType::SyntaxId => PartialValue::new_syntaxs(value).ok_or_else(|| {
                 OperationError::InvalidAttribute("Invalid Syntax syntax".to_string())
             }),
-            SyntaxType::IndexId => PartialValue::new_indexs(value).ok_or_else(|| {
+            SyntaxType::IndexId => PartialValue::new_indexes(value).ok_or_else(|| {
                 OperationError::InvalidAttribute("Invalid Index syntax".to_string())
             }),
             SyntaxType::Uuid => {
@@ -966,7 +966,7 @@ impl QueryServer {
             .db_tickets
             .acquire()
             .await
-            .expect("unable to aquire db_ticket for qsr");
+            .expect("unable to acquire db_ticket for qsr");

         QueryServerReadTransaction {
             be_txn: self.be.read(),
@@ -985,14 +985,14 @@ impl QueryServer {
             .write_ticket
             .acquire()
             .await
-            .expect("unable to aquire writer_ticket for qsw");
+            .expect("unable to acquire writer_ticket for qsw");

         // We need to ensure a db conn will be available
         #[allow(clippy::expect_used)]
         let db_ticket = self
             .db_tickets
             .acquire()
             .await
-            .expect("unable to aquire db_ticket for qsw");
+            .expect("unable to acquire db_ticket for qsw");

         let schema_write = self.schema.write();
         let mut be_txn = self.be.write();
@@ -1010,7 +1010,7 @@ impl QueryServer {
             // take ownership of the value, and cause the commit to "only be run
             // once".
             //
-            // The commited flag is however used for abort-specific code in drop
+            // The committed flag is however used for abort-specific code in drop
             // which today I don't think we have ... yet.
             committed: false,
             phase,
@@ -1362,7 +1362,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
             self.reload_schema()?;
         }
         // Determine if we need to update access control profiles
-        // based on any modifications that have occured.
+        // based on any modifications that have occurred.
         // IF SCHEMA CHANGED WE MUST ALSO RELOAD!!! IE if schema had an attr removed
         // that we rely on we MUST fail this here!!
         if self.changed_schema || self.changed_acp {
diff --git a/kanidmd/lib/src/server/modify.rs b/kanidmd/lib/src/server/modify.rs
index 96d14ef4f..c32767dea 100644
--- a/kanidmd/lib/src/server/modify.rs
+++ b/kanidmd/lib/src/server/modify.rs
@@ -260,7 +260,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
     }

     /// Allows writing batches of modified entries without going through
-    /// the modlist path. This allows more effecient batch transformations
+    /// the modlist path. This allows more efficient batch transformations
     /// such as memberof, but at the expense that YOU must guarantee you
     /// uphold all other plugin and state rules that are important. You
     /// probably want modify instead.
diff --git a/kanidmd/lib/src/server/recycle.rs b/kanidmd/lib/src/server/recycle.rs
index 77a1f177d..4a1e756fa 100644
--- a/kanidmd/lib/src/server/recycle.rs
+++ b/kanidmd/lib/src/server/recycle.rs
@@ -176,7 +176,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         }

         // Do we need to apply pre-mod?
-        // Very likely, incase domain has renamed etc.
+        // Very likely, in case domain has renamed etc.
         Plugins::run_pre_modify(self, &mut candidates, &me).map_err(|e| {
             admin_error!("Revive operation failed (plugin), {:?}", e);
             e
diff --git a/kanidmd/lib/src/value.rs b/kanidmd/lib/src/value.rs
index 46fcc3f4a..c1c5e8f59 100644
--- a/kanidmd/lib/src/value.rs
+++ b/kanidmd/lib/src/value.rs
@@ -216,7 +216,7 @@ impl TryFrom<&str> for SyntaxType {
             "REFERENCE_UUID" => Ok(SyntaxType::ReferenceUuid),
             "JSON_FILTER" => Ok(SyntaxType::JsonFilter),
             "CREDENTIAL" => Ok(SyntaxType::Credential),
-            // Compatability for older syntax name.
+            // Compatibility for older syntax name.
"RADIUS_UTF8STRING" | "SECRET_UTF8STRING" => Ok(SyntaxType::SecretUtf8String), "SSHKEY" => Ok(SyntaxType::SshKey), "SECURITY_PRINCIPAL_NAME" => Ok(SyntaxType::SecurityPrincipalName), @@ -442,7 +442,7 @@ impl PartialValue { matches!(self, PartialValue::Refer(_)) } - pub fn new_indexs(s: &str) -> Option { + pub fn new_indexes(s: &str) -> Option { IndexType::try_from(s).map(PartialValue::Index).ok() } @@ -962,7 +962,7 @@ impl Value { Uuid::parse_str(s).map(Value::Uuid).ok() } - // Is this correct? Should ref be seperate? + // Is this correct? Should ref be separate? pub fn is_uuid(&self) -> bool { matches!(self, Value::Uuid(_)) } @@ -992,7 +992,7 @@ impl Value { matches!(self, Value::Syntax(_)) } - pub fn new_indexs(s: &str) -> Option { + pub fn new_indexes(s: &str) -> Option { IndexType::try_from(s).map(Value::Index).ok() } @@ -1295,7 +1295,7 @@ impl Value { } } - // We need a seperate to-ref_uuid to distinguish from normal uuids + // We need a separate to-ref_uuid to distinguish from normal uuids // in refint plugin. pub fn to_ref_uuid(&self) -> Option { match &self { @@ -1483,7 +1483,7 @@ impl Value { } } - // !!!! This function is beind phased out !!! + // !!!! This function is being phased out !!! #[allow(clippy::unreachable)] pub(crate) fn to_proto_string_clone(&self) -> String { match &self { diff --git a/kanidmd/lib/src/valueset/jws.rs b/kanidmd/lib/src/valueset/jws.rs index 5adc0b7a0..0fa035e91 100644 --- a/kanidmd/lib/src/valueset/jws.rs +++ b/kanidmd/lib/src/valueset/jws.rs @@ -28,7 +28,7 @@ impl ValueSetJwsKeyEs256 { .iter() .map(|b| { JwsSigner::from_es256_der(b).map_err(|e| { - debug!(?e, "Error occured parsing ES256 DER"); + debug!(?e, "Error occurred parsing ES256 DER"); OperationError::InvalidValueState }) }) @@ -184,7 +184,7 @@ impl ValueSetJwsKeyRs256 { .iter() .map(|b| { JwsSigner::from_rs256_der(b).map_err(|e| { - debug!(?e, "Error occured parsing RS256 DER"); + debug!(?e, "Error occurred parsing RS256 DER"); OperationError::InvalidValueState }) }) diff --git a/kanidmd/lib/src/valueset/session.rs b/kanidmd/lib/src/valueset/session.rs index 2510c873d..f359021ea 100644 --- a/kanidmd/lib/src/valueset/session.rs +++ b/kanidmd/lib/src/valueset/session.rs @@ -260,7 +260,7 @@ pub struct ValueSetOauth2Session { // this is a "filter" to tell us if as rs_id is used anywhere // in this set. The reason is so that we don't do O(n) searches // on a refer if it's not in this set. The alternate approach is - // an index on these maps, but its more work to mantain for a rare + // an index on these maps, but its more work to maintain for a rare // situation where we actually want to query rs_uuid -> sessions. 
     rs_filter: BTreeSet<Uuid>,
 }
diff --git a/kanidmd/testkit/tests/proto_v1_test.rs b/kanidmd/testkit/tests/proto_v1_test.rs
index d46dc6a40..2bab595c9 100644
--- a/kanidmd/testkit/tests/proto_v1_test.rs
+++ b/kanidmd/testkit/tests/proto_v1_test.rs
@@ -551,7 +551,7 @@ async fn test_server_rest_posix_lifecycle(rsclient: KanidmClient) {
     assert!(r2.name == "posix_account");
     assert!(r3.name == "posix_account");

-    // get the group by nam
+    // get the group by name
     let r = rsclient
         .idm_group_unix_token_get("posix_group")
         .await
diff --git a/kanidmd_web_ui/src/components/change_unix_password.rs b/kanidmd_web_ui/src/components/change_unix_password.rs
index bebd02b1c..dd2e6daf5 100644
--- a/kanidmd_web_ui/src/components/change_unix_password.rs
+++ b/kanidmd_web_ui/src/components/change_unix_password.rs
@@ -125,8 +125,8 @@ impl Component for ChangeUnixPassword {
         let flash = match &self.state {
             State::Error { emsg, kopid } => {
                 let message = match kopid {
-                    Some(k) => format!("An error occured - {} - {}", emsg, k),
-                    None => format!("An error occured - {} - No Operation ID", emsg),
+                    Some(k) => format!("An error occurred - {} - {}", emsg, k),
+                    None => format!("An error occurred - {} - No Operation ID", emsg),
                 };
                 html! {