Auth flow docs (#2249)

James Hodgkinson 2023-10-24 14:00:37 +10:00 committed by GitHub
parent 6f3e932f7f
commit 7093149975
17 changed files with 1827 additions and 539 deletions


@@ -52,7 +52,7 @@ jobs:
       - name: Build the docs
         run: |
-          cargo install mdbook-template
+          cargo install mdbook-template mdbook-mermaid
           cargo doc --no-deps
           mdbook build *book
           rm -rf ./docs/


@@ -135,6 +135,7 @@ codespell:
 	codespell -c \
 	-L 'crate,unexpect,Pres,pres,ACI,aci,ser,te,ue,unx,aNULL' \
 	--skip='./target,./pykanidm/.venv,./pykanidm/.mypy_cache,./.mypy_cache,./pykanidm/poetry.lock' \
+	--skip='./book/*.js' \
 	--skip='./book/book/*' \
 	--skip='./book/src/images/*' \
 	--skip='./docs/*,./.git' \
@@ -175,7 +176,10 @@ doc:
 .PHONY: doc/format
 doc/format: ## Format docs and the Kanidm book
-	find . -type f -not -path './target/*' -not -path '*/.venv/*' -not -path './vendor/*'\
+	find . -type f \
+		-not -path './target/*' \
+		-not -path './docs/*' \
+		-not -path '*/.venv/*' -not -path './vendor/*'\
 	-name \*.md \
 	-exec deno fmt --check $(MARKDOWN_FORMAT_ARGS) "{}" +
@@ -188,12 +192,13 @@ doc/format/fix: ## Fix docs and the Kanidm book
 .PHONY: book
 book: ## Build the Kanidm book
 book:
-	cargo doc --no-deps
+	echo "Building rust docs"
+	cargo doc --no-deps --quiet
 	mdbook build book
 	rm -rf ./docs/
 	mv ./book/book/ ./docs/
-	mkdir -p ./docs/rustdoc/${BOOK_VERSION}
-	mv ./target/doc/* ./docs/rustdoc/${BOOK_VERSION}/
+	mkdir -p $(PWD)/docs/rustdoc/${BOOK_VERSION}/
+	rsync -a --delete $(PWD)/target/doc/ $(PWD)/docs/rustdoc/${BOOK_VERSION}/
 
 .PHONY: book_versioned
 book_versioned:


@@ -14,5 +14,9 @@ edit-url-template = "https://github.com/kanidm/kanidm/edit/master/book/{path}"
 git-repository-url = "https://github.com/kanidm/kanidm"
 git-repository-icon = "fa-github"
 additional-css = ["theme.css"]
+additional-js = ["mermaid.min.js", "mermaid-init.js"]
 
 [preprocessor.template]
+
+[preprocessor.mermaid]
+command = "mdbook-mermaid"

book/mermaid-init.js Normal file (1 line)

@@ -0,0 +1 @@
+mermaid.initialize({startOnLoad:true});

book/mermaid.min.js vendored Normal file (1282 lines)

File diff suppressed because one or more lines are too long


@@ -204,21 +204,21 @@ git rebase --abort
 
 ### Building the Book
 
-You'll need `mdbook` to build the book:
+You'll need `mdbook` and the extensions to build the book:
 
-```bash
-cargo install mdbook
+```shell
+cargo install mdbook mdbook-mermaid mdbook-template
 ```
 
 To build it:
 
-```bash
+```shell
 make book
 ```
 
 Or to run a local webserver:
 
-```bash
+```shell
 cd book
 mdbook serve
 ```


@@ -57,14 +57,15 @@
 - [Developer Guide](DEVELOPER_README.md)
 - [FAQ](developers/faq.md)
 - [Design Documents]()
-- [Architecture](developers/designs/architecture.md)
 - [Access Profiles 2022](developers/designs/access_profiles_rework_2022.md)
 - [Access Profiles Original](developers/designs/access_profiles_and_security.md)
-- [REST Interface](developers/designs/rest_interface.md)
+- [Architecture](developers/designs/architecture.md)
+- [Authentication flow](developers/designs/authentication_flow.md)
 - [Elevated Priv Mode](developers/designs/elevated_priv_mode.md)
 - [Oauth2 Refresh Tokens](developers/designs/oauth2_refresh_tokens.md)
-- [Replication Internals](developers/designs/replication.md)
 - [Replication Coordinator](developers/designs/replication_coord.md)
+- [Replication Internals](developers/designs/replication.md)
+- [REST Interface](developers/designs/rest_interface.md)
 - [Python Module](developers/python.md)
 - [RADIUS Integration](developers/radius.md)
 - [Packaging](packaging.md)


@@ -9,8 +9,8 @@ meet the policy requirements.
 ## Default Account Policy
 
-A default Account Policy is applied to `idm_all_accounts`. This provides the defaults that
-influence all accounts in Kanidm. This policy can be modified the same as any other group's policy.
+A default Account Policy is applied to `idm_all_accounts`. This provides the defaults that influence
+all accounts in Kanidm. This policy can be modified the same as any other group's policy.
 
 ## Policy Resolution


@@ -1 +0,0 @@
-# Designs


@ -0,0 +1,428 @@
# Authentication Use Cases
There are many planned integrations for authentication for a service like this. The use cases for
what kind of auth are below. It's important to consider that today a lot of identification is not
just who you are, but what device you are using, so device security is paramount in the design of
this system. We strongly recommend patching and full disk encryption, as well as high-quality
webauthn tokens such as YubiKeys or macOS Touch ID.
As a result, most of the important parts of this system become the auditing and co-operation between
admins on high-security events and changes, rather than limiting the lifetime of credentials. An
important part of this is also limiting the scope of a credential, rather than just its lifetime.
<https://pages.nist.gov/800-63-3/sp800-63b.html>
## Kanidm account system
The login screen is presented to the user. They are challenged for a series of credentials. When
they request an action that is of a certain privilege, they must re-provide the strongest credential
(ie Webauthn token, TOTP). Some actions may require another account to sign off on the action for it
to persist.
This applies to web or CLI usage.
Similar to sudo, the privilege lasts for a short time within the session (e.g. 5 minutes).
## SSO to websites
The login screen is presented to the user. They are challenged for a series of credentials. They are
then able to select any supplemental permissions (if any) they wish to request for the session,
which may request further credentials. They are then redirected to the target site with an
appropriate (oauth) token describing the requested rights.
- <https://developers.google.com/identity/sign-in/web/incremental-auth>
- <https://openid.net/specs/openid-connect-core-1_0.html#UserInfo>
- <https://tools.ietf.org/html/rfc7519>
## Login to workstation (connected)
The user is prompted for a password and/or token auth. These are verified by the Kanidm server, and
the login proceeds.
## Login to workstation (disconnected)
The user must have pre-configured their account after a successful authentication as above to
support local password and token authentication. They are then able to provide MFA when disconnected
from the network.
## Sudo on workstation
This is a reuse of the above two scenarios.
## Access to VPN or Wifi
The user provides their password OR they provide a distinct network access password which allows
them access.
MFA could be provided here with TOTP?
## SSH to machine (legacy, disconnected)
The user pre-enrolls their SSH key to their account via the Kanidm console. They are then able to
SSH to the machine as usual with their key. SUDO rights are granted via password only once they are
connected (see sudo on workstation).
Agent forwarding is a concern in this scenario to limit scope and lateral movement. Can this be
limited correctly? IMO no, so don't allow it.
## SSH to machine
The user calls a special Kanidm SSH command. This generates a once-off SSH key, and an
authentication request is lodged to the system. Based on policy, the user may need to allow the
request via a web console, or another user may need to sign off to allow the access. Once granted
the module then allows the authentication to continue, and the ephemeral key is allowed access and
the login completes. The key may only be valid for a short time.
Agent forwarding is not a concern in this scenario due to the fact the key is only allowed to be
used for this specific host.
_W: Probably the main one is if a group/permission is granted always or ephemerally on the session.
But that's per group/permission.
I want to limit the amount of configuration policy here, because there are lots of ways that
over-configuration can create too many scenarios to effectively audit and test. So the permissions
would probably come down to something like "always", "request", and "request-approve", where
"always" means you always have it, "request" means you have to re-auth and then the permission lasts
for X time, and "request-approve" means you have to request, re-auth, and then someone else signs
off on the approval to grant.
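
As a rough sketch of those three policies (the names and fields below are invented for illustration
and are not existing Kanidm types), a per-group or per-permission grant could be modelled like this:

```rust
use std::time::Duration;

/// Illustration only: how the "always" / "request" / "request-approve"
/// policies described above could be represented.
enum GrantPolicy {
    /// The permission is always present in the session.
    Always,
    /// The holder must re-authenticate; the grant then lasts for `ttl`.
    Request { ttl: Duration },
    /// As above, but another party must also sign off before the grant applies.
    RequestApprove { ttl: Duration, approver_group: String },
}
```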
## SSH via a bastion host
This would work with the SSH to machine scenario, but in this case the key is granted rights to the
bastion and the target machine so that agent forwarding can work.
Is there a way to ensure that only this series of jumps is allowed?
## Additionally
- Support services must be able to assist in an account recovery situation
- Some sites may wish to allow self-signup for accounts
- Some sites may want self-supported account recovery
- Accounts should support ephemeral or group-requests
## References
Secure SSH Key Storage
- <https://github.com/sekey/sekey>
- <https://gist.github.com/lizthegrey/9c21673f33186a9cc775464afbdce820>
Secure Bastion hosting
- <https://krypt.co/docs/ssh/using-a-bastion-host.html>
## Implementation ideas for use cases
For identification:
- Issue "ID tokens" as an api where you lookup name/uuid and get the `userentry` + `sshkeys` + group
entries. This allows one-shot caching of relevant types, and groups would not store the member
link on the client. Allows the client to "cache" any extra details into the stored record as
required. This would be used for linux/mac to get `uid`/`gid` details and SSH keys for
distribution.
- Would inherit search permissions for connection.
- Some service accounts with permission would get the ntpassword field in this for radius.
- Hosts can use anonymous or have a service account
- Allows cached/disconnected auth.
- Need to be checked periodically for validity (IE account revoke)
- For authentication:
- Cookie/Auth proto - this is for checking pw's and mfa details as required from clients, both web,
  cli and pam. This is probably the most important and core proto, as everything else will derive
from this session in some way.
- Must have a max lifetime or refresh time up to max life to allow revoking.
- If you want to "gain" higher privs, you need to auth-up to the shadow accounts extra
requirements
- You would then have two ID's associated, which may have different lifetimes?
- SSH Key Distribution via the ID tokens (this is great for offline / disconnected auth ...).
- Clients can add password hashes to the ID tokens on successful auth.
- Request based auth proto - a service account creates an auth request, which then must be
acknowledged by the correct Kanidm api, and when acknowledged the authentication can proceed.
- OAuth - This would issue a different token type as required with the right details embedded as
requested.
- Another idea: cli tool that says "I want to login" which generates an ephemeral key that only
works on that host, for that identity with those specific roles you have requested.
Authorisation is a client-specific issue, we just need to provide the correct metadata for each
client to be able to construct correct authorisations.
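
To make the "ID tokens" idea above a little more concrete, a cacheable record returned from a
name/uuid lookup might look roughly like the following sketch (field names are illustrative only,
not the real schema):

```rust
use serde::{Deserialize, Serialize};

/// Illustration only: a one-shot, cacheable identity record a unix/ssh client
/// could fetch by name or uuid and store locally for disconnected operation.
#[derive(Serialize, Deserialize)]
struct IdToken {
    uuid: String,
    name: String,
    /// Posix details for linux/mac integration.
    gidnumber: u32,
    /// SSH public keys for distribution to hosts.
    ssh_publickeys: Vec<String>,
    /// Group entries, already dereferenced so the client stores no member links.
    groups: Vec<String>,
    /// A client may cache a password hash here after a successful online auth,
    /// to allow later disconnected authentication.
    cached_password_hash: Option<String>,
}
```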
## Auth Summary
- auth is a stepped protocol (similar to SASL)
- we offer possible authentications
- these proceed until a deny or allow is hit.
- we provide a token that is valid on all server instances (except read-onlies that have unique
cookie keys to prevent forgery of writable master cookies)
- cookies can request tokens, tokens are signed cbor that contains the set of group uuids + names
  dereferenced so that a client can make all authorisation decisions from a single datapoint (see
  the sketch after this list)
- Groups require the ability to be ephemeral/temporary or permanent.
- each token can be unique based on the type of auth (ie 2fa needed to get access to admin groups)
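
For example, the signed token payload might carry something like the following. This is a sketch
only; the real claim set is not defined by this document:

```rust
use serde::{Deserialize, Serialize};

/// Sketch of a token payload: the dereferenced groups mean a client can make
/// every authorisation decision from this single datapoint. In the design
/// above this would be serialised as CBOR and signed by the server.
#[derive(Serialize, Deserialize)]
struct TokenClaims {
    session_id: String,
    account_uuid: String,
    name: String,
    /// Group (uuid, name) pairs, resolved at issue time.
    groups: Vec<(String, String)>,
    /// Expiry, to bound the lifetime of the session.
    expires_at: i64,
}
```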
## Cookie/Token Auth Considerations
- Must prevent replay attacks from occurring at any point during the authentication process
- Minimise (but not eliminate) state on the server. This means that an auth process must remain on a
single server, but the token granted should be valid on any server.
## Cookie/Token Auth Detail
The client sends an AuthRequest to the server in the Init state. Any other request results in
AuthDenied due to lack of the `x-authsession-id` header.
```rust
struct AuthClientRequest {
    name: String,
    application: Option<String>,
}
```
The server issues a cookie, and allocates a session id to the cookie. The session id is also stored
in the server with a timeout. The AuthResponse indicates the current possible auth types that can
proceed. This should provide challenges or nonces if required by the auth type.
```rust
enum AuthAllowed {
    Anonymous,
    Password,
    Webauthn {
        challenge: // see the webauthn implementation for this
    },
    TOTP,
}

enum AuthState {
    Response {
        next: AuthAllowed,
    },
    AuthDenied,
    AuthSuccess,
}

struct AuthServerResponse {
    state: AuthState,
}
```
The client now sends the cookie and an `AuthRequest` with type Step, that contains the type of
authentication credential being provided, and any other details. This COULD contain multiple
credentials, or a single one.
```rust
enum AuthCredential {
    Anonymous,
    Password(String),
    Webauthn {
        // see the webauthn impl for all the bits this will contain ...
    },
    TOTP(String),
}

struct AuthClientStep(Vec<AuthCredential>);
```
The server verifies the credential, and marks that type of credential as failed or fulfilled. On
failure of a credential, AuthDenied is immediately sent. On success of a credential the server can
issue AuthSuccess or AuthResponse with new possible challenges. For example, consider we initially
send "password". The client provides the password. The server follows by "totp" as the next type.
The client fails the totp, and is denied.
If the response is AuthSuccess, an auth token is issued. The auth token is a bearer token (that's
what reqwest supports). For more consideration, see <https://tools.ietf.org/html/rfc6750>.
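
To make the stepping rule concrete, here is a minimal, self-contained sketch (the types are invented
for illustration and are not the real server code): the session tracks which credential types are
still required, a fulfilled credential advances, and any failure or out-of-order step is an
immediate deny.

```rust
#[derive(Clone, Copy, PartialEq)]
enum CredType {
    Password,
    Totp,
}

enum StepResult {
    /// More challenges to satisfy; the next required type is returned.
    Continue(CredType),
    Success,
    Denied,
}

struct AuthSession {
    /// Credential types still required, in order.
    remaining: Vec<CredType>,
}

impl AuthSession {
    fn step(&mut self, provided: CredType, valid: bool) -> StepResult {
        // Providing the wrong type, or an invalid credential, is a deny:
        // the state machine only ever advances.
        if !valid || self.remaining.first() != Some(&provided) {
            return StepResult::Denied;
        }
        self.remaining.remove(0);
        match self.remaining.first() {
            Some(next) => StepResult::Continue(*next),
            None => StepResult::Success,
        }
    }
}
```

With `remaining` initialised to `[Password, Totp]`, a correct password returns `Continue(Totp)`, and
a failed totp afterwards returns `Denied`, matching the example above.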
### Notes
- By tracking what auth steps we have seen in the server, we prevent replay attacks by re-starting
  the state machine part way through. The server enforces that the client must always advance.
- If the account has done "too many" auth attempts, we just don't send a cookie in the initial
  authRequest, which causes the client to always be denied.
- If the AuthRequest is started but not completed, we time it out within a set number of minutes by
  walking the set of sessions and purging incomplete ones which have passed the time stamp.
- The session id is in the cookie to eliminate leaking of the session id (secure cookies), and to
  prevent tampering with the session id if possible. It's not perfect, but it helps to prevent
  casual attacks. The session id itself is really the thing that protects us from replays.
## Auth Questions
At a design level, we want to support ephemeral group information. There are two ways I have thought
of to achieve this.
Consider we have a "low priv" and a "high priv" group. The low priv only needs password to "assign"
membership, and the high priv requires password and totp.
### Method One
We have metadata on each group's generated `memberOf` (based on the group info itself). This
metadata says what "strength and type" of authentication is required. The auth request would ask for
password, then, when the password is provided (and correct), it requests TOTP OR finalise. If you
take finalise, you get authSuccess but the issued token only has the group "low".
If you take TOTP, then finalise, you get authSuccess and the group low _and_ high.
### Method Two
Groups define if they are "always issued" or "requestable". All group types define requirements to
be fulfilled for the request such as auth strength, connection type, auth location etc.
In the AuthRequest, if you specify no groups, you do the 'minimum' auth required by the set of your
"always" groups.
If you do AuthRequest and you request "high", this is now extended into the set of your minimum auth
required, which causes potentially more auth steps. However the issued token now has group high in
addition to low.
Extra: groups could define a "number of ID points" required, where the server lists each auth type
based on strength. So group high would request 30 points. Password is 10 points, totp is 20 points,
webauthn could be 20, for example. This way, using totp + webauthn would still get you a login.
There may be other ways to define this logic, but this applies to method one as well.
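
As a tiny worked example of the "ID points" idea (the point values and method names are invented for
illustration):

```rust
/// Invented point values for illustration only.
fn points(method: &str) -> u32 {
    match method {
        "password" => 10,
        "totp" => 20,
        "webauthn" => 20,
        _ => 0,
    }
}

/// A group requiring 30 points is satisfied by password + totp (30) or
/// totp + webauthn (40), but not by password alone (10).
fn satisfies(methods: &[&str], required: u32) -> bool {
    methods.iter().map(|m| points(m)).sum::<u32>() >= required
}
```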
### Method Three
Rather than have groups define always or requestable, have a "parent" user that templates "high
priv" users which have extended credentials. So you may have:
```text
alice {
password
memberof: low
}
alice+high {
parent: alice
totp
memberof: high
}
```
So to distinguish the request, you would login with a different username compared to normal, and
that would then enforce extra auth requirements on the user.
## Considerations
SSH key auth: When we SSH to a machine with SSH-distributed IDs, how do we manage this system?
Because the keys are sent to the machine, I think that the best way is method three (the SSH key is
an attr of the +high account). Alternatively, it would be valid for the client on the machine to
check "yep they used SSH keys" and then assert that group high lists SSH as a valid single factor,
which would allow the machine to "login" the user but no token is generated for the authentication.
A benefit to method three is that the +high and "low" accounts have unique uid/gid so there is no
possible data leak if they can both SSH in!
With regard to forwarding tokens (no consideration is made to security of this system yet), method
two probably is the best, but you need token constraint to make sure you can't replay to another
host.
<https://techcommunity.microsoft.com/t5/Azure-Active-Directory-Identity/Your-Pa-word-doesn-t-matter/ba-p/731984>
## Brain Dump Internal Details
Credentials should be a real struct on entry, that is serialised to str to dbentry. This allows repl
to still work, but then we can actually keep detailed structures for types in the DB instead. When
we send to proto entry, we could probably keep it as a real struct on protoentry, but then we could
eliminate all private types from transmission.
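
As a sketch of that idea (not the real Kanidm credential types), a credential could stay a typed
value in memory and in the protocol, and only be flattened to a string at the database boundary:

```rust
use serde::{Deserialize, Serialize};

/// Illustration only: a real, typed credential kept on the entry in memory...
#[derive(Serialize, Deserialize)]
enum DbCredential {
    Password { hash: String },
    Totp { secret: String, step: u64 },
    Webauthn { credential_id: String, public_key: String },
}

/// ...and serialised to a string only when written to the db entry, so
/// replication keeps working while the server retains detailed structure.
fn to_dbentry_value(cred: &DbCredential) -> serde_json::Result<String> {
    serde_json::to_string(cred)
}
```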
When we login, we need to know what groups/roles are relevant to that authentication. To achieve
this we can have each group contain a policy of auth types (the credentials above all provide an
auth type). The login then has a known auth type of "how" they logged in, so when we go to generate
the users "token" for that session, we can correlate these, and only attach groups that satisfy the
authentication type requirements.
IE the session associates the method you used to login to your token and a cookie.
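
In other words, token generation could filter the account's groups by the authentication type that
was actually used, roughly like this sketch (all names invented for illustration):

```rust
#[derive(PartialEq, PartialOrd)]
enum AuthType {
    Anonymous,
    Password,
    PasswordMfa,
}

struct Group {
    name: String,
    /// Minimum authentication type this group requires to be issued.
    requires: AuthType,
}

/// Only groups whose policy is satisfied by how the session logged in
/// are attached to the issued token.
fn groups_for_token(session_auth: &AuthType, groups: Vec<Group>) -> Vec<Group> {
    groups
        .into_iter()
        .filter(|g| session_auth >= &g.requires)
        .collect()
}
```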
If you require extra groups, then we should support a token refresh that given the prior auth +
extra factors, we can then re-issue the token to support the extra groups as presented. We may also
want some auth types to NOT allow refresh.
We may want groups to support expiry where they are not valid past some time stamp. This may
require tagging or other details.
How do we ensure integrity of the token? Do we have to? Is it the client's job to trust the token,
given the TLS tunnel?
## More Brain Dumping
- need a way to just pw check even if mfa is on (for sudo). Perhaps have a separate sudo password
attr?
- ntpassword attr is separate
- a way to check application pw which attaches certain rights (is this just a generalisation of
sudo?)
- the provided token (bearer etc?) contains the "memberof" for the session.
- How to determine what memberof an api provides? Could be policy object that says "api pw of name
X is allowed Y, Z group". Could be that the user is presented with a list or subset of the
related? Could be both?
- Means we need a "name" and "type" for the api password, also need to be able to search on both
of those details potentially.
- The oauth system is just a case of follow that and provide the scope/groups as required.
- That would make userPassword and webauthn only for webui and api direct access.
- All other pw validations would use application pw case.
- SSH would just read SSH key - should this have a similar group filter/allow mechanism like
application pw?
- Groups take a "type"
- credentials also have a "type"
- The credential if used can provide groups of "type" to that session during auth token generation
- An auth request says it is an auth of type X, to associate what creds it might check.
- Means a change to auth to take an entry as part of auth, or at least, its group list for the
session.
- policy to define if pw types like sudo or radius are linked.
- Some applications may need to read a credential type.
- attribute/value tagging required?
```text
apptype: unix
apptype: groupware
group: admins
type: unix <<-- indicates it's a requested group
group: emailusers
type: groupware <<-- indicates it's a requested group
user: admin
memberof: admins <<-- Should this be in mo if they are reqgroups? I think yes, because it's only for that "session"
based on the cred do they get the "group list" in cred.
memberof: emailusers
cred: {
'type': unix,
'hash': ...
'grants': 'admins'
}
cred: {
'type': groupware
'hash': ...,
'grants': 'emailusers',
}
cred: {
'type': blah
'hash': ...,
'grants': 'bar', // Can't work because not a memberof bar. Should this only grant valid MO's?
}
ntpassword: ... <<-- needs limited read, and doesn't allocate groups.
sshPublicKey: ... <<-- different due to needing anon read.
```
## Some Dirty Rust Brain Dumps
- Credentials need per-cred locking
- This means they have to be in memory and uniquely ided.
- How can we display to a user that a credential back-off is in place?
- UAT need to know what Credential was used and its state.
- The Credential associates the claims


@@ -1,467 +0,0 @@


@ -0,0 +1,46 @@
# The Authentication Flow
1. Client sends an init request. This can be either:
1. `AuthStep::Init` which just includes the username, or
2. `AuthStep::Init2` which can request a "privileged" session
2. The server responds with a list of authentication methods.
(`AuthState::Choose(Vec<AuthAllowed>)`)
3. Client requests auth with a method (`AuthStep::Begin(AuthMech)`)
4. Server responds with an acknowledgement (`AuthState::Continue(Vec<AuthAllowed>)`). This is so the
challenge can be included in the response, for Passkeys or other challenge-response methods.
- If required, this challenge/response continues in a loop until the requirements are satisfied -
for example, TOTP + Password.
5. The result is returned, either:
- Success, with the User Auth Token as a `String`.
- Denied, with a reason as a `String`.
```mermaid
sequenceDiagram;
autonumber
participant Client
participant Kanidm
Note over Client: "I'm Ferris and I want to start auth!"
Client ->> Kanidm: AuthStep::Init(username)
Note over Kanidm: "You can use the following methods"
Kanidm ->> Client: AuthState::Choose(Vec<AuthAllowed>)
loop Authentication Checks
Note over Client: I want to use this mechanism
Client->>Kanidm: AuthStep::Begin(AuthMech)
Note over Kanidm: Ok, you can do that.
Kanidm->>Client: AuthState::Continue(Vec<AuthAllowed>)
Note over Client: Here is my credential
Client->>Kanidm: AuthStep::Cred(AuthCredential)
Note over Kanidm: Kanidm validates the Credential,<br /> and if more methods are required,<br /> return them.
Kanidm->>Client: AuthState::Continue(Vec<AuthAllowed>)
Note over Client, Kanidm: If there's no more credentials required, break the loop.
end
Note over Client,Kanidm: If Successful, return the auth token
Kanidm->>Client: AuthState::Success(String Token)
Note over Client,Kanidm: If Failed, return that and a message why.
Kanidm-xClient: AuthState::Denied(String reason)
```
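
For reference, the same exchange as driven by the `KanidmClient` step methods touched in this commit
looks roughly like the sketch below. It assumes an already-constructed client and an account that
offers the password mechanism; client construction and error handling are elided.

```rust
use kanidm_client::KanidmClient;
use kanidm_proto::v1::AuthMech;

// Sketch only: each call corresponds to one arrow in the diagram above.
async fn password_login(client: &KanidmClient) -> Result<(), kanidm_client::ClientError> {
    // AuthStep::Init2 -> AuthState::Choose(Vec<AuthMech>)
    let _mechs = client.auth_step_init("ferris").await?;
    // AuthStep::Begin(AuthMech) -> AuthState::Continue(Vec<AuthAllowed>)
    let _allowed = client.auth_step_begin(AuthMech::Password).await?;
    // AuthStep::Cred(AuthCredential) -> AuthState::Success(token) on success
    let _response = client.auth_step_password("correct horse battery staple").await?;
    Ok(())
}
```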


@@ -5,49 +5,15 @@
 {{#template ../../templates/kani-warning.md
 imagepath=../../images/
 title=Note!
-text=Here begins some early notes on the REST interface - much better ones are in the repository's designs directory.
+text=This is a work in progress and not all endpoints have perfect schema definitions, but they're all covered!
 }}
 <!-- deno-fmt-ignore-end -->
 
-There's an endpoint at `/<api_version>/routemap` (for example, `https://localhost/v1/routemap`)
-which is based on the API routes as they get instantiated.
-
-It's _very, very, very_ early work, and should not be considered stable at all.
-
-An example of some elements of the output is below:
-
-```json
-{
-  "routelist": [
-    {
-      "path": "/",
-      "method": "GET"
-    },
-    {
-      "path": "/robots.txt",
-      "method": "GET"
-    },
-    {
-      "path": "/ui/",
-      "method": "GET"
-    },
-    {
-      "path": "/v1/account/:id/_unix/_token",
-      "method": "GET"
-    },
-    {
-      "path": "/v1/schema/attributetype/:id",
-      "method": "GET"
-    },
-    {
-      "path": "/v1/schema/attributetype/:id",
-      "method": "PUT"
-    },
-    {
-      "path": "/v1/schema/attributetype/:id",
-      "method": "PATCH"
-    }
-  ]
-}
-```
+We're generating an OpenAPI specification file and Swagger interface using
+[utoipa](https://crates.io/crates/utoipa).
+
+The Swagger UI is available at `/docs/swagger-ui` on your server (ie, if your origin is
+`https://example.com:8443`, visit `https://example.com:8443/docs/swagger-ui`).
+
+The OpenAPI schema is similarly available at `/docs/v1/openapi.json`.


@@ -73,9 +73,14 @@ Don't [ask](https://www.youtube.com/watch?v=0QaAKi0NFkA). They just
 [do](https://www.youtube.com/shorts/WizH5ae9ozw).
 
 ## Why aren't snaps launching with `home_alias` set?
 
-Snaps rely on AppArmor and [AppArmor doesn't follow symlinks](https://bugs.launchpad.net/apparmor/+bug/1485055). When `home_alias` is any value other than `none` a symlink will be created and pointing to `home_attr`. It is recommended to use alternative software packages to snaps.
-
-All users in Kanidm can change their name (and their spn) at any time. If you change `home_attr` from `uuid` you must have a plan on how to manage these directory renames in your system.
+Snaps rely on AppArmor and
+[AppArmor doesn't follow symlinks](https://bugs.launchpad.net/apparmor/+bug/1485055). When
+`home_alias` is any value other than `none` a symlink will be created and pointing to `home_attr`.
+It is recommended to use alternative software packages to snaps.
+
+All users in Kanidm can change their name (and their spn) at any time. If you change `home_attr`
+from `uuid` you must have a plan on how to manage these directory renames in your system.
 
 ## Why won't you take this FAQ thing seriously?


@@ -82,7 +82,8 @@ to `spn`.
 > UUID folder. Automatic support is provided for this via the unixd tasks daemon, as documented
 > here.
 
-> **NOTE:** Ubuntu users please see: [Why aren't snaps launching with home_alias set?](../frequently_asked_questions.md#why-arent-snaps-launching-with-home_alias-set)
+> **NOTE:** Ubuntu users please see:
+> [Why aren't snaps launching with home_alias set?](../frequently_asked_questions.md#why-arent-snaps-launching-with-home_alias-set)
 
 `use_etc_skel` controls if home directories should be prepopulated with the contents of `/etc/skel`
 when first created. Defaults to false.


@@ -1000,7 +1000,7 @@ impl KanidmClient {
         .map_err(|e| ClientError::JsonDecode(e, opid))
     }
 
-    #[instrument(level = "debug")]
+    #[instrument(level = "debug", skip(self))]
     pub async fn auth_step_init(&self, ident: &str) -> Result<Set<AuthMech>, ClientError> {
         let auth_init = AuthRequest {
             step: AuthStep::Init2 {
@@ -1024,6 +1024,7 @@ impl KanidmClient {
         .map(|mechs| mechs.into_iter().collect())
     }
 
+    #[instrument(level = "debug", skip(self))]
     pub async fn auth_step_begin(&self, mech: AuthMech) -> Result<Vec<AuthAllowed>, ClientError> {
         let auth_begin = AuthRequest {
             step: AuthStep::Begin(mech),
@@ -1043,6 +1044,7 @@ impl KanidmClient {
         // .map(|allowed| allowed.into_iter().collect())
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn auth_step_anonymous(&self) -> Result<AuthResponse, ClientError> {
         let auth_anon = AuthRequest {
             step: AuthStep::Cred(AuthCredential::Anonymous),
@@ -1058,6 +1060,7 @@ impl KanidmClient {
         r
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn auth_step_password(&self, password: &str) -> Result<AuthResponse, ClientError> {
         let auth_req = AuthRequest {
             step: AuthStep::Cred(AuthCredential::Password(password.to_string())),
@@ -1072,6 +1075,7 @@ impl KanidmClient {
         r
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn auth_step_backup_code(
         &self,
         backup_code: &str,
@@ -1089,6 +1093,7 @@ impl KanidmClient {
         r
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn auth_step_totp(&self, totp: u32) -> Result<AuthResponse, ClientError> {
         let auth_req = AuthRequest {
             step: AuthStep::Cred(AuthCredential::Totp(totp)),
@@ -1103,6 +1108,7 @@ impl KanidmClient {
         r
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn auth_step_securitykey_complete(
         &self,
         pkc: Box<PublicKeyCredential>,
@@ -1120,6 +1126,7 @@ impl KanidmClient {
         r
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn auth_step_passkey_complete(
         &self,
         pkc: Box<PublicKeyCredential>,
@@ -1137,6 +1144,7 @@ impl KanidmClient {
         r
     }
 
+    #[instrument(level = "debug", skip(self))]
     pub async fn auth_anonymous(&self) -> Result<(), ClientError> {
         let mechs = match self.auth_step_init("anonymous").await {
             Ok(s) => s,
@@ -1164,7 +1172,7 @@ impl KanidmClient {
         }
     }
 
-    #[instrument(level = "debug")]
+    #[instrument(level = "debug", skip(self, password))]
     pub async fn auth_simple_password(
         &self,
         ident: &str,
@@ -1194,6 +1202,7 @@ impl KanidmClient {
         }
     }
 
+    #[instrument(level = "debug", skip(self, password, totp))]
     pub async fn auth_password_totp(
         &self,
         ident: &str,
@@ -1244,6 +1253,7 @@ impl KanidmClient {
         }
     }
 
+    #[instrument(level = "debug", skip(self, password, backup_code))]
     pub async fn auth_password_backup_code(
         &self,
         ident: &str,
@@ -1294,6 +1304,7 @@ impl KanidmClient {
         }
     }
 
+    #[instrument(level = "debug", skip(self))]
     pub async fn auth_passkey_begin(
         &self,
         ident: &str,
@@ -1320,6 +1331,7 @@ impl KanidmClient {
         }
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn auth_passkey_complete(
         &self,
         pkc: Box<PublicKeyCredential>,
@@ -1345,6 +1357,7 @@ impl KanidmClient {
         })
     }
 
+    #[instrument(level = "debug", skip_all)]
    pub async fn reauth_simple_password(&self, password: &str) -> Result<(), ClientError> {
         let state = match self.reauth_begin().await {
             Ok(mut s) => s.pop(),
@@ -1366,6 +1379,7 @@ impl KanidmClient {
         }
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn reauth_password_totp(&self, password: &str, totp: u32) -> Result<(), ClientError> {
         let state = match self.reauth_begin().await {
             Ok(s) => s,
@@ -1401,6 +1415,7 @@ impl KanidmClient {
         }
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn reauth_passkey_begin(&self) -> Result<RequestChallengeResponse, ClientError> {
         let state = match self.reauth_begin().await {
             Ok(mut s) => s.pop(),
@@ -1414,6 +1429,7 @@ impl KanidmClient {
         }
     }
 
+    #[instrument(level = "debug", skip_all)]
     pub async fn reauth_passkey_complete(
         &self,
         pkc: Box<PublicKeyCredential>,


@@ -944,6 +944,7 @@ impl fmt::Display for AuthMech {
 
 #[derive(Debug, Serialize, Deserialize, Copy, Clone, ToSchema)]
 #[serde(rename_all = "lowercase")]
+// TODO: what is this actually used for?
 pub enum AuthIssueSession {
     Token,
 }
@@ -951,20 +952,21 @@ pub enum AuthIssueSession {
 #[derive(Debug, Serialize, Deserialize, ToSchema)]
 #[serde(rename_all = "lowercase")]
 pub enum AuthStep {
-    // name
+    /// "I want to authenticate with this username"
     Init(String),
-    // A new way to issue sessions. Doing this as a new init type
-    // to prevent breaking existing clients. Allows requesting of the type
-    // of session that will be issued at the end if successful.
+    /// A new way to issue sessions. Doing this as a new init type
+    /// to prevent breaking existing clients. Allows requesting of the type
+    /// of session that will be issued at the end if successful.
     Init2 {
         username: String,
         issue: AuthIssueSession,
         #[serde(default)]
+        /// If true, the session will have r/w access.
         privileged: bool,
     },
-    // We want to talk to you like this.
+    /// We want to talk to you like this.
     Begin(AuthMech),
-    // Provide a response to a challenge.
+    /// Provide a response to a challenge.
     Cred(AuthCredential),
 }
@@ -1042,14 +1044,13 @@ impl fmt::Display for AuthAllowed {
 
 #[derive(Debug, Serialize, Deserialize, ToSchema)]
 #[serde(rename_all = "lowercase")]
 pub enum AuthState {
-    // You need to select how you want to talk to me.
+    /// You need to select how you want to talk to me.
     Choose(Vec<AuthMech>),
-    // Continue to auth, allowed mechanisms/challenges listed.
+    /// Continue to auth, allowed mechanisms/challenges listed.
     Continue(Vec<AuthAllowed>),
-    // Something was bad, your session is terminated and no cookie.
+    /// Something was bad, your session is terminated and no cookie.
     Denied(String),
-    // Everything is good, your bearer token has been issued and is within
-    // the result.
+    /// Everything is good, your bearer token has been issued and is within the result.
     Success(String),
 }
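
As a usage illustration of the variants documented above (a sketch only; transport and error
handling are out of scope here):

```rust
use kanidm_proto::v1::{AuthIssueSession, AuthState, AuthStep};

/// Sketch: ask for a privileged (read/write) session for "admin".
fn init_step() -> AuthStep {
    AuthStep::Init2 {
        username: "admin".to_string(),
        issue: AuthIssueSession::Token,
        privileged: true,
    }
}

/// Sketch: how a client might react to each state the server can return.
fn handle(reply: AuthState) {
    match reply {
        AuthState::Choose(mechs) => println!("{} mechanisms offered", mechs.len()),
        AuthState::Continue(allowed) => println!("{} factors still allowed", allowed.len()),
        AuthState::Success(token) => println!("bearer token issued: {token}"),
        AuthState::Denied(reason) => eprintln!("denied: {reason}"),
    }
}
```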