Foundations of pam/nss multi resolver

This starts the support for multi-resolver operation, as well as a system-level NSS resolver.

In the future we'll add the remaining support to authenticate system users with PAM too.
Firstyear 2024-08-16 09:54:35 +10:00 committed by GitHub
parent 4feec82482
commit b1099dfa3b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
14 changed files with 1574 additions and 1061 deletions

Cargo.lock generated

@ -743,9 +743,9 @@ checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce"
[[package]] [[package]]
name = "bytemuck" name = "bytemuck"
version = "1.16.1" version = "1.16.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b236fc92302c97ed75b38da1f4917b5cdda4984745740f153a5d3059e48d725e" checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83"
[[package]] [[package]]
name = "byteorder" name = "byteorder"
@ -1982,7 +1982,7 @@ dependencies = [
"gix-utils", "gix-utils",
"itoa", "itoa",
"thiserror", "thiserror",
"winnow 0.6.16", "winnow 0.6.18",
] ]
[[package]] [[package]]
@ -2026,7 +2026,7 @@ dependencies = [
"smallvec", "smallvec",
"thiserror", "thiserror",
"unicode-bom", "unicode-bom",
"winnow 0.6.16", "winnow 0.6.18",
] ]
[[package]] [[package]]
@ -2183,7 +2183,7 @@ dependencies = [
"itoa", "itoa",
"smallvec", "smallvec",
"thiserror", "thiserror",
"winnow 0.6.16", "winnow 0.6.18",
] ]
[[package]] [[package]]
@ -2266,7 +2266,7 @@ dependencies = [
"gix-validate", "gix-validate",
"memmap2", "memmap2",
"thiserror", "thiserror",
"winnow 0.6.16", "winnow 0.6.18",
] ]
[[package]] [[package]]
@ -2604,7 +2604,7 @@ dependencies = [
"futures-sink", "futures-sink",
"futures-util", "futures-util",
"http 0.2.12", "http 0.2.12",
"indexmap 2.2.6", "indexmap 2.3.0",
"slab", "slab",
"tokio", "tokio",
"tokio-util", "tokio-util",
@ -2623,7 +2623,7 @@ dependencies = [
"futures-core", "futures-core",
"futures-sink", "futures-sink",
"http 1.1.0", "http 1.1.0",
"indexmap 2.2.6", "indexmap 2.3.0",
"slab", "slab",
"tokio", "tokio",
"tokio-util", "tokio-util",
@ -3023,9 +3023,9 @@ dependencies = [
[[package]] [[package]]
name = "indexmap" name = "indexmap"
version = "2.2.6" version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0"
dependencies = [ dependencies = [
"equivalent", "equivalent",
"hashbrown 0.14.5", "hashbrown 0.14.5",
@ -4645,7 +4645,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
dependencies = [ dependencies = [
"fixedbitset", "fixedbitset",
"indexmap 2.2.6", "indexmap 2.3.0",
"serde", "serde",
"serde_derive", "serde_derive",
] ]
@ -5572,7 +5572,7 @@ dependencies = [
"chrono", "chrono",
"hex", "hex",
"indexmap 1.9.3", "indexmap 1.9.3",
"indexmap 2.2.6", "indexmap 2.3.0",
"serde", "serde",
"serde_derive", "serde_derive",
"serde_json", "serde_json",
@ -5845,9 +5845,9 @@ dependencies = [
[[package]] [[package]]
name = "target-lexicon" name = "target-lexicon"
version = "0.12.15" version = "0.12.16"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4873307b7c257eddcb50c9bedf158eb669578359fb28428bef438fec8e6ba7c2" checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1"
[[package]] [[package]]
name = "tempfile" name = "tempfile"
@ -6099,9 +6099,9 @@ dependencies = [
[[package]] [[package]]
name = "toml_datetime" name = "toml_datetime"
version = "0.6.7" version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8fb9f64314842840f1d940ac544da178732128f1c78c21772e876579e0da1db" checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41"
[[package]] [[package]]
name = "toml_edit" name = "toml_edit"
@ -6109,7 +6109,7 @@ version = "0.19.15"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421"
dependencies = [ dependencies = [
"indexmap 2.2.6", "indexmap 2.3.0",
"toml_datetime", "toml_datetime",
"winnow 0.5.40", "winnow 0.5.40",
] ]
@ -6458,7 +6458,7 @@ version = "4.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c5afb1a60e207dca502682537fefcfd9921e71d0b83e9576060f09abc6efab23" checksum = "c5afb1a60e207dca502682537fefcfd9921e71d0b83e9576060f09abc6efab23"
dependencies = [ dependencies = [
"indexmap 2.2.6", "indexmap 2.3.0",
"serde", "serde",
"serde_json", "serde_json",
"utoipa-gen", "utoipa-gen",
@ -7091,9 +7091,9 @@ dependencies = [
[[package]] [[package]]
name = "winnow" name = "winnow"
version = "0.6.16" version = "0.6.18"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b480ae9340fc261e6be3e95a1ba86d54ae3f9171132a73ce8d4bbaf68339507c" checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f"
dependencies = [ dependencies = [
"memchr", "memchr",
] ]


@ -87,6 +87,7 @@
- [Replication Coordinator](developers/designs/replication_coordinator.md) - [Replication Coordinator](developers/designs/replication_coordinator.md)
- [Replication Design and Notes](developers/designs/replication_design_and_notes.md) - [Replication Design and Notes](developers/designs/replication_design_and_notes.md)
- [REST Interface](developers/designs/rest_interface.md) - [REST Interface](developers/designs/rest_interface.md)
- [Unixd Multi Resolver 2024](developers/designs/unixd_multi_resolver_2024.md)
- [Python Module](developers/python_module.md) - [Python Module](developers/python_module.md)
- [RADIUS Module Development](developers/radius.md) - [RADIUS Module Development](developers/radius.md)
- [Release Checklist](developers/release_checklist.md) - [Release Checklist](developers/release_checklist.md)


@ -0,0 +1,418 @@
## Unixd MultiResolver Support
Up until July 2024 the purpose and motivation of the Kanidm Unixd component (`unix_integration` in
the source tree) was to allow Unix-like platforms to authenticate and resolve users against a Kanidm
instance.
However, throughout 2023 and 2024 this project has expanded in scope - from the addition of TPM
support to protect cached credentials (the first PAM module to do so!), to use of the framework by
Himmelblau to enable Azure AD authentication.
We also have new features we want to add including LDAP backends (as an alternative to SSSD), the
ability to resolve local system users, as well as support for PIV and CTAP2 for desktop login.
This has pushed the current design of the resolver to its limits, and it's increasingly challenging
to improve it as a result. This will necessitate a major rework of the project.
### Current Architecture
```
┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┐
┌───────┐ ┌───────┐ ┌───────┐ │ ┌───────────────────┐ │
│ │ │ │ │ │ │ │
│ NSS │ │ PAM │ │ CLI │ │ │ Tasks Daemon │ │
│ │ │ │ │ │ │ │
└───────┘ └───────┘ └───────┘ │ └───────────────────┘ │
▲ ▲ ▲ ▲
─ ─ ─ ─ ┼ ─ ─ ─ ─ ─│─ ─ ─ ─ ─ ┼ ─ ─ ─ ─ ┴ ─ ─ ─ ─ ─ ┼ ─ ─ ─ ─ ─ ─ ─ ┤
│ ▼ ▼ ▼ │
┌─────────────────────────────┐ ┌───────────┐ │
│ │ │ │ │
│ ClientCodec │ │ Tasks │ │
│ │ │ │ │
└─────────────────────────────┘ └───────────┘ │
┌ ─ ─ ─ ─ ─ ┘ ▲ ▲
│ │ │
│ ▼ │
┌───────────────┐ ┌────────────────────────────────┐ │ │
│ │ │ │ │ │
│ Kani Client │◀────▶│ Daemon / Event Loop │─────┘ │
│ │ │ │ │
└───────────────┘ └────────────────────────────────┘ │
│ ▲ ▲
│ │ │
│ ▼ ▼
┌──────────────────┐ ┌────────┐ │
│ │ │ │ │
│ DB / Cache │ │ TPM │ │
│ │ │ │ │
└──────────────────┘ └────────┘ │
─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ── ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┴
```
The current design treated the client as a trivial communication layer. The daemon/event loop
contained all state, including whether the resolver was online or offline. Additionally, the TPM and
password caching operations primarily occurred in the daemon layer, which limited the client
backend's access to these features.
### Future Features
#### Files Resolver
The ability to resolve and authenticate local users from `/etc/{passwd,group,shadow}`. The classic
mechanisms to resolve this are considered "slow" since they require a full file parse on each
operation.
In addition, these files are limited by their formats and cannot be extended with future
authentication mechanisms like CTAP2 or PIV.
Unixd already needs to parse these files to understand and prevent conflicts between local items and
remote ones. Extending this functionality will allow us to resolve local users from memory.
Not only this, we need to store information *permanently* that goes beyond what /etc/passwd and
similar files can store. It would be damaging to users if their CTAP2 credentials (passkeys) were
randomly deleted on a cache clear!
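As a rough sketch of the direction (the struct and methods below are illustrative, not the actual
`SystemProvider` this change introduces; only the field names mirror the existing `EtcUser`),
resolving local users from memory turns every NSS lookup into a map read after a single parse at
reload time:
```rust
// Illustrative sketch only: hold parsed /etc/passwd entries in memory so NSS
// lookups become map reads. The resolver struct and its methods are
// hypothetical, not the real SystemProvider from this change.
use std::collections::BTreeMap;

#[derive(Clone, Debug)]
struct EtcUser {
    name: String,
    uid: u32,
    gid: u32,
    gecos: String,
    homedir: String,
    shell: String,
}

#[derive(Default)]
struct FilesResolver {
    by_name: BTreeMap<String, EtcUser>,
    by_uid: BTreeMap<u32, EtcUser>,
}

impl FilesResolver {
    /// Rebuild the in-memory view when /etc/passwd changes on disk.
    fn reload(&mut self, users: Vec<EtcUser>) {
        self.by_name = users.iter().map(|u| (u.name.clone(), u.clone())).collect();
        self.by_uid = users.into_iter().map(|u| (u.uid, u)).collect();
    }

    /// Lookups no longer re-parse the file on every NSS call.
    fn get_by_name(&self, name: &str) -> Option<&EtcUser> {
        self.by_name.get(name)
    }

    fn get_by_uid(&self, uid: u32) -> Option<&EtcUser> {
        self.by_uid.get(&uid)
    }
}
```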
#### Local Group Extension
An often requested feature is the ability to extend a local group with the members of a remote
group. People often attempt to achieve this by "overloading" a group remotely, such as by creating a
group called "wheel" in Kanidm and then attempting to resolve it on their systems. This can
introduce issues, as different distributions may have the same groups but with different gidNumbers,
which can break systems, or it can cause locally configured items to be masked.
Instead, we should allow group _extension_. A local group can be nominated for extension and paired
with a remote group. For example, this could be configured as:
```
[group."wheel"]
extend_from = "opensuse_wheel"
```
This allows the local group "wheel" to be resolved and _extended_ with the members from the remote
group `opensuse_wheel`.
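Behaviourally, extension is a union of memberships where the local identity always wins. A minimal
sketch, using simplified stand-in types rather than the resolver's real group token:
```rust
// Sketch only: local group identity (name, gid) is authoritative; only the
// membership set is extended from the nominated remote group.
use std::collections::BTreeSet;

struct LocalGroup {
    name: String,
    gid: u32,
    members: Vec<String>,
    /// Set from config, e.g. [group."wheel"] extend_from = "opensuse_wheel"
    extend_from: Option<String>,
}

fn extended_members(local: &LocalGroup, remote_members: &[String]) -> Vec<String> {
    // The local gid and name are never replaced, so a remote gidNumber can
    // not mask or break the locally configured group.
    let mut members: BTreeSet<String> = local.members.iter().cloned().collect();
    if local.extend_from.is_some() {
        members.extend(remote_members.iter().cloned());
    }
    members.into_iter().collect()
}
```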
#### Multiple Resolvers
We would like to support multiple backends simultaneously and in our source tree. This is a major
motivator of this rework, as the Himmelblau project wishes to contribute its client layer into our
source tree while maintaining the bulk of its authentication code in a separate libhimmelblau
project.
We also want to support LDAP and other resolvers.
The major challenge here is that this shifts the cache state from the daemon to the client. This
requires each client to track its own online/offline state and to handle that state machine
correctly. Since we won't allow dynamic modules, this risk is mitigated: we can audit the source of
every interface committed to the project for correct behaviour.
#### Resolvers That Can't Resolve Without Authentication Attempts
Some resolvers, such as Himmelblau, are unable to resolve accounts without actually making an
authentication attempt. This isn't a limitation of Himmelblau, but of Azure AD itself.
This has consequences for how we perform authentication flows generally.
#### Domain Joining of Resolvers
Some clients (and in the future, Kanidm) need to be able to persist state related to Domain
Joining, where the client registers with the authentication provider. This provides extra
functionality beyond the scope of this document, but a domain join workflow needs to be possible
for the providers in some manner.
#### Encrypted Caches
To protect caches from offline modification, content should optionally be encrypted/signed in the
future.
#### CTAP2 / TPM-PIN
We want to allow local authentication with CTAP2 or a TPM with PIN. Both provide stronger assurances
of who the user is and that they are in possession of a specific cryptographic device. The
nice part of this is that they both implement hardware bruteforce protections. For the soft-TPM we
can emulate this with a strict lockout mechanism to prevent bruteforce.
The weakness is that PINs, which are used for both CTAP2 and TPM, tend to be shorter, ranging from
4 to 8 characters and generally numeric. This makes them unsuitable for remote auth.
This means that for SSH without keys, we *must* use a passphrase or similar instead. We must not
allow SSH auth with a PIN to a TPM, as this can easily become a remote DoS via the bruteforce
prevention mechanism.
This introduces its own challenge: we are now juggling multiple potential credentials and need
to account for their addition and removal, as well as their change.
Another significant challenge is that Linux is heavily embedded in "passwords as the only factor",
meaning that many systems, such as GNOME Keyring, are underdeveloped: the keyring expects stacked
PAM modules to unlock it as authentication proceeds.
*Local Users*
Local users will expect, on login, functionality equivalent to what `/etc/passwd` provides today,
meaning that local wallets and keyrings are unlocked at login. This necessitates that any CTAP2 or
TPM unlock also be able to unlock the keyring.
This also has implications for how users expect to interact with the feature. A user will expect
that changing their PIN will continue to allow unlock of their system, and a change of the user's
password should not invalidate their existing PINs or CTAP devices. To achieve this we will need
methods to cryptographically protect credentials and allow these updates.
To achieve this, we need to make the compromise that the user's password must be stored in a
reversible form on the system. Without this, the various wallets/keyrings won't work. This trade is
acceptable since `pam_kanidm` is already a module that handles password material in plaintext, so
having a mechanism to securely retrieve this *while* the user is entering equivalent security
material is reasonable.
The primary shift is that rather than storing a *kdf/hash* of the user's password, we will be
storing an authenticated encrypted object, where valid decryption of that object is proof that the
password matches.
For the soft-TPM, due to the PIN's short length, we will need to aggressively increase the KDF
rounds and consider an HMAC of the output.
```
HMAC-Secret
Password PIN output
│ │ │
│ │ │
│ │ │
▼ ▼ ▼
┌──────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ │ │ │ │ │
│ KDF │ │ PIN Object │ │ CTAP Object │
│ │ │ │ │ │
└──────────────────┘ └─────────────────┘ └─────────────────┘
│ │ ▲ │ ▲
│ │ │ │ │
│ Releases │ │
├───────KDF value──────┴─────┼───────────────┘ │
│ │ │
┌──────────────────┐ │ │
│ │
│ Sealed Object │ │ │
│ │─ ─ ─ ─Unlocks─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
│ │
└──────────────────┘
Release
Password
┌──────────────────┐
│ │
│pam_gnome_keyring │
│ pam_kwallet │
│ │
└──────────────────┘
```
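A conceptual sketch of that shift (every type and function below is a placeholder, not the real TPM
sealed-object API): verification is no longer a hash comparison, it is whether authenticated
decryption of the sealed object succeeds, and success is also what releases the material that later
modules such as `pam_gnome_keyring` need.
```rust
// Conceptual sketch only. derive_key and authenticated_decrypt are
// placeholders standing in for a memory-hard KDF (HMAC'd by the TPM) and an
// AEAD open operation respectively.
struct SealedObject {
    ciphertext: Vec<u8>,
}

enum UnsealError {
    /// The authentication tag failed: the entered password/PIN is wrong.
    BadSecret,
}

fn unlock(sealed: &SealedObject, entered_secret: &str) -> Result<Vec<u8>, UnsealError> {
    // Deriving the key and successfully opening the AEAD is the proof that
    // the secret matches; there is no stored hash to compare against. On
    // success, the released plaintext can be handed to pam_gnome_keyring /
    // pam_kwallet to unlock the user's keyring.
    let key = derive_key(entered_secret);
    authenticated_decrypt(&key, &sealed.ciphertext)
}

// --- placeholders below, not real cryptography ---
fn derive_key(secret: &str) -> Vec<u8> {
    secret.as_bytes().to_vec()
}

fn authenticated_decrypt(key: &[u8], ct: &[u8]) -> Result<Vec<u8>, UnsealError> {
    // A real implementation uses an AEAD: a wrong key fails the tag check
    // and returns BadSecret instead of producing garbage plaintext.
    if key.is_empty() || ct.is_empty() {
        return Err(UnsealError::BadSecret);
    }
    Ok(ct.to_vec())
}
```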
*Remote Users (such as Kanidm)*
After a lot of thinking, the conclusion we arrived at is that trying to handle password stacking for
later PAM modules is out of scope at this time.
Initially, remote users will be expected to have a password they can use to access the system. In
the future we may derive a way to distribute TPM PIN objects securely to domain joined machines.
We may allow PINs to be set on a per-machine basis, rather than syncing them via the remote source.
This would require that a change of the password remotely invalidates set PINs unless we think of
some way around this.
We also think that password managers such as desktop wallets should have passwords that are the
concern of the user, not the remote auth source, so that our IDM has no knowledge of the material
needed to unlock them.
### Challenges
- The order of backend resolvers needs to be stable.
- Accounts/Groups should _not_ move/flip-flop between resolvers.
- Resolvers need to uniquely identify entries in some manner.
- The ability to store persistent/permanent accounts in the DB that can _not_ be purged in a cache
clear.
- Simplicity of the interfaces for providers so that they don't need to duplicate effort.
- Ability to clear _single items_ from the cache rather than a full clear.
- Resolvers that can't pre-resolve items.
### New Architecture
```
┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┐
┌───────┐ ┌───────┐ ┌───────┐ │ ┌───────────────────┐ │
│ │ │ │ │ │ │ │
│ NSS │ │ PAM │ │ CLI │ │ │ Tasks Daemon │ │
│ │ │ │ │ │ │ │
└───────┘ └───────┘ └───────┘ │ └───────────────────┘ │
▲ ▲ ▲ ▲
┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┼ ─ ─ ─ ─ ─│─ ─ ─ ─ ─ ┼ ─ ─ ─ ─ ┴ ─ ─ ─ ─ ─ ┼ ─ ─ ─ ─ ─ ─ ─ ┤
▼ ▼ ▼ │
│ ┌─────────────────────────────┐ ┌───────────┐ │
│ │ │ │
│ │ ClientCodec │ │ Tasks │ │
┌──────────┐ │ │ │ │
│ │ │ └─────────────────────────────┘ └───────────┘ │
│ Files │◀────┐ ▲ ▲
│ │ │ │ │ │ │
└──────────┘ │ ▼ │
│ ┌───────────────┐│ ┌────────────────────────────────┐ │ │
│ │└─────┤ │ │
│ │ Kani Client │◀─┬──▶│ Daemon / Event Loop │─────┘ │
│ │ │ │ │
│ └───────────────┘◀─│┐ └────────────────────────────────┘ │
┌───────────────┐ │ ▲
│ │ │ ││ │ │
│ LDAP Client │◀─┤ ▼
│ │ │ ││ ┌────────┐ ┌──────────────────┐ │
└───────────────┘◀ ┼ │ │ │ │
│ ┌───────────────┐ │└ ─ ─│ TPM │ │ DB / Cache │ │
│ Himmleblau │ │ │ │ │ │
│ │ Client │◀─┘ └────────┘ └──────────────────┘ │
│ │
└ ┴───────────────┴ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ┴
```
#### Online/Offline State Machine
The major change, which this diagram may not clearly show, is that the online/offline state
machine moves into each of the named clients (excluding files). This also has some future impacts on
things like pre-emptive item reloading and other task scheduling. This will require the backends to
"upcall" into the daemon, as the TPM transaction needs to be passed from the daemon back down to the
provider. Alternatively, the provider needs to be able to register scheduled tasks into the daemon
with some generic interface.
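A minimal sketch of a provider owning this state, following the shape of the `CacheState` enum this
change adds to the Kanidm provider. The transition helper here is illustrative; in the real provider
this sits behind the `IdProvider` trait's `attempt_online` / `mark_next_check` / `mark_offline`
methods.
```rust
// Sketch of per-provider online/offline tracking. CacheState mirrors the
// enum introduced for the Kanidm provider in this change; check_online is a
// hypothetical helper for illustration.
use std::time::{Duration, SystemTime};

const OFFLINE_NEXT_CHECK: Duration = Duration::from_secs(60);

#[derive(Debug, Clone)]
enum CacheState {
    Online,
    Offline,
    /// Offline, but permitted to retry once this time is reached.
    OfflineNextCheck(SystemTime),
}

struct ProviderState {
    state: CacheState,
}

impl ProviderState {
    /// Decide whether this request may go to the network, and schedule the
    /// next retry if the provider is (still) unreachable.
    fn check_online(&mut self, now: SystemTime, reachable: bool) -> bool {
        let (next_state, proceed) = match self.state {
            CacheState::Online => (CacheState::Online, true),
            CacheState::Offline => (CacheState::Offline, false),
            CacheState::OfflineNextCheck(at) if now >= at && reachable => {
                (CacheState::Online, true)
            }
            CacheState::OfflineNextCheck(at) if now >= at => {
                (CacheState::OfflineNextCheck(now + OFFLINE_NEXT_CHECK), false)
            }
            CacheState::OfflineNextCheck(at) => (CacheState::OfflineNextCheck(at), false),
        };
        self.state = next_state;
        proceed
    }
}
```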
#### Resolution Flow
The most important change is that with multiple resolvers we need to change how accounts resolve. In
pseudocode, the "online" flow (ignoring caches) is:
```
if files.contains(item_id):
if item_id.is_extensible:
# This only seeks items from the providers, not files for extensibility.
item += resolver.get(item_id.extended_from)
return item
# Providers are sorted by priority.
for provider in providers:
if provider.contains(item_id)
return item
return None
```
Key points here:
- One provider is marked as "default".
- Providers are sorted by priority from highest to lowest.
- Default always sorts as "highest".
- The default provider returns items with Name OR SPN.
- Non-default providers always return by SPN.
Once an item is located it is then added to the cache. The provider marks the item with a cache
timeout that the cache respects. The item is also marked with the provider that is the _origin_ of
the item.
Once an item-id exists in the cache, it may only be serviced by the corresponding origin provider.
This prevents an earlier stacked provider from "hijacking" an item from another provider. Only if
the provider indicates the item no longer exists OR the cache is cleared of that item (either by
single item or full clear) can the item change provider as the item goes through the general
resolution path.
If we consider these behaviours now with the cache, the flow becomes:
```
def resolve:
if files.contains(item_id):
if item_id.is_extensible:
# This only seeks items from the providers, not files for extensibility.
item += resolver.get(item_id.extended_from)
return item
resolver.get(item_id)
def resolver.get:
# Hot Path
if cache.contains(item):
if item.expired:
provider = item.provider
# refresh if possible
let refreshed_item = provider.refresh(item)
match refreshed_item {
Missing => break; # Bail and let other providers have at it.
Offline => Return the cached item
Updated => item = refreshed_item
};
return item
# Cold Path
#
# Providers are sorted by priority. Default provider is first.
#
# Providers are looped *only* if an item isn't already in
# the cache in some manner.
let item = {
for provider in providers:
if provider.contains(item_id)
if provider.is_default():
item.id = name || spn
else:
item.id = spn
break item
}
cache.add(item)
return item
```
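In the implementation, the hot-path refresh result maps roughly onto the `UserTokenState` type this
change introduces. A simplified sketch of that mapping, with a stand-in token type:
```rust
// Sketch of the hot-path refresh decision, using the UserTokenState enum
// added in this change (UseCached / NotFound / Update). The UserToken here
// is a simplified stand-in.
struct UserToken {
    spn: String,
}

enum UserTokenState {
    /// Provider is offline or nothing changed: keep serving the cached item.
    UseCached,
    /// The provider says the item no longer exists: evict it so other
    /// providers may claim the id through the normal resolution path.
    NotFound,
    /// Fresh data: replace the cached item and reset its expiry.
    Update(UserToken),
}

fn apply_refresh(cached: UserToken, refreshed: UserTokenState) -> Option<UserToken> {
    match refreshed {
        UserTokenState::UseCached => Some(cached),
        UserTokenState::NotFound => None,
        UserTokenState::Update(item) => Some(item),
    }
}
```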
#### Cache and Database Persistence
The existing cache has always been considered ephemeral and able to be deleted at any time. With the
move to Domain Join and other needs for long-term persistence, our cache must now also include
elements that are permanent.
The existing item cache is also highly limited by the fact that we "rolled our own" DB schema
and rely heavily on sqlite.
We should migrate to a primarily in-memory cache, where sqlite is used only for persistence. The
sqlite content should optionally be able to be encrypted by a TPM-bound key.
To obfuscate details, the sqlite DB should be a single key:value table where keys are UUIDs
associated with the item. The UUID is a local detail, not related to the provider.
The cache should move to a concread-based concurrent tree, which will also allow us to multi-thread
the resolver for high-performance deployments. Mail servers are an often requested use case for
Kanidm in this space.
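Combining the two ideas, a minimal sketch of the persistence side is below; the table and column
names and the use of JSON are assumptions for illustration. The concread-based in-memory map would
sit in front of this table, with sqlite written through only for items that must survive a cache
clear.
```rust
// Sketch only: a single key:value table, keyed by a locally generated uuid,
// holding an opaque serialised blob.
use rusqlite::{params, Connection};
use uuid::Uuid;

fn init(conn: &Connection) -> rusqlite::Result<()> {
    conn.execute(
        "CREATE TABLE IF NOT EXISTS cache_kv (uuid TEXT PRIMARY KEY, value BLOB NOT NULL)",
        [],
    )?;
    Ok(())
}

fn persist(conn: &Connection, key: Uuid, value: &serde_json::Value) -> rusqlite::Result<()> {
    // The stored value is opaque to sqlite; optionally this blob could be
    // encrypted by a TPM-bound key before it is written.
    let blob = serde_json::to_vec(value).expect("serialisable value");
    conn.execute(
        "INSERT OR REPLACE INTO cache_kv (uuid, value) VALUES (?1, ?2)",
        params![key.as_hyphenated().to_string(), blob],
    )?;
    Ok(())
}
```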
#### Extensible Entries
Currently UserTokens and GroupTokens are limited and unable to contain provider-specific keys. We
should allow a generic BTreeMap of key:values. This will allow providers to extend entries as
required.
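This is the shape the change gives the tokens, shown here for the group token (other fields from the
change, such as `provider`, are omitted):
```rust
// Opaque provider-specific keys are carried in a flattened map; only the
// owning provider interprets them.
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::BTreeMap;

pub type XKeyId = String;

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct GroupToken {
    pub name: String,
    pub spn: String,
    // Requires the uuid crate with its `serde` feature.
    pub uuid: uuid::Uuid,
    pub gidnumber: u32,
    /// Extra keys that only the origin provider understands.
    #[serde(flatten)]
    pub extra_keys: BTreeMap<XKeyId, Value>,
}
```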
#### Offline Password/Credential Caching
The caching of credentials should move to a provider-specific mechanism, supported by the presence
of extensible UserToken entries. This also allows other types of credentials to be stored that can
be consumed by the user.
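As an example of the pattern, the Kanidm provider in this change keeps an argon2id verifier under
the `kanidm-pw-v1` extra key (see `kanidm_update_cached_password`). A simplified sketch of where
that data lives; the toy `kdf` below is a placeholder, not the real TPM-backed hash:
```rust
// Simplified sketch of provider-specific credential caching in extra_keys.
use serde_json::{json, Value};
use std::collections::BTreeMap;

const CACHED_CRED_KEY: &str = "kanidm-pw-v1";

fn kdf(cred: &str) -> String {
    // Placeholder only. The real provider uses Password::new_argon2id_hsm
    // bound to the TPM HMAC key.
    format!("not-a-real-hash:{}", cred.len())
}

fn update_cached_cred(extra_keys: &mut BTreeMap<String, Value>, cred: &str) {
    extra_keys.insert(CACHED_CRED_KEY.to_string(), json!(kdf(cred)));
}

fn check_cached_cred(extra_keys: &BTreeMap<String, Value>, cred: &str) -> bool {
    extra_keys
        .get(CACHED_CRED_KEY)
        .and_then(|v| v.as_str())
        .map(|stored| stored == kdf(cred))
        .unwrap_or(false)
}
```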
#### Alternate Credential Caching
One use case is application passwords: a mail server may wish to cache and locally store the
application password. Only domain-joined systems should be capable of this, and they need to protect
the application password appropriately.


@ -692,8 +692,11 @@ impl KanidmClient {
#[cfg(any(test, debug_assertions))] #[cfg(any(test, debug_assertions))]
if !matching { if !matching {
error!("You're in debug/dev mode, so we're going to quit here."); if !std::env::var("KANIDM_DEV_YOLO").is_ok() {
std::process::exit(1); eprintln!("⚠️ You're in debug/dev mode, so we're going to quit here.");
eprintln!("If you really must do this, set KANIDM_DEV_YOLO=1");
std::process::exit(1);
}
} }
// Check is done once, mark as no longer needing to occur // Check is done once, mark as no longer needing to occur


@ -1,14 +1,33 @@
use crate::unix_passwd::{EtcGroup, EtcUser};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub struct NssUser { pub struct NssUser {
pub name: String, pub name: String,
pub uid: u32,
pub gid: u32, pub gid: u32,
pub gecos: String, pub gecos: String,
pub homedir: String, pub homedir: String,
pub shell: String, pub shell: String,
} }
impl<T> From<&T> for NssUser
where
T: AsRef<EtcUser>,
{
fn from(etc_user: &T) -> Self {
let etc_user = etc_user.as_ref();
NssUser {
name: etc_user.name.clone(),
uid: etc_user.uid,
gid: etc_user.gid,
gecos: etc_user.gecos.clone(),
homedir: etc_user.homedir.clone(),
shell: etc_user.shell.clone(),
}
}
}
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub struct NssGroup { pub struct NssGroup {
pub name: String, pub name: String,
@ -16,6 +35,20 @@ pub struct NssGroup {
pub members: Vec<String>, pub members: Vec<String>,
} }
impl<T> From<&T> for NssGroup
where
T: AsRef<EtcGroup>,
{
fn from(etc_group: &T) -> Self {
let etc_group = etc_group.as_ref();
NssGroup {
name: etc_group.name.clone(),
gid: etc_group.gid,
members: etc_group.members.clone(),
}
}
}
/* RFC8628: 3.2. Device Authorization Response */ /* RFC8628: 3.2. Device Authorization Response */
#[derive(Serialize, Deserialize, Clone, Debug)] #[derive(Serialize, Deserialize, Clone, Debug)]
pub struct DeviceAuthorizationResponse { pub struct DeviceAuthorizationResponse {
@ -111,6 +144,12 @@ impl ClientRequest {
} }
} }
#[derive(Serialize, Deserialize, Debug)]
pub struct ProviderStatus {
pub name: String,
pub online: bool,
}
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub enum ClientResponse { pub enum ClientResponse {
SshKeys(Vec<String>), SshKeys(Vec<String>),
@ -122,6 +161,8 @@ pub enum ClientResponse {
PamStatus(Option<bool>), PamStatus(Option<bool>),
PamAuthenticateStepResponse(PamAuthResponse), PamAuthenticateStepResponse(PamAuthResponse),
ProviderStatus(Vec<ProviderStatus>),
Ok, Ok,
Error, Error,
} }


@ -102,6 +102,7 @@ async fn main() -> ExitCode {
| ClientResponse::NssAccount(_) | ClientResponse::NssAccount(_)
| ClientResponse::NssGroup(_) | ClientResponse::NssGroup(_)
| ClientResponse::NssGroups(_) | ClientResponse::NssGroups(_)
| ClientResponse::ProviderStatus(_)
| ClientResponse::Ok | ClientResponse::Ok
| ClientResponse::Error | ClientResponse::Error
| ClientResponse::PamStatus(_) => { | ClientResponse::PamStatus(_) => {
@ -228,7 +229,15 @@ async fn main() -> ExitCode {
} else { } else {
match call_daemon(cfg.sock_path.as_str(), req, cfg.unix_sock_timeout).await { match call_daemon(cfg.sock_path.as_str(), req, cfg.unix_sock_timeout).await {
Ok(r) => match r { Ok(r) => match r {
ClientResponse::Ok => println!("working!"), ClientResponse::ProviderStatus(results) => {
for provider in results {
println!(
"{}: {}",
provider.name,
if provider.online { "online" } else { "offline" }
);
}
}
_ => { _ => {
error!("Error: unexpected response -> {:?}", r); error!("Error: unexpected response -> {:?}", r);
} }


@ -19,7 +19,7 @@ use std::path::{Path, PathBuf};
use std::process::ExitCode; use std::process::ExitCode;
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::{Duration, SystemTime};
use bytes::{BufMut, BytesMut}; use bytes::{BufMut, BytesMut};
use clap::{Arg, ArgAction, Command}; use clap::{Arg, ArgAction, Command};
@ -31,6 +31,7 @@ use kanidm_unix_common::unix_passwd::{parse_etc_group, parse_etc_passwd};
use kanidm_unix_common::unix_proto::{ClientRequest, ClientResponse, TaskRequest, TaskResponse}; use kanidm_unix_common::unix_proto::{ClientRequest, ClientResponse, TaskRequest, TaskResponse};
use kanidm_unix_resolver::db::{Cache, Db}; use kanidm_unix_resolver::db::{Cache, Db};
use kanidm_unix_resolver::idprovider::kanidm::KanidmProvider; use kanidm_unix_resolver::idprovider::kanidm::KanidmProvider;
use kanidm_unix_resolver::idprovider::system::SystemProvider;
use kanidm_unix_resolver::resolver::Resolver; use kanidm_unix_resolver::resolver::Resolver;
use kanidm_unix_resolver::unix_config::{HsmType, KanidmUnixdConfig}; use kanidm_unix_resolver::unix_config::{HsmType, KanidmUnixdConfig};
@ -409,11 +410,8 @@ async fn handle_client(
} }
ClientRequest::Status => { ClientRequest::Status => {
debug!("status check"); debug!("status check");
if cachelayer.test_connection().await { let status = cachelayer.provider_status().await;
ClientResponse::Ok ClientResponse::ProviderStatus(status)
} else {
ClientResponse::Error
}
} }
}; };
reqs.send(resp).await?; reqs.send(resp).await?;
@ -447,12 +445,7 @@ async fn process_etc_passwd_group(cachelayer: &Resolver) -> Result<(), Box<dyn E
let groups = parse_etc_group(contents.as_slice()).map_err(|_| "Invalid group content")?; let groups = parse_etc_group(contents.as_slice()).map_err(|_| "Invalid group content")?;
let id_iter = users cachelayer.reload_system_identities(users, groups).await;
.iter()
.map(|user| (user.name.clone(), user.uid))
.chain(groups.iter().map(|group| (group.name.clone(), group.gid)));
cachelayer.reload_nxset(id_iter).await;
Ok(()) Ok(())
} }
@ -815,8 +808,6 @@ async fn main() -> ExitCode {
} }
}; };
let idprovider = KanidmProvider::new(rsclient);
let db = match Db::new(cfg.db_path.as_str()) { let db = match Db::new(cfg.db_path.as_str()) {
Ok(db) => db, Ok(db) => db,
Err(_e) => { Err(_e) => {
@ -837,6 +828,8 @@ async fn main() -> ExitCode {
// Check for and create the hsm pin if required. // Check for and create the hsm pin if required.
if let Err(err) = write_hsm_pin(cfg.hsm_pin_path.as_str()).await { if let Err(err) = write_hsm_pin(cfg.hsm_pin_path.as_str()).await {
let diag = kanidm_lib_file_permissions::diagnose_path(cfg.hsm_pin_path.as_ref());
info!(%diag);
error!(?err, "Failed to create HSM PIN into {}", cfg.hsm_pin_path.as_str()); error!(?err, "Failed to create HSM PIN into {}", cfg.hsm_pin_path.as_str());
return ExitCode::FAILURE return ExitCode::FAILURE
}; };
@ -845,6 +838,8 @@ async fn main() -> ExitCode {
let hsm_pin = match read_hsm_pin(cfg.hsm_pin_path.as_str()).await { let hsm_pin = match read_hsm_pin(cfg.hsm_pin_path.as_str()).await {
Ok(hp) => hp, Ok(hp) => hp,
Err(err) => { Err(err) => {
let diag = kanidm_lib_file_permissions::diagnose_path(cfg.hsm_pin_path.as_ref());
info!(%diag);
error!(?err, "Failed to read HSM PIN from {}", cfg.hsm_pin_path.as_str()); error!(?err, "Failed to read HSM PIN from {}", cfg.hsm_pin_path.as_str());
return ExitCode::FAILURE return ExitCode::FAILURE
} }
@ -910,6 +905,25 @@ async fn main() -> ExitCode {
} }
}; };
let Ok(system_provider) = SystemProvider::new(
) else {
error!("Failed to configure System Provider");
return ExitCode::FAILURE
};
let Ok(idprovider) = KanidmProvider::new(
rsclient,
SystemTime::now(),
&mut (&mut db_txn).into(),
&mut hsm,
&machine_key
) else {
error!("Failed to configure Kanidm Provider");
return ExitCode::FAILURE
};
drop(machine_key);
if let Err(err) = db_txn.commit() { if let Err(err) = db_txn.commit() {
error!(?err, "Failed to commit database transaction, unable to proceed"); error!(?err, "Failed to commit database transaction, unable to proceed");
return ExitCode::FAILURE return ExitCode::FAILURE
@ -926,9 +940,9 @@ async fn main() -> ExitCode {
let cl_inner = match Resolver::new( let cl_inner = match Resolver::new(
db, db,
Box::new(idprovider), Arc::new(system_provider),
Arc::new(idprovider),
hsm, hsm,
machine_key,
cfg.cache_timeout, cfg.cache_timeout,
cfg.pam_allowed_login_groups.clone(), cfg.pam_allowed_login_groups.clone(),
cfg.default_shell.clone(), cfg.default_shell.clone(),
@ -937,7 +951,6 @@ async fn main() -> ExitCode {
cfg.home_alias, cfg.home_alias,
cfg.uid_attr_map, cfg.uid_attr_map,
cfg.gid_attr_map, cfg.gid_attr_map,
cfg.allow_local_account_override.clone(),
) )
.await .await
{ {
@ -955,6 +968,8 @@ async fn main() -> ExitCode {
let task_listener = match UnixListener::bind(cfg.task_sock_path.as_str()) { let task_listener = match UnixListener::bind(cfg.task_sock_path.as_str()) {
Ok(l) => l, Ok(l) => l,
Err(_e) => { Err(_e) => {
let diag = kanidm_lib_file_permissions::diagnose_path(cfg.task_sock_path.as_ref());
info!(%diag);
error!("Failed to bind UNIX socket {}", cfg.task_sock_path.as_str()); error!("Failed to bind UNIX socket {}", cfg.task_sock_path.as_str());
return ExitCode::FAILURE return ExitCode::FAILURE
} }


@ -1,12 +1,8 @@
use std::convert::TryFrom; use std::convert::TryFrom;
use std::fmt; use std::fmt;
use std::time::Duration;
use crate::idprovider::interface::{GroupToken, Id, UserToken}; use crate::idprovider::interface::{GroupToken, Id, UserToken};
use async_trait::async_trait; use async_trait::async_trait;
use kanidm_lib_crypto::CryptoPolicy;
use kanidm_lib_crypto::DbPasswordV1;
use kanidm_lib_crypto::Password;
use libc::umask; use libc::umask;
use rusqlite::{Connection, OptionalExtension}; use rusqlite::{Connection, OptionalExtension};
use tokio::sync::{Mutex, MutexGuard}; use tokio::sync::{Mutex, MutexGuard};
@ -14,7 +10,7 @@ use uuid::Uuid;
use serde::{de::DeserializeOwned, Serialize}; use serde::{de::DeserializeOwned, Serialize};
use kanidm_hsm_crypto::{HmacKey, LoadableHmacKey, LoadableMachineKey, Tpm}; use kanidm_hsm_crypto::{LoadableHmacKey, LoadableMachineKey};
const DBV_MAIN: &str = "main"; const DBV_MAIN: &str = "main";
@ -49,13 +45,11 @@ pub enum CacheError {
pub struct Db { pub struct Db {
conn: Mutex<Connection>, conn: Mutex<Connection>,
crypto_policy: CryptoPolicy,
} }
pub struct DbTxn<'a> { pub struct DbTxn<'a> {
conn: MutexGuard<'a, Connection>, conn: MutexGuard<'a, Connection>,
committed: bool, committed: bool,
crypto_policy: &'a CryptoPolicy,
} }
pub struct KeyStoreTxn<'a, 'b> { pub struct KeyStoreTxn<'a, 'b> {
@ -83,15 +77,9 @@ impl Db {
DbError::Sqlite DbError::Sqlite
})?; })?;
let _ = unsafe { umask(before) }; let _ = unsafe { umask(before) };
// We only build a single thread. If we need more than one, we'll
// need to re-do this to account for path = "" for debug.
let crypto_policy = CryptoPolicy::time_target(Duration::from_millis(250));
debug!("Configured {:?}", crypto_policy);
Ok(Db { Ok(Db {
conn: Mutex::new(conn), conn: Mutex::new(conn),
crypto_policy,
}) })
} }
} }
@ -103,7 +91,7 @@ impl Cache for Db {
#[allow(clippy::expect_used)] #[allow(clippy::expect_used)]
async fn write<'db>(&'db self) -> Self::Txn<'db> { async fn write<'db>(&'db self) -> Self::Txn<'db> {
let conn = self.conn.lock().await; let conn = self.conn.lock().await;
DbTxn::new(conn, &self.crypto_policy) DbTxn::new(conn)
} }
} }
@ -114,16 +102,15 @@ impl fmt::Debug for Db {
} }
impl<'a> DbTxn<'a> { impl<'a> DbTxn<'a> {
fn new(conn: MutexGuard<'a, Connection>, crypto_policy: &'a CryptoPolicy) -> Self { fn new(conn: MutexGuard<'a, Connection>) -> Self {
// Start the transaction // Start the transaction
// debug!("Starting db WR txn ..."); // trace!("Starting db WR txn ...");
#[allow(clippy::expect_used)] #[allow(clippy::expect_used)]
conn.execute("BEGIN TRANSACTION", []) conn.execute("BEGIN TRANSACTION", [])
.expect("Unable to begin transaction!"); .expect("Unable to begin transaction!");
DbTxn { DbTxn {
committed: false, committed: false,
conn, conn,
crypto_policy,
} }
} }
@ -324,7 +311,7 @@ impl<'a> DbTxn<'a> {
":value": &data, ":value": &data,
}) })
.map(|r| { .map(|r| {
debug!("insert -> {:?}", r); trace!("insert -> {:?}", r);
}) })
.map_err(|e| self.sqlite_error("execute", &e)) .map_err(|e| self.sqlite_error("execute", &e))
} }
@ -545,7 +532,7 @@ impl<'a> DbTxn<'a> {
":value": &data, ":value": &data,
}) })
.map(|r| { .map(|r| {
debug!("insert -> {:?}", r); trace!("insert -> {:?}", r);
}) })
.map_err(|e| self.sqlite_error("execute", &e)) .map_err(|e| self.sqlite_error("execute", &e))
} }
@ -587,7 +574,7 @@ impl<'a> DbTxn<'a> {
":value": &data, ":value": &data,
}) })
.map(|r| { .map(|r| {
debug!("insert -> {:?}", r); trace!("insert -> {:?}", r);
}) })
.map_err(|e| self.sqlite_error("execute", &e)) .map_err(|e| self.sqlite_error("execute", &e))
} }
@ -715,7 +702,7 @@ impl<'a> DbTxn<'a> {
":expiry": &expire, ":expiry": &expire,
}) })
.map(|r| { .map(|r| {
debug!("insert -> {:?}", r); trace!("insert -> {:?}", r);
}) })
.map_err(|error| self.sqlite_transaction_error(&error, &stmt))?; .map_err(|error| self.sqlite_transaction_error(&error, &stmt))?;
} }
@ -730,7 +717,7 @@ impl<'a> DbTxn<'a> {
stmt.execute([&account_uuid]) stmt.execute([&account_uuid])
.map(|r| { .map(|r| {
debug!("delete memberships -> {:?}", r); trace!("delete memberships -> {:?}", r);
}) })
.map_err(|error| self.sqlite_transaction_error(&error, &stmt))?; .map_err(|error| self.sqlite_transaction_error(&error, &stmt))?;
@ -745,7 +732,7 @@ impl<'a> DbTxn<'a> {
":g_uuid": &g.uuid.as_hyphenated().to_string(), ":g_uuid": &g.uuid.as_hyphenated().to_string(),
}) })
.map(|r| { .map(|r| {
debug!("insert membership -> {:?}", r); trace!("insert membership -> {:?}", r);
}) })
.map_err(|error| self.sqlite_transaction_error(&error, &stmt)) .map_err(|error| self.sqlite_transaction_error(&error, &stmt))
}) })
@ -771,88 +758,6 @@ impl<'a> DbTxn<'a> {
.map_err(|e| self.sqlite_error("account_t delete", &e)) .map_err(|e| self.sqlite_error("account_t delete", &e))
} }
pub fn update_account_password(
&mut self,
a_uuid: Uuid,
cred: &str,
hsm: &mut dyn Tpm,
hmac_key: &HmacKey,
) -> Result<(), CacheError> {
let pw =
Password::new_argon2id_hsm(self.crypto_policy, cred, hsm, hmac_key).map_err(|e| {
error!("password error -> {:?}", e);
CacheError::Cryptography
})?;
let dbpw = pw.to_dbpasswordv1();
let data = serde_json::to_vec(&dbpw).map_err(|e| {
error!("json error -> {:?}", e);
CacheError::SerdeJson
})?;
self.conn
.execute(
"UPDATE account_t SET password = :data WHERE uuid = :a_uuid",
named_params! {
":a_uuid": &a_uuid.as_hyphenated().to_string(),
":data": &data,
},
)
.map_err(|e| self.sqlite_error("update account_t password", &e))
.map(|_| ())
}
pub fn check_account_password(
&mut self,
a_uuid: Uuid,
cred: &str,
hsm: &mut dyn Tpm,
hmac_key: &HmacKey,
) -> Result<bool, CacheError> {
let mut stmt = self
.conn
.prepare("SELECT password FROM account_t WHERE uuid = :a_uuid AND password IS NOT NULL")
.map_err(|e| self.sqlite_error("select prepare", &e))?;
// Makes tuple (token, expiry)
let data_iter = stmt
.query_map([a_uuid.as_hyphenated().to_string()], |row| row.get(0))
.map_err(|e| self.sqlite_error("query_map", &e))?;
let data: Result<Vec<Vec<u8>>, _> = data_iter
.map(|v| v.map_err(|e| self.sqlite_error("map", &e)))
.collect();
let data = data?;
if data.is_empty() {
info!("No cached password, failing authentication");
return Ok(false);
}
if data.len() >= 2 {
error!("invalid db state, multiple entries matched query?");
return Err(CacheError::TooManyResults);
}
let pw = data.first().map(|raw| {
// Map the option from data.first.
let dbpw: DbPasswordV1 = serde_json::from_slice(raw.as_slice()).map_err(|e| {
error!("json error -> {:?}", e);
})?;
Password::try_from(dbpw)
});
let pw = match pw {
Some(Ok(p)) => p,
_ => return Ok(false),
};
pw.verify_ctx(cred, Some((hsm, hmac_key))).map_err(|e| {
error!("password error -> {:?}", e);
CacheError::Cryptography
})
}
pub fn get_group(&mut self, grp_id: &Id) -> Result<Option<(GroupToken, u64)>, CacheError> { pub fn get_group(&mut self, grp_id: &Id) -> Result<Option<(GroupToken, u64)>, CacheError> {
let data = match grp_id { let data = match grp_id {
Id::Name(n) => self.get_group_data_name(n.as_str()), Id::Name(n) => self.get_group_data_name(n.as_str()),
@ -907,7 +812,7 @@ impl<'a> DbTxn<'a> {
data.iter() data.iter()
.map(|token| { .map(|token| {
// token convert with json. // token convert with json.
// debug!("{:?}", token); // trace!("{:?}", token);
serde_json::from_slice(token.as_slice()).map_err(|e| { serde_json::from_slice(token.as_slice()).map_err(|e| {
error!("json error -> {:?}", e); error!("json error -> {:?}", e);
CacheError::SerdeJson CacheError::SerdeJson
@ -935,7 +840,7 @@ impl<'a> DbTxn<'a> {
.iter() .iter()
.filter_map(|token| { .filter_map(|token| {
// token convert with json. // token convert with json.
// debug!("{:?}", token); // trace!("{:?}", token);
serde_json::from_slice(token.as_slice()) serde_json::from_slice(token.as_slice())
.map_err(|e| { .map_err(|e| {
error!("json error -> {:?}", e); error!("json error -> {:?}", e);
@ -971,7 +876,7 @@ impl<'a> DbTxn<'a> {
":expiry": &expire, ":expiry": &expire,
}) })
.map(|r| { .map(|r| {
debug!("insert -> {:?}", r); trace!("insert -> {:?}", r);
}) })
.map_err(|e| self.sqlite_error("execute", &e)) .map_err(|e| self.sqlite_error("execute", &e))
} }
@ -1002,7 +907,7 @@ impl<'a> Drop for DbTxn<'a> {
// Abort // Abort
fn drop(&mut self) { fn drop(&mut self) {
if !self.committed { if !self.committed {
// debug!("Aborting BE WR txn"); // trace!("Aborting BE WR txn");
#[allow(clippy::expect_used)] #[allow(clippy::expect_used)]
self.conn self.conn
.execute("ROLLBACK TRANSACTION", []) .execute("ROLLBACK TRANSACTION", [])
@ -1013,25 +918,8 @@ impl<'a> Drop for DbTxn<'a> {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::{Cache, Db}; use super::{Cache, Db};
use crate::idprovider::interface::{GroupToken, Id, ProviderOrigin, UserToken}; use crate::idprovider::interface::{GroupToken, Id, ProviderOrigin, UserToken};
use kanidm_hsm_crypto::{AuthValue, Tpm};
const TESTACCOUNT1_PASSWORD_A: &str = "password a for account1 test";
const TESTACCOUNT1_PASSWORD_B: &str = "password b for account1 test";
#[cfg(feature = "tpm")]
fn setup_tpm() -> Box<dyn Tpm> {
use kanidm_hsm_crypto::tpm::TpmTss;
Box::new(TpmTss::new("device:/dev/tpmrm0").expect("Unable to build Tpm Context"))
}
#[cfg(not(feature = "tpm"))]
fn setup_tpm() -> Box<dyn Tpm> {
use kanidm_hsm_crypto::soft::SoftTpm;
Box::new(SoftTpm::new())
}
#[tokio::test] #[tokio::test]
async fn test_cache_db_account_basic() { async fn test_cache_db_account_basic() {
@ -1041,7 +929,7 @@ mod tests {
assert!(dbtxn.migrate().is_ok()); assert!(dbtxn.migrate().is_ok());
let mut ut1 = UserToken { let mut ut1 = UserToken {
provider: ProviderOrigin::Files, provider: ProviderOrigin::System,
name: "testuser".to_string(), name: "testuser".to_string(),
spn: "testuser@example.com".to_string(), spn: "testuser@example.com".to_string(),
displayname: "Test User".to_string(), displayname: "Test User".to_string(),
@ -1051,6 +939,7 @@ mod tests {
groups: Vec::new(), groups: Vec::new(),
sshkeys: vec!["key-a".to_string()], sshkeys: vec!["key-a".to_string()],
valid: true, valid: true,
extra_keys: Default::default(),
}; };
let id_name = Id::Name("testuser".to_string()); let id_name = Id::Name("testuser".to_string());
@ -1126,11 +1015,12 @@ mod tests {
assert!(dbtxn.migrate().is_ok()); assert!(dbtxn.migrate().is_ok());
let mut gt1 = GroupToken { let mut gt1 = GroupToken {
provider: ProviderOrigin::Files, provider: ProviderOrigin::System,
name: "testgroup".to_string(), name: "testgroup".to_string(),
spn: "testgroup@example.com".to_string(), spn: "testgroup@example.com".to_string(),
gidnumber: 2000, gidnumber: 2000,
uuid: uuid::uuid!("0302b99c-f0f6-41ab-9492-852692b0fd16"), uuid: uuid::uuid!("0302b99c-f0f6-41ab-9492-852692b0fd16"),
extra_keys: Default::default(),
}; };
let id_name = Id::Name("testgroup".to_string()); let id_name = Id::Name("testgroup".to_string());
@ -1202,23 +1092,25 @@ mod tests {
assert!(dbtxn.migrate().is_ok()); assert!(dbtxn.migrate().is_ok());
let gt1 = GroupToken { let gt1 = GroupToken {
provider: ProviderOrigin::Files, provider: ProviderOrigin::System,
name: "testuser".to_string(), name: "testuser".to_string(),
spn: "testuser@example.com".to_string(), spn: "testuser@example.com".to_string(),
gidnumber: 2000, gidnumber: 2000,
uuid: uuid::uuid!("0302b99c-f0f6-41ab-9492-852692b0fd16"), uuid: uuid::uuid!("0302b99c-f0f6-41ab-9492-852692b0fd16"),
extra_keys: Default::default(),
}; };
let gt2 = GroupToken { let gt2 = GroupToken {
provider: ProviderOrigin::Files, provider: ProviderOrigin::System,
name: "testgroup".to_string(), name: "testgroup".to_string(),
spn: "testgroup@example.com".to_string(), spn: "testgroup@example.com".to_string(),
gidnumber: 2001, gidnumber: 2001,
uuid: uuid::uuid!("b500be97-8552-42a5-aca0-668bc5625705"), uuid: uuid::uuid!("b500be97-8552-42a5-aca0-668bc5625705"),
extra_keys: Default::default(),
}; };
let mut ut1 = UserToken { let mut ut1 = UserToken {
provider: ProviderOrigin::Files, provider: ProviderOrigin::System,
name: "testuser".to_string(), name: "testuser".to_string(),
spn: "testuser@example.com".to_string(), spn: "testuser@example.com".to_string(),
displayname: "Test User".to_string(), displayname: "Test User".to_string(),
@ -1228,6 +1120,7 @@ mod tests {
groups: vec![gt1.clone(), gt2], groups: vec![gt1.clone(), gt2],
sshkeys: vec!["key-a".to_string()], sshkeys: vec!["key-a".to_string()],
valid: true, valid: true,
extra_keys: Default::default(),
}; };
// First, add the groups. // First, add the groups.
@ -1265,91 +1158,6 @@ mod tests {
assert!(dbtxn.commit().is_ok()); assert!(dbtxn.commit().is_ok());
} }
#[tokio::test]
async fn test_cache_db_account_password() {
sketching::test_init();
let db = Db::new("").expect("failed to create.");
let mut dbtxn = db.write().await;
assert!(dbtxn.migrate().is_ok());
let mut hsm = setup_tpm();
let auth_value = AuthValue::ephemeral().unwrap();
let loadable_machine_key = hsm.machine_key_create(&auth_value).unwrap();
let machine_key = hsm
.machine_key_load(&auth_value, &loadable_machine_key)
.unwrap();
let loadable_hmac_key = hsm.hmac_key_create(&machine_key).unwrap();
let hmac_key = hsm.hmac_key_load(&machine_key, &loadable_hmac_key).unwrap();
let uuid1 = uuid::uuid!("0302b99c-f0f6-41ab-9492-852692b0fd16");
let mut ut1 = UserToken {
provider: ProviderOrigin::Files,
name: "testuser".to_string(),
spn: "testuser@example.com".to_string(),
displayname: "Test User".to_string(),
gidnumber: 2000,
uuid: uuid1,
shell: None,
groups: Vec::new(),
sshkeys: vec!["key-a".to_string()],
valid: true,
};
// Test that with no account, is false
assert!(matches!(
dbtxn.check_account_password(uuid1, TESTACCOUNT1_PASSWORD_A, &mut *hsm, &hmac_key),
Ok(false)
));
// test adding an account
dbtxn.update_account(&ut1, 0).unwrap();
// check with no password is false.
assert!(matches!(
dbtxn.check_account_password(uuid1, TESTACCOUNT1_PASSWORD_A, &mut *hsm, &hmac_key),
Ok(false)
));
// update the pw
assert!(dbtxn
.update_account_password(uuid1, TESTACCOUNT1_PASSWORD_A, &mut *hsm, &hmac_key)
.is_ok());
// Check it now works.
assert!(matches!(
dbtxn.check_account_password(uuid1, TESTACCOUNT1_PASSWORD_A, &mut *hsm, &hmac_key),
Ok(true)
));
assert!(matches!(
dbtxn.check_account_password(uuid1, TESTACCOUNT1_PASSWORD_B, &mut *hsm, &hmac_key),
Ok(false)
));
// Update the pw
assert!(dbtxn
.update_account_password(uuid1, TESTACCOUNT1_PASSWORD_B, &mut *hsm, &hmac_key)
.is_ok());
// Check it matches.
assert!(matches!(
dbtxn.check_account_password(uuid1, TESTACCOUNT1_PASSWORD_A, &mut *hsm, &hmac_key),
Ok(false)
));
assert!(matches!(
dbtxn.check_account_password(uuid1, TESTACCOUNT1_PASSWORD_B, &mut *hsm, &hmac_key),
Ok(true)
));
// Check that updating the account does not break the password.
ut1.displayname = "Test User Update".to_string();
dbtxn.update_account(&ut1, 0).unwrap();
assert!(matches!(
dbtxn.check_account_password(uuid1, TESTACCOUNT1_PASSWORD_B, &mut *hsm, &hmac_key),
Ok(true)
));
assert!(dbtxn.commit().is_ok());
}
#[tokio::test] #[tokio::test]
async fn test_cache_db_group_rename_duplicate() { async fn test_cache_db_group_rename_duplicate() {
sketching::test_init(); sketching::test_init();
@ -1358,19 +1166,21 @@ mod tests {
assert!(dbtxn.migrate().is_ok()); assert!(dbtxn.migrate().is_ok());
let mut gt1 = GroupToken { let mut gt1 = GroupToken {
provider: ProviderOrigin::Files, provider: ProviderOrigin::System,
name: "testgroup".to_string(), name: "testgroup".to_string(),
spn: "testgroup@example.com".to_string(), spn: "testgroup@example.com".to_string(),
gidnumber: 2000, gidnumber: 2000,
uuid: uuid::uuid!("0302b99c-f0f6-41ab-9492-852692b0fd16"), uuid: uuid::uuid!("0302b99c-f0f6-41ab-9492-852692b0fd16"),
extra_keys: Default::default(),
}; };
let gt2 = GroupToken { let gt2 = GroupToken {
provider: ProviderOrigin::Files, provider: ProviderOrigin::System,
name: "testgroup".to_string(), name: "testgroup".to_string(),
spn: "testgroup@example.com".to_string(), spn: "testgroup@example.com".to_string(),
gidnumber: 2001, gidnumber: 2001,
uuid: uuid::uuid!("799123b2-3802-4b19-b0b8-1ffae2aa9a4b"), uuid: uuid::uuid!("799123b2-3802-4b19-b0b8-1ffae2aa9a4b"),
extra_keys: Default::default(),
}; };
let id_name = Id::Name("testgroup".to_string()); let id_name = Id::Name("testgroup".to_string());
@ -1415,7 +1225,7 @@ mod tests {
assert!(dbtxn.migrate().is_ok()); assert!(dbtxn.migrate().is_ok());
let mut ut1 = UserToken { let mut ut1 = UserToken {
provider: ProviderOrigin::Files, provider: ProviderOrigin::System,
name: "testuser".to_string(), name: "testuser".to_string(),
spn: "testuser@example.com".to_string(), spn: "testuser@example.com".to_string(),
displayname: "Test User".to_string(), displayname: "Test User".to_string(),
@ -1425,10 +1235,11 @@ mod tests {
groups: Vec::new(), groups: Vec::new(),
sshkeys: vec!["key-a".to_string()], sshkeys: vec!["key-a".to_string()],
valid: true, valid: true,
extra_keys: Default::default(),
}; };
let ut2 = UserToken { let ut2 = UserToken {
provider: ProviderOrigin::Files, provider: ProviderOrigin::System,
name: "testuser".to_string(), name: "testuser".to_string(),
spn: "testuser@example.com".to_string(), spn: "testuser@example.com".to_string(),
displayname: "Test User".to_string(), displayname: "Test User".to_string(),
@ -1438,6 +1249,7 @@ mod tests {
groups: Vec::new(), groups: Vec::new(),
sshkeys: vec!["key-a".to_string()], sshkeys: vec!["key-a".to_string()],
valid: true, valid: true,
extra_keys: Default::default(),
}; };
let id_name = Id::Name("testuser".to_string()); let id_name = Id::Name("testuser".to_string());


@ -1,12 +1,17 @@
use crate::db::KeyStoreTxn;
use async_trait::async_trait; use async_trait::async_trait;
use kanidm_unix_common::unix_proto::{ use kanidm_unix_common::unix_proto::{
DeviceAuthorizationResponse, PamAuthRequest, PamAuthResponse, DeviceAuthorizationResponse, PamAuthRequest, PamAuthResponse,
}; };
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::BTreeMap;
use std::fmt;
use std::time::SystemTime;
use tokio::sync::broadcast; use tokio::sync::broadcast;
use uuid::Uuid; use uuid::Uuid;
pub type XKeyId = String;
pub use kanidm_hsm_crypto as tpm; pub use kanidm_hsm_crypto as tpm;
/// Errors that the IdProvider may return. These drive the resolver state machine /// Errors that the IdProvider may return. These drive the resolver state machine
@ -33,22 +38,59 @@ pub enum IdpError {
Tpm, Tpm,
} }
pub enum UserTokenState {
/// Indicate to the resolver that the cached UserToken should be used, if present.
UseCached,
/// The requested entity is not found, or has been removed.
NotFound,
/// Update the cache state with the data found in this UserToken.
Update(UserToken),
}
pub enum GroupTokenState {
/// Indicate to the resolver that the cached GroupToken should be used, if present.
UseCached,
/// The requested entity is not found, or has been removed.
NotFound,
/// Update the cache state with the data found in this GroupToken.
Update(GroupToken),
}
#[derive(Debug, Clone, PartialEq, Eq, Hash)] #[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Id { pub enum Id {
Name(String), Name(String),
Gid(u32), Gid(u32),
} }
#[derive(Debug, Serialize, Deserialize, Clone, Default)] #[derive(Debug, Serialize, Deserialize, Clone, Default, Eq, PartialEq, Hash)]
pub enum ProviderOrigin { pub enum ProviderOrigin {
// To allow transition, we have an ignored type that effectively // To allow transition, we have an ignored type that effectively
// causes these items to be nixed. // causes these items to be nixed.
#[default] #[default]
Ignore, Ignore,
Files, /// Provided by /etc/passwd or /etc/group
System,
Kanidm, Kanidm,
} }
impl fmt::Display for ProviderOrigin {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ProviderOrigin::Ignore => {
write!(f, "Ignored")
}
ProviderOrigin::System => {
write!(f, "System")
}
ProviderOrigin::Kanidm => {
write!(f, "Kanidm")
}
}
}
}
#[derive(Debug, Serialize, Deserialize, Clone)] #[derive(Debug, Serialize, Deserialize, Clone)]
pub struct GroupToken { pub struct GroupToken {
#[serde(default)] #[serde(default)]
@ -57,12 +99,16 @@ pub struct GroupToken {
pub spn: String, pub spn: String,
pub uuid: Uuid, pub uuid: Uuid,
pub gidnumber: u32, pub gidnumber: u32,
#[serde(flatten)]
pub extra_keys: BTreeMap<XKeyId, Value>,
} }
#[derive(Debug, Serialize, Deserialize, Clone)] #[derive(Debug, Serialize, Deserialize, Clone)]
pub struct UserToken { pub struct UserToken {
#[serde(default)] #[serde(default)]
pub provider: ProviderOrigin, pub provider: ProviderOrigin,
pub name: String, pub name: String,
pub spn: String, pub spn: String,
pub uuid: Uuid, pub uuid: Uuid,
@ -70,10 +116,16 @@ pub struct UserToken {
pub displayname: String, pub displayname: String,
pub shell: Option<String>, pub shell: Option<String>,
pub groups: Vec<GroupToken>, pub groups: Vec<GroupToken>,
// Could there be a better type here? // Could there be a better type here?
pub sshkeys: Vec<String>, pub sshkeys: Vec<String>,
// Defaults to false. // Defaults to false.
pub valid: bool, pub valid: bool,
// These are opaque extra keys that the provider can interpret for internal
// functions.
#[serde(flatten)]
pub extra_keys: BTreeMap<XKeyId, Value>,
} }
#[derive(Debug)] #[derive(Debug)]
@ -147,22 +199,21 @@ pub enum AuthResult {
Next(AuthRequest), Next(AuthRequest),
} }
pub enum AuthCacheAction {
None,
PasswordHashUpdate { cred: String },
}
#[async_trait] #[async_trait]
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub trait IdProvider { pub trait IdProvider {
async fn configure_hsm_keys( /// Retrieve this providers origin
&self, fn origin(&self) -> ProviderOrigin;
_keystore: &mut KeyStoreTxn,
_tpm: &mut tpm::BoxedDynTpm, /// Attempt to go online *immediately*
_machine_key: &tpm::MachineKey, async fn attempt_online(&self, _tpm: &mut tpm::BoxedDynTpm, _now: SystemTime) -> bool;
) -> Result<(), IdpError> {
Ok(()) /// Mark that this provider should attempt to go online next time it
} /// receives a request
async fn mark_next_check(&self, _now: SystemTime);
/// Force this provider offline immediately.
async fn mark_offline(&self);
/// This is similar to a "domain join" process. What do we actually need to pass here /// This is similar to a "domain join" process. What do we actually need to pass here
/// for this to work for kanidm or himmelblau? Should we make it take a generic? /// for this to work for kanidm or himmelblau? Should we make it take a generic?
@ -177,23 +228,19 @@ pub trait IdProvider {
} }
*/ */
async fn provider_authenticate(&self, _tpm: &mut tpm::BoxedDynTpm) -> Result<(), IdpError>;
async fn unix_user_get( async fn unix_user_get(
&self, &self,
_id: &Id, _id: &Id,
_token: Option<&UserToken>, _token: Option<&UserToken>,
_tpm: &mut tpm::BoxedDynTpm, _tpm: &mut tpm::BoxedDynTpm,
_machine_key: &tpm::MachineKey, _now: SystemTime,
) -> Result<UserToken, IdpError>; ) -> Result<UserTokenState, IdpError>;
async fn unix_user_online_auth_init( async fn unix_user_online_auth_init(
&self, &self,
_account_id: &str, _account_id: &str,
_token: Option<&UserToken>, _token: &UserToken,
_keystore: &mut KeyStoreTxn,
_tpm: &mut tpm::BoxedDynTpm, _tpm: &mut tpm::BoxedDynTpm,
_machine_key: &tpm::MachineKey,
_shutdown_rx: &broadcast::Receiver<()>, _shutdown_rx: &broadcast::Receiver<()>,
) -> Result<(AuthRequest, AuthCredHandler), IdpError>; ) -> Result<(AuthRequest, AuthCredHandler), IdpError>;
@ -202,17 +249,20 @@ pub trait IdProvider {
_account_id: &str, _account_id: &str,
_cred_handler: &mut AuthCredHandler, _cred_handler: &mut AuthCredHandler,
_pam_next_req: PamAuthRequest, _pam_next_req: PamAuthRequest,
_keystore: &mut KeyStoreTxn,
_tpm: &mut tpm::BoxedDynTpm, _tpm: &mut tpm::BoxedDynTpm,
_machine_key: &tpm::MachineKey,
_shutdown_rx: &broadcast::Receiver<()>, _shutdown_rx: &broadcast::Receiver<()>,
) -> Result<(AuthResult, AuthCacheAction), IdpError>; ) -> Result<AuthResult, IdpError>;
async fn unix_unknown_user_online_auth_init(
&self,
_account_id: &str,
_tpm: &mut tpm::BoxedDynTpm,
_shutdown_rx: &broadcast::Receiver<()>,
) -> Result<Option<(AuthRequest, AuthCredHandler)>, IdpError>;
async fn unix_user_offline_auth_init( async fn unix_user_offline_auth_init(
&self, &self,
_account_id: &str, _token: &UserToken,
_token: Option<&UserToken>,
_keystore: &mut KeyStoreTxn,
) -> Result<(AuthRequest, AuthCredHandler), IdpError>; ) -> Result<(AuthRequest, AuthCredHandler), IdpError>;
// I thought about this part of the interface a lot. we could have the // I thought about this part of the interface a lot. we could have the
@ -236,19 +286,16 @@ pub trait IdProvider {
// TPM key. // TPM key.
async fn unix_user_offline_auth_step( async fn unix_user_offline_auth_step(
&self, &self,
_account_id: &str,
_token: &UserToken, _token: &UserToken,
_cred_handler: &mut AuthCredHandler, _cred_handler: &mut AuthCredHandler,
_pam_next_req: PamAuthRequest, _pam_next_req: PamAuthRequest,
_keystore: &mut KeyStoreTxn,
_tpm: &mut tpm::BoxedDynTpm, _tpm: &mut tpm::BoxedDynTpm,
_machine_key: &tpm::MachineKey,
_online_at_init: bool,
) -> Result<AuthResult, IdpError>; ) -> Result<AuthResult, IdpError>;
async fn unix_group_get( async fn unix_group_get(
&self, &self,
id: &Id, id: &Id,
_tpm: &mut tpm::BoxedDynTpm, _tpm: &mut tpm::BoxedDynTpm,
) -> Result<GroupToken, IdpError>; _now: SystemTime,
) -> Result<GroupTokenState, IdpError>;
} }


@ -3,36 +3,96 @@ use async_trait::async_trait;
use kanidm_client::{ClientError, KanidmClient, StatusCode}; use kanidm_client::{ClientError, KanidmClient, StatusCode};
use kanidm_proto::internal::OperationError; use kanidm_proto::internal::OperationError;
use kanidm_proto::v1::{UnixGroupToken, UnixUserToken}; use kanidm_proto::v1::{UnixGroupToken, UnixUserToken};
use tokio::sync::{broadcast, RwLock}; use std::time::{Duration, SystemTime};
use tokio::sync::{broadcast, Mutex};
use kanidm_lib_crypto::CryptoPolicy;
use kanidm_lib_crypto::DbPasswordV1;
use kanidm_lib_crypto::Password;
use super::interface::{ use super::interface::{
// KeyStore, tpm::{self, HmacKey, Tpm},
tpm, AuthCredHandler, AuthRequest, AuthResult, GroupToken, GroupTokenState, Id, IdProvider,
tpm::Tpm, IdpError, ProviderOrigin, UserToken, UserTokenState,
AuthCacheAction,
AuthCredHandler,
AuthRequest,
AuthResult,
GroupToken,
Id,
IdProvider,
IdpError,
ProviderOrigin,
UserToken,
}; };
use kanidm_unix_common::unix_proto::PamAuthRequest; use kanidm_unix_common::unix_proto::PamAuthRequest;
const TAG_IDKEY: &str = "idkey"; const KANIDM_HMAC_KEY: &str = "kanidm-hmac-key";
const KANIDM_PWV1_KEY: &str = "kanidm-pw-v1";
const OFFLINE_NEXT_CHECK: Duration = Duration::from_secs(60);
#[derive(Debug, Clone)]
enum CacheState {
Online,
Offline,
OfflineNextCheck(SystemTime),
}
struct KanidmProviderInternal {
state: CacheState,
client: KanidmClient,
hmac_key: HmacKey,
crypto_policy: CryptoPolicy,
}
pub struct KanidmProvider { pub struct KanidmProvider {
client: RwLock<KanidmClient>, inner: Mutex<KanidmProviderInternal>,
} }
impl KanidmProvider { impl KanidmProvider {
pub fn new(client: KanidmClient) -> Self { pub fn new(
KanidmProvider { client: KanidmClient,
client: RwLock::new(client), now: SystemTime,
} keystore: &mut KeyStoreTxn,
tpm: &mut tpm::BoxedDynTpm,
machine_key: &tpm::MachineKey,
) -> Result<Self, IdpError> {
// FUTURE: Randomised jitter on next check at startup.
// Initially retrieve our HMAC key.
let loadable_hmac_key: Option<tpm::LoadableHmacKey> = keystore
.get_tagged_hsm_key(KANIDM_HMAC_KEY)
.map_err(|ks_err| {
error!(?ks_err);
IdpError::KeyStore
})?;
let loadable_hmac_key = if let Some(loadable_hmac_key) = loadable_hmac_key {
loadable_hmac_key
} else {
let loadable_hmac_key = tpm.hmac_key_create(machine_key).map_err(|tpm_err| {
error!(?tpm_err);
IdpError::Tpm
})?;
keystore
.insert_tagged_hsm_key(KANIDM_HMAC_KEY, &loadable_hmac_key)
.map_err(|ks_err| {
error!(?ks_err);
IdpError::KeyStore
})?;
loadable_hmac_key
};
let hmac_key = tpm
.hmac_key_load(machine_key, &loadable_hmac_key)
.map_err(|tpm_err| {
error!(?tpm_err);
IdpError::Tpm
})?;
let crypto_policy = CryptoPolicy::time_target(Duration::from_millis(250));
Ok(KanidmProvider {
inner: Mutex::new(KanidmProviderInternal {
state: CacheState::OfflineNextCheck(now),
client,
hmac_key,
crypto_policy,
}),
})
} }
} }
@ -63,6 +123,7 @@ impl From<UnixUserToken> for UserToken {
groups, groups,
sshkeys, sshkeys,
valid, valid,
extra_keys: Default::default(),
} }
} }
} }
@ -82,73 +143,174 @@ impl From<UnixGroupToken> for GroupToken {
spn, spn,
uuid, uuid,
gidnumber, gidnumber,
extra_keys: Default::default(),
}
}
}
impl UserToken {
pub fn kanidm_update_cached_password(
&mut self,
crypto_policy: &CryptoPolicy,
cred: &str,
tpm: &mut tpm::BoxedDynTpm,
hmac_key: &HmacKey,
) {
let pw = match Password::new_argon2id_hsm(crypto_policy, cred, tpm, hmac_key) {
Ok(pw) => pw,
Err(reason) => {
// Clear cached pw.
self.extra_keys.remove(KANIDM_PWV1_KEY);
warn!(
?reason,
"unable to apply kdf to password, clearing cached password."
);
return;
}
};
let pw_value = match serde_json::to_value(pw.to_dbpasswordv1()) {
Ok(pw) => pw,
Err(reason) => {
// Clear cached pw.
self.extra_keys.remove(KANIDM_PWV1_KEY);
warn!(
?reason,
"unable to serialise credential, clearing cached password."
);
return;
}
};
self.extra_keys.insert(KANIDM_PWV1_KEY.into(), pw_value);
debug!(spn = %self.spn, "Updated cached pw");
}
pub fn kanidm_check_cached_password(
&self,
cred: &str,
tpm: &mut tpm::BoxedDynTpm,
hmac_key: &HmacKey,
) -> bool {
let pw_value = match self.extra_keys.get(KANIDM_PWV1_KEY) {
Some(pw_value) => pw_value,
None => {
debug!(spn = %self.spn, "no cached pw available");
return false;
}
};
let dbpw = match serde_json::from_value::<DbPasswordV1>(pw_value.clone()) {
Ok(dbpw) => dbpw,
Err(reason) => {
warn!(spn = %self.spn, ?reason, "unable to deserialise credential");
return false;
}
};
let pw = match Password::try_from(dbpw) {
Ok(pw) => pw,
Err(reason) => {
warn!(spn = %self.spn, ?reason, "unable to process credential");
return false;
}
};
pw.verify_ctx(cred, Some((tpm, hmac_key)))
.unwrap_or_default()
}
}
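
`kanidm_update_cached_password` and `kanidm_check_cached_password` above stash the TPM-backed argon2id hash in the token's `extra_keys` map under `kanidm-pw-v1`, serialised as a `serde_json::Value`. A small self-contained sketch of that storage round-trip, using a stand-in struct in place of `DbPasswordV1` (the real verification goes through `Password::verify_ctx` with the HMAC key, which is not reproduced here):

```rust
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::BTreeMap;

// Stand-in for kanidm's DbPasswordV1; the real type carries the argon2id
// parameters and digest produced under the TPM-bound HMAC key.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct CachedPwV1 {
    kdf: String,
    digest: String,
}

const KANIDM_PWV1_KEY: &str = "kanidm-pw-v1";

fn main() {
    // extra_keys on the UserToken is a map of provider-specific JSON values.
    let mut extra_keys: BTreeMap<String, Value> = BTreeMap::new();

    // Update path: serialise the credential and store it under the provider key.
    let pw = CachedPwV1 { kdf: "argon2id".into(), digest: "<hash bytes>".into() };
    let pw_value = serde_json::to_value(&pw).expect("unable to serialise credential");
    extra_keys.insert(KANIDM_PWV1_KEY.into(), pw_value);

    // Check path: fetch and deserialise before verifying against the supplied cred.
    let restored: CachedPwV1 = extra_keys
        .get(KANIDM_PWV1_KEY)
        .cloned()
        .map(|v| serde_json::from_value(v).expect("unable to deserialise credential"))
        .expect("no cached pw available");

    assert_eq!(pw, restored);
}
```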
impl KanidmProviderInternal {
async fn check_online(&mut self, tpm: &mut tpm::BoxedDynTpm, now: SystemTime) -> bool {
match self.state {
// Proceed
CacheState::Online => true,
CacheState::OfflineNextCheck(at_time) if now >= at_time => {
// Attempt online. If fails, return token.
self.attempt_online(tpm, now).await
}
CacheState::OfflineNextCheck(_) | CacheState::Offline => false,
}
}
async fn attempt_online(&mut self, _tpm: &mut tpm::BoxedDynTpm, now: SystemTime) -> bool {
match self.client.auth_anonymous().await {
Ok(_uat) => {
self.state = CacheState::Online;
true
}
Err(ClientError::Transport(err)) => {
warn!(?err, "transport failure");
self.state = CacheState::OfflineNextCheck(now + OFFLINE_NEXT_CHECK);
false
}
Err(err) => {
error!(?err, "Provider authentication failed");
self.state = CacheState::OfflineNextCheck(now + OFFLINE_NEXT_CHECK);
false
}
} }
} }
} }
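
Taken together, `CacheState`, `check_online` and `attempt_online` implement a simple backoff: a provider that fails is parked in `OfflineNextCheck` and not retried until the deadline passes, instead of being probed on every request. A self-contained sketch of that gate (the constant and state names mirror this diff; the real `check_online` additionally performs the anonymous auth attempt when the gate opens):

```rust
use std::time::{Duration, SystemTime};

const OFFLINE_NEXT_CHECK: Duration = Duration::from_secs(60);

#[derive(Debug, Clone)]
enum CacheState {
    Online,
    Offline,
    OfflineNextCheck(SystemTime),
}

/// Mirrors the gate at the top of check_online: only OfflineNextCheck states
/// whose deadline has passed are allowed to attempt a reconnection.
fn may_attempt_online(state: &CacheState, now: SystemTime) -> bool {
    matches!(state, CacheState::OfflineNextCheck(at) if now >= *at)
}

/// Mirrors attempt_online's failure path: back off for OFFLINE_NEXT_CHECK.
fn on_transport_failure(now: SystemTime) -> CacheState {
    CacheState::OfflineNextCheck(now + OFFLINE_NEXT_CHECK)
}

fn main() {
    let now = SystemTime::now();
    let state = on_transport_failure(now);

    // Immediately after a failure we stay on the cache ...
    assert!(!may_attempt_online(&state, now));
    // ... and retry once the backoff window has elapsed.
    assert!(may_attempt_online(&state, now + OFFLINE_NEXT_CHECK));

    // Online and hard-Offline states never trigger a retry from this gate.
    assert!(!may_attempt_online(&CacheState::Online, now));
    assert!(!may_attempt_online(&CacheState::Offline, now));
    println!("offline backoff behaves as described");
}
```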
#[async_trait] #[async_trait]
impl IdProvider for KanidmProvider { impl IdProvider for KanidmProvider {
async fn configure_hsm_keys( fn origin(&self) -> ProviderOrigin {
&self, ProviderOrigin::Kanidm
keystore: &mut KeyStoreTxn,
tpm: &mut tpm::BoxedDynTpm,
machine_key: &tpm::MachineKey,
) -> Result<(), IdpError> {
let id_key: Option<tpm::LoadableIdentityKey> =
keystore.get_tagged_hsm_key(TAG_IDKEY).map_err(|ks_err| {
error!(?ks_err);
IdpError::KeyStore
})?;
if id_key.is_none() {
let loadable_id_key = tpm
.identity_key_create(machine_key, None, tpm::KeyAlgorithm::Ecdsa256)
.map_err(|tpm_err| {
error!(?tpm_err);
IdpError::Tpm
})?;
keystore
.insert_tagged_hsm_key(TAG_IDKEY, &loadable_id_key)
.map_err(|ks_err| {
error!(?ks_err);
IdpError::KeyStore
})?;
}
Ok(())
} }
// Needs .read on all types except re-auth. async fn attempt_online(&self, tpm: &mut tpm::BoxedDynTpm, now: SystemTime) -> bool {
async fn provider_authenticate(&self, _tpm: &mut tpm::BoxedDynTpm) -> Result<(), IdpError> { let mut inner = self.inner.lock().await;
match self.client.write().await.auth_anonymous().await { inner.check_online(tpm, now).await
Ok(_uat) => Ok(()), }
Err(err) => {
error!(?err, "Provider authentication failed"); async fn mark_next_check(&self, now: SystemTime) {
Err(IdpError::ProviderUnauthorised) let mut inner = self.inner.lock().await;
} inner.state = CacheState::OfflineNextCheck(now);
} }
async fn mark_offline(&self) {
let mut inner = self.inner.lock().await;
inner.state = CacheState::Offline;
} }
async fn unix_user_get( async fn unix_user_get(
&self, &self,
id: &Id, id: &Id,
_token: Option<&UserToken>, token: Option<&UserToken>,
_tpm: &mut tpm::BoxedDynTpm, tpm: &mut tpm::BoxedDynTpm,
_machine_key: &tpm::MachineKey, now: SystemTime,
) -> Result<UserToken, IdpError> { ) -> Result<UserTokenState, IdpError> {
match self let mut inner = self.inner.lock().await;
if !inner.check_online(tpm, now).await {
// We are offline, return that we should use a cached token.
return Ok(UserTokenState::UseCached);
}
// We are ONLINE, do the get.
match inner
.client .client
.read()
.await
.idm_account_unix_token_get(id.to_string().as_str()) .idm_account_unix_token_get(id.to_string().as_str())
.await .await
{ {
Ok(tok) => Ok(UserToken::from(tok)), Ok(tok) => {
Err(ClientError::Transport(err)) => { let mut ut = UserToken::from(tok);
error!(?err);
Err(IdpError::Transport) if let Some(previous_token) = token {
ut.extra_keys = previous_token.extra_keys.clone();
}
Ok(UserTokenState::Update(ut))
} }
// Offline?
Err(ClientError::Transport(err)) => {
error!(?err, "transport error");
inner.state = CacheState::OfflineNextCheck(now + OFFLINE_NEXT_CHECK);
Ok(UserTokenState::UseCached)
}
// Provider session error, need to re-auth
Err(ClientError::Http(StatusCode::UNAUTHORIZED, reason, opid)) => { Err(ClientError::Http(StatusCode::UNAUTHORIZED, reason, opid)) => {
match reason { match reason {
Some(OperationError::NotAuthenticated) => warn!( Some(OperationError::NotAuthenticated) => warn!(
@ -164,8 +326,10 @@ impl IdProvider for KanidmProvider {
e, opid e, opid
), ),
}; };
Err(IdpError::ProviderUnauthorised) inner.state = CacheState::OfflineNextCheck(now + OFFLINE_NEXT_CHECK);
Ok(UserTokenState::UseCached)
} }
// 404 / Removed.
Err(ClientError::Http( Err(ClientError::Http(
StatusCode::BAD_REQUEST, StatusCode::BAD_REQUEST,
Some(OperationError::NoMatchingEntries), Some(OperationError::NoMatchingEntries),
@ -185,8 +349,9 @@ impl IdProvider for KanidmProvider {
?opid, ?opid,
"entry has been removed or is no longer a valid posix account" "entry has been removed or is no longer a valid posix account"
); );
Err(IdpError::NotFound) Ok(UserTokenState::NotFound)
} }
// Something is really wrong? We did get a response though, so we are still online.
Err(err) => { Err(err) => {
error!(?err, "client error"); error!(?err, "client error");
Err(IdpError::BadRequest) Err(IdpError::BadRequest)
@ -197,42 +362,66 @@ impl IdProvider for KanidmProvider {
async fn unix_user_online_auth_init( async fn unix_user_online_auth_init(
&self, &self,
_account_id: &str, _account_id: &str,
_token: Option<&UserToken>, _token: &UserToken,
_keystore: &mut KeyStoreTxn,
_tpm: &mut tpm::BoxedDynTpm, _tpm: &mut tpm::BoxedDynTpm,
_machine_key: &tpm::MachineKey,
_shutdown_rx: &broadcast::Receiver<()>, _shutdown_rx: &broadcast::Receiver<()>,
) -> Result<(AuthRequest, AuthCredHandler), IdpError> { ) -> Result<(AuthRequest, AuthCredHandler), IdpError> {
// Not sure that I need to do much here? // Not sure that I need to do much here?
Ok((AuthRequest::Password, AuthCredHandler::Password)) Ok((AuthRequest::Password, AuthCredHandler::Password))
} }
async fn unix_unknown_user_online_auth_init(
&self,
_account_id: &str,
_tpm: &mut tpm::BoxedDynTpm,
_shutdown_rx: &broadcast::Receiver<()>,
) -> Result<Option<(AuthRequest, AuthCredHandler)>, IdpError> {
// We do not support unknown user auth.
Ok(None)
}
async fn unix_user_online_auth_step( async fn unix_user_online_auth_step(
&self, &self,
account_id: &str, account_id: &str,
cred_handler: &mut AuthCredHandler, cred_handler: &mut AuthCredHandler,
pam_next_req: PamAuthRequest, pam_next_req: PamAuthRequest,
_keystore: &mut KeyStoreTxn, tpm: &mut tpm::BoxedDynTpm,
_tpm: &mut tpm::BoxedDynTpm,
_machine_key: &tpm::MachineKey,
_shutdown_rx: &broadcast::Receiver<()>, _shutdown_rx: &broadcast::Receiver<()>,
) -> Result<(AuthResult, AuthCacheAction), IdpError> { ) -> Result<AuthResult, IdpError> {
match (cred_handler, pam_next_req) { match (cred_handler, pam_next_req) {
(AuthCredHandler::Password, PamAuthRequest::Password { cred }) => { (AuthCredHandler::Password, PamAuthRequest::Password { cred }) => {
match self let inner = self.inner.lock().await;
let auth_result = inner
.client .client
.read()
.await
.idm_account_unix_cred_verify(account_id, &cred) .idm_account_unix_cred_verify(account_id, &cred)
.await .await;
{
Ok(Some(n_tok)) => Ok(( trace!(?auth_result);
AuthResult::Success {
token: UserToken::from(n_tok), match auth_result {
}, Ok(Some(n_tok)) => {
AuthCacheAction::PasswordHashUpdate { cred }, let mut token = UserToken::from(n_tok);
)), token.kanidm_update_cached_password(
Ok(None) => Ok((AuthResult::Denied, AuthCacheAction::None)), &inner.crypto_policy,
cred.as_str(),
tpm,
&inner.hmac_key,
);
Ok(AuthResult::Success { token })
}
Ok(None) => {
// TODO: i'm not a huge fan of this rn, but currently the way we handle
// an expired account is we return Ok(None).
//
// We can't tell the difference between expired and incorrect password.
// So in these cases we have to clear the cached password. :(
//
// In future once we have domain join, we should be getting the user token
// at the start of the auth and checking for account validity instead.
Ok(AuthResult::Denied)
}
Err(ClientError::Transport(err)) => { Err(ClientError::Transport(err)) => {
error!(?err); error!(?err);
Err(IdpError::Transport) Err(IdpError::Transport)
@ -298,46 +487,74 @@ impl IdProvider for KanidmProvider {
async fn unix_user_offline_auth_init( async fn unix_user_offline_auth_init(
&self, &self,
_account_id: &str, _token: &UserToken,
_token: Option<&UserToken>,
_keystore: &mut KeyStoreTxn,
) -> Result<(AuthRequest, AuthCredHandler), IdpError> { ) -> Result<(AuthRequest, AuthCredHandler), IdpError> {
// Not sure that I need to do much here?
Ok((AuthRequest::Password, AuthCredHandler::Password)) Ok((AuthRequest::Password, AuthCredHandler::Password))
} }
async fn unix_user_offline_auth_step( async fn unix_user_offline_auth_step(
&self, &self,
_account_id: &str, token: &UserToken,
_token: &UserToken, cred_handler: &mut AuthCredHandler,
_cred_handler: &mut AuthCredHandler, pam_next_req: PamAuthRequest,
_pam_next_req: PamAuthRequest, tpm: &mut tpm::BoxedDynTpm,
_keystore: &mut KeyStoreTxn,
_tpm: &mut tpm::BoxedDynTpm,
_machine_key: &tpm::MachineKey,
_online_at_init: bool,
) -> Result<AuthResult, IdpError> { ) -> Result<AuthResult, IdpError> {
// We need any cached credentials here. match (cred_handler, pam_next_req) {
Err(IdpError::BadRequest) (AuthCredHandler::Password, PamAuthRequest::Password { cred }) => {
let inner = self.inner.lock().await;
if token.kanidm_check_cached_password(cred.as_str(), tpm, &inner.hmac_key) {
// TODO: We can update the token here and then do lockouts.
Ok(AuthResult::Success {
token: token.clone(),
})
} else {
Ok(AuthResult::Denied)
}
}
(
AuthCredHandler::DeviceAuthorizationGrant,
PamAuthRequest::DeviceAuthorizationGrant { .. },
) => {
error!("DeviceAuthorizationGrant not implemented!");
Err(IdpError::BadRequest)
}
_ => {
error!("invalid authentication request state");
Err(IdpError::BadRequest)
}
}
} }
async fn unix_group_get( async fn unix_group_get(
&self, &self,
id: &Id, id: &Id,
_tpm: &mut tpm::BoxedDynTpm, tpm: &mut tpm::BoxedDynTpm,
) -> Result<GroupToken, IdpError> { now: SystemTime,
match self ) -> Result<GroupTokenState, IdpError> {
let mut inner = self.inner.lock().await;
if !inner.check_online(tpm, now).await {
// We are offline, return that we should use a cached token.
return Ok(GroupTokenState::UseCached);
}
match inner
.client .client
.read()
.await
.idm_group_unix_token_get(id.to_string().as_str()) .idm_group_unix_token_get(id.to_string().as_str())
.await .await
{ {
Ok(tok) => Ok(GroupToken::from(tok)), Ok(tok) => {
Err(ClientError::Transport(err)) => { let gt = GroupToken::from(tok);
error!(?err); Ok(GroupTokenState::Update(gt))
Err(IdpError::Transport)
} }
// Offline?
Err(ClientError::Transport(err)) => {
error!(?err, "transport error");
inner.state = CacheState::OfflineNextCheck(now + OFFLINE_NEXT_CHECK);
Ok(GroupTokenState::UseCached)
}
// Provider session error, need to re-auth
Err(ClientError::Http(StatusCode::UNAUTHORIZED, reason, opid)) => { Err(ClientError::Http(StatusCode::UNAUTHORIZED, reason, opid)) => {
match reason { match reason {
Some(OperationError::NotAuthenticated) => warn!( Some(OperationError::NotAuthenticated) => warn!(
@ -353,8 +570,10 @@ impl IdProvider for KanidmProvider {
e, opid e, opid
), ),
}; };
Err(IdpError::ProviderUnauthorised) inner.state = CacheState::OfflineNextCheck(now + OFFLINE_NEXT_CHECK);
Ok(GroupTokenState::UseCached)
} }
// 404 / Removed.
Err(ClientError::Http( Err(ClientError::Http(
StatusCode::BAD_REQUEST, StatusCode::BAD_REQUEST,
Some(OperationError::NoMatchingEntries), Some(OperationError::NoMatchingEntries),
@ -372,10 +591,11 @@ impl IdProvider for KanidmProvider {
)) => { )) => {
debug!( debug!(
?opid, ?opid,
"entry has been removed or is no longer a valid posix group" "entry has been removed or is no longer a valid posix account"
); );
Err(IdpError::NotFound) Ok(GroupTokenState::NotFound)
} }
// Something is really wrong? We did get a response though, so we are still online.
Err(err) => { Err(err) => {
error!(?err, "client error"); error!(?err, "client error");
Err(IdpError::BadRequest) Err(IdpError::BadRequest)


@ -1,2 +1,3 @@
pub mod interface; pub mod interface;
pub mod kanidm; pub mod kanidm;
pub mod system;


@ -0,0 +1,126 @@
use hashbrown::HashMap;
use std::sync::Arc;
use tokio::sync::Mutex;
use super::interface::{Id, IdpError};
use kanidm_unix_common::unix_passwd::{EtcGroup, EtcUser};
use kanidm_unix_common::unix_proto::{NssGroup, NssUser};
pub struct SystemProviderInternal {
users: HashMap<Id, Arc<EtcUser>>,
user_list: Vec<Arc<EtcUser>>,
groups: HashMap<Id, Arc<EtcGroup>>,
group_list: Vec<Arc<EtcGroup>>,
}
pub struct SystemProvider {
inner: Mutex<SystemProviderInternal>,
}
impl SystemProvider {
pub fn new() -> Result<Self, IdpError> {
Ok(SystemProvider {
inner: Mutex::new(SystemProviderInternal {
users: Default::default(),
user_list: Default::default(),
groups: Default::default(),
group_list: Default::default(),
}),
})
}
pub async fn reload(&self, users: Vec<EtcUser>, groups: Vec<EtcGroup>) {
let mut system_ids_txn = self.inner.lock().await;
system_ids_txn.users.clear();
system_ids_txn.user_list.clear();
system_ids_txn.groups.clear();
system_ids_txn.group_list.clear();
for group in groups {
let name = Id::Name(group.name.clone());
let gid = Id::Gid(group.gid);
let group = Arc::new(group);
if system_ids_txn.groups.insert(name, group.clone()).is_some() {
error!(name = %group.name, gid = %group.gid, "group name conflict");
};
if system_ids_txn.groups.insert(gid, group.clone()).is_some() {
error!(name = %group.name, gid = %group.gid, "group id conflict");
}
system_ids_txn.group_list.push(group);
}
for user in users {
let name = Id::Name(user.name.clone());
let uid = Id::Gid(user.uid);
let gid = Id::Gid(user.gid);
if user.uid != user.gid {
error!(name = %user.name, uid = %user.uid, gid = %user.gid, "user uid and gid are not the same, this may be a security risk!");
}
// Security checks.
if let Some(group) = system_ids_txn.groups.get(&gid) {
if group.name != user.name {
error!(name = %user.name, uid = %user.uid, gid = %user.gid, "user private group does not appear to have the same name as the user, this may be a security risk!");
}
if !(group.members.is_empty()
|| (group.members.len() == 1 && group.members.first() == Some(&user.name)))
{
error!(name = %user.name, uid = %user.uid, gid = %user.gid, "user private group must not have members, THIS IS A SECURITY RISK!");
}
} else {
info!(name = %user.name, uid = %user.uid, gid = %user.gid, "user private group is not present on system, synthesising it");
let group = Arc::new(EtcGroup {
name: user.name.clone(),
password: String::new(),
gid: user.gid,
members: vec![user.name.clone()],
});
system_ids_txn.groups.insert(name.clone(), group.clone());
system_ids_txn.groups.insert(gid.clone(), group.clone());
system_ids_txn.group_list.push(group);
}
let user = Arc::new(user);
if system_ids_txn.users.insert(name, user.clone()).is_some() {
error!(name = %user.name, uid = %user.uid, "user name conflict");
}
if system_ids_txn.users.insert(uid, user.clone()).is_some() {
error!(name = %user.name, uid = %user.uid, "user id conflict");
}
system_ids_txn.user_list.push(user);
}
}
pub async fn contains_account(&self, account_id: &Id) -> bool {
let inner = self.inner.lock().await;
inner.users.contains_key(account_id)
}
pub async fn contains_group(&self, account_id: &Id) -> bool {
let inner = self.inner.lock().await;
inner.groups.contains_key(account_id)
}
pub async fn get_nssaccount(&self, account_id: &Id) -> Option<NssUser> {
let inner = self.inner.lock().await;
inner.users.get(account_id).map(NssUser::from)
}
pub async fn get_nssaccounts(&self) -> Vec<NssUser> {
let inner = self.inner.lock().await;
inner.user_list.iter().map(NssUser::from).collect()
}
pub async fn get_nssgroup(&self, grp_id: &Id) -> Option<NssGroup> {
let inner = self.inner.lock().await;
inner.groups.get(grp_id).map(NssGroup::from)
}
pub async fn get_nssgroups(&self) -> Vec<NssGroup> {
let inner = self.inner.lock().await;
inner.group_list.iter().map(NssGroup::from).collect()
}
}
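
A hedged usage sketch of the new SystemProvider: the types and crate paths are the ones used by this commit's test harness, and the tokio entry point is illustrative only. It shows the provider being fed a user with no matching private group, which reload() then synthesises.

```rust
use kanidm_unix_common::unix_passwd::EtcUser;
use kanidm_unix_resolver::idprovider::interface::Id;
use kanidm_unix_resolver::idprovider::system::SystemProvider;

#[tokio::main]
async fn main() {
    let provider = SystemProvider::new().expect("failed to build system provider");

    // Normally these come from parsing /etc/passwd and /etc/group.
    provider
        .reload(
            vec![EtcUser {
                name: "alice".to_string(),
                uid: 1000,
                gid: 1000,
                password: Default::default(),
                gecos: Default::default(),
                homedir: Default::default(),
                shell: Default::default(),
            }],
            // No groups supplied: reload() will synthesise alice's private group
            // (and log that it did so).
            vec![],
        )
        .await;

    assert!(provider.contains_account(&Id::Name("alice".to_string())).await);
    // The synthesised private group is resolvable by gid.
    assert!(provider.contains_group(&Id::Gid(1000)).await);
}
```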

File diff suppressed because it is too large


@ -2,7 +2,8 @@
use std::future::Future; use std::future::Future;
use std::pin::Pin; use std::pin::Pin;
use std::sync::atomic::Ordering; use std::sync::atomic::Ordering;
use std::time::Duration; use std::sync::Arc;
use std::time::{Duration, SystemTime};
use kanidm_client::{KanidmClient, KanidmClientBuilder}; use kanidm_client::{KanidmClient, KanidmClientBuilder};
use kanidm_proto::constants::ATTR_ACCOUNT_EXPIRE; use kanidm_proto::constants::ATTR_ACCOUNT_EXPIRE;
@ -10,9 +11,11 @@ use kanidm_unix_common::constants::{
DEFAULT_GID_ATTR_MAP, DEFAULT_HOME_ALIAS, DEFAULT_HOME_ATTR, DEFAULT_HOME_PREFIX, DEFAULT_GID_ATTR_MAP, DEFAULT_HOME_ALIAS, DEFAULT_HOME_ATTR, DEFAULT_HOME_PREFIX,
DEFAULT_SHELL, DEFAULT_UID_ATTR_MAP, DEFAULT_SHELL, DEFAULT_UID_ATTR_MAP,
}; };
use kanidm_unix_common::unix_passwd::{EtcGroup, EtcUser};
use kanidm_unix_resolver::db::{Cache, Db}; use kanidm_unix_resolver::db::{Cache, Db};
use kanidm_unix_resolver::idprovider::interface::Id; use kanidm_unix_resolver::idprovider::interface::Id;
use kanidm_unix_resolver::idprovider::kanidm::KanidmProvider; use kanidm_unix_resolver::idprovider::kanidm::KanidmProvider;
use kanidm_unix_resolver::idprovider::system::SystemProvider;
use kanidm_unix_resolver::resolver::Resolver; use kanidm_unix_resolver::resolver::Resolver;
use kanidmd_core::config::{Configuration, IntegrationTestConfig, ServerRole}; use kanidmd_core::config::{Configuration, IntegrationTestConfig, ServerRole};
use kanidmd_core::create_server_core; use kanidmd_core::create_server_core;
@ -101,18 +104,13 @@ async fn setup_test(fix_fn: Fixture) -> (Resolver, KanidmClient) {
.build() .build()
.expect("Failed to build client"); .expect("Failed to build client");
let idprovider = KanidmProvider::new(rsclient);
let db = Db::new( let db = Db::new(
"", // The sqlite db path, this is in memory. "", // The sqlite db path, this is in memory.
) )
.expect("Failed to setup DB"); .expect("Failed to setup DB");
let mut dbtxn = db.write().await; let mut dbtxn = db.write().await;
dbtxn dbtxn.migrate().expect("Unable to migrate cache db");
.migrate()
.and_then(|_| dbtxn.commit())
.expect("Unable to migrate cache db");
let mut hsm = BoxedDynTpm::new(SoftTpm::new()); let mut hsm = BoxedDynTpm::new(SoftTpm::new());
@ -123,11 +121,26 @@ async fn setup_test(fix_fn: Fixture) -> (Resolver, KanidmClient) {
.machine_key_load(&auth_value, &loadable_machine_key) .machine_key_load(&auth_value, &loadable_machine_key)
.unwrap(); .unwrap();
let system_provider = SystemProvider::new().unwrap();
let idprovider = KanidmProvider::new(
rsclient,
SystemTime::now(),
&mut (&mut dbtxn).into(),
&mut hsm,
&machine_key,
)
.unwrap();
drop(machine_key);
dbtxn.commit().expect("Unable to commit dbtxn");
let cachelayer = Resolver::new( let cachelayer = Resolver::new(
db, db,
Box::new(idprovider), Arc::new(system_provider),
Arc::new(idprovider),
hsm, hsm,
machine_key,
300, 300,
vec!["allowed_group".to_string()], vec!["allowed_group".to_string()],
DEFAULT_SHELL.to_string(), DEFAULT_SHELL.to_string(),
@ -136,7 +149,6 @@ async fn setup_test(fix_fn: Fixture) -> (Resolver, KanidmClient) {
DEFAULT_HOME_ALIAS, DEFAULT_HOME_ALIAS,
DEFAULT_UID_ATTR_MAP, DEFAULT_UID_ATTR_MAP,
DEFAULT_GID_ATTR_MAP, DEFAULT_GID_ATTR_MAP,
vec!["masked_group".to_string()],
) )
.await .await
.expect("Failed to build cache layer."); .expect("Failed to build cache layer.");
@ -231,7 +243,7 @@ async fn test_cache_sshkey() {
assert!(sk.is_empty()); assert!(sk.is_empty());
// Bring ourselves online. // Bring ourselves online.
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
let sk = cachelayer let sk = cachelayer
@ -262,7 +274,7 @@ async fn test_cache_account() {
assert!(ut.is_none()); assert!(ut.is_none());
// go online // go online
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
// get the account // get the account
@ -305,7 +317,7 @@ async fn test_cache_group() {
assert!(gt.is_none()); assert!(gt.is_none());
// go online. Get the group // go online. Get the group
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
let gt = cachelayer let gt = cachelayer
.get_nssgroup_name("testgroup1") .get_nssgroup_name("testgroup1")
@ -326,7 +338,7 @@ async fn test_cache_group() {
// clear cache, go online // clear cache, go online
assert!(cachelayer.invalidate().await.is_ok()); assert!(cachelayer.invalidate().await.is_ok());
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
// get an account with the group // get an account with the group
@ -361,7 +373,7 @@ async fn test_cache_group() {
async fn test_cache_group_delete() { async fn test_cache_group_delete() {
let (cachelayer, adminclient) = setup_test(fixture(test_fixture)).await; let (cachelayer, adminclient) = setup_test(fixture(test_fixture)).await;
// get the group // get the group
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
let gt = cachelayer let gt = cachelayer
.get_nssgroup_name("testgroup1") .get_nssgroup_name("testgroup1")
@ -395,7 +407,7 @@ async fn test_cache_group_delete() {
async fn test_cache_account_delete() { async fn test_cache_account_delete() {
let (cachelayer, adminclient) = setup_test(fixture(test_fixture)).await; let (cachelayer, adminclient) = setup_test(fixture(test_fixture)).await;
// get the account // get the account
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
let ut = cachelayer let ut = cachelayer
.get_nssaccount_name("testaccount1") .get_nssaccount_name("testaccount1")
@ -435,7 +447,7 @@ async fn test_cache_account_delete() {
#[tokio::test] #[tokio::test]
async fn test_cache_account_password() { async fn test_cache_account_password() {
let (cachelayer, adminclient) = setup_test(fixture(test_fixture)).await; let (cachelayer, adminclient) = setup_test(fixture(test_fixture)).await;
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
// Test authentication failure. // Test authentication failure.
let a1 = cachelayer let a1 = cachelayer
.pam_account_authenticate("testaccount1", TESTACCOUNT1_PASSWORD_INC) .pam_account_authenticate("testaccount1", TESTACCOUNT1_PASSWORD_INC)
@ -513,7 +525,7 @@ async fn test_cache_account_password() {
assert!(a7.is_none()); assert!(a7.is_none());
// go online // go online
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
// test auth success // test auth success
@ -527,7 +539,7 @@ async fn test_cache_account_password() {
#[tokio::test] #[tokio::test]
async fn test_cache_account_pam_allowed() { async fn test_cache_account_pam_allowed() {
let (cachelayer, adminclient) = setup_test(fixture(test_fixture)).await; let (cachelayer, adminclient) = setup_test(fixture(test_fixture)).await;
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
// Should fail // Should fail
let a1 = cachelayer let a1 = cachelayer
@ -559,7 +571,7 @@ async fn test_cache_account_pam_allowed() {
#[tokio::test] #[tokio::test]
async fn test_cache_account_pam_nonexist() { async fn test_cache_account_pam_nonexist() {
let (cachelayer, _adminclient) = setup_test(fixture(test_fixture)).await; let (cachelayer, _adminclient) = setup_test(fixture(test_fixture)).await;
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
let a1 = cachelayer let a1 = cachelayer
.pam_account_allowed("NO_SUCH_ACCOUNT") .pam_account_allowed("NO_SUCH_ACCOUNT")
@ -591,7 +603,7 @@ async fn test_cache_account_pam_nonexist() {
#[tokio::test] #[tokio::test]
async fn test_cache_account_expiry() { async fn test_cache_account_expiry() {
let (cachelayer, adminclient) = setup_test(fixture(test_fixture)).await; let (cachelayer, adminclient) = setup_test(fixture(test_fixture)).await;
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
// We need one good auth first to prime the cache with a hash. // We need one good auth first to prime the cache with a hash.
@ -636,12 +648,13 @@ async fn test_cache_account_expiry() {
// go offline // go offline
cachelayer.mark_offline().await; cachelayer.mark_offline().await;
// Now, check again ... // Now, check again. Since this uses the cached pw and we are offline, this
// will now succeed.
let a4 = cachelayer let a4 = cachelayer
.pam_account_authenticate("testaccount1", TESTACCOUNT1_PASSWORD_A) .pam_account_authenticate("testaccount1", TESTACCOUNT1_PASSWORD_A)
.await .await
.expect("failed to authenticate"); .expect("failed to authenticate");
assert!(a4 == Some(false)); assert!(a4 == Some(true));
// ssh keys should be empty // ssh keys should be empty
let sk = cachelayer let sk = cachelayer
@ -661,7 +674,7 @@ async fn test_cache_account_expiry() {
#[tokio::test] #[tokio::test]
async fn test_cache_nxcache() { async fn test_cache_nxcache() {
let (cachelayer, _adminclient) = setup_test(fixture(test_fixture)).await; let (cachelayer, _adminclient) = setup_test(fixture(test_fixture)).await;
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
// Is it in the nxcache? // Is it in the nxcache?
@ -737,20 +750,22 @@ async fn test_cache_nxset_account() {
// Important! This is what sets up that testaccount1 won't be resolved // Important! This is what sets up that testaccount1 won't be resolved
// because it's in the "local" user set. // because it's in the "local" user set.
cachelayer cachelayer
.reload_nxset(vec![("testaccount1".to_string(), 20000)].into_iter()) .reload_system_identities(
vec![EtcUser {
name: "testaccount1".to_string(),
uid: 30000,
gid: 30000,
password: Default::default(),
gecos: Default::default(),
homedir: Default::default(),
shell: Default::default(),
}],
vec![],
)
.await; .await;
// Force offline. Show we have no account
cachelayer.mark_offline().await;
let ut = cachelayer
.get_nssaccount_name("testaccount1")
.await
.expect("Failed to get from cache");
assert!(ut.is_none());
// go online // go online
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
// get the account // get the account
@ -758,7 +773,10 @@ async fn test_cache_nxset_account() {
.get_nssaccount_name("testaccount1") .get_nssaccount_name("testaccount1")
.await .await
.expect("Failed to get from cache"); .expect("Failed to get from cache");
assert!(ut.is_none());
let ut = ut.unwrap();
// Assert the user is the system version.
assert_eq!(ut.uid, 30000);
// go offline // go offline
cachelayer.mark_offline().await; cachelayer.mark_offline().await;
@ -768,14 +786,24 @@ async fn test_cache_nxset_account() {
.get_nssaccount_name("testaccount1") .get_nssaccount_name("testaccount1")
.await .await
.expect("Failed to get from cache"); .expect("Failed to get from cache");
assert!(ut.is_none());
// Finally, check it's not in all accounts. let ut = ut.unwrap();
// Assert the user is the system version.
assert_eq!(ut.uid, 30000);
// Finally, check it's the system version in all accounts.
let us = cachelayer let us = cachelayer
.get_nssaccounts() .get_nssaccounts()
.await .await
.expect("failed to list all accounts"); .expect("failed to list all accounts");
assert!(us.is_empty());
let us: Vec<_> = us
.into_iter()
.filter(|nss_user| nss_user.name == "testaccount1")
.collect();
assert_eq!(us.len(), 1);
assert_eq!(us[0].gid, 30000);
} }
#[tokio::test] #[tokio::test]
@ -785,25 +813,30 @@ async fn test_cache_nxset_group() {
// Important! This is what sets up that testgroup1 won't be resolved // Important! This is what sets up that testgroup1 won't be resolved
// because it's in the "local" group set. // because it's in the "local" group set.
cachelayer cachelayer
.reload_nxset(vec![("testgroup1".to_string(), 20001)].into_iter()) .reload_system_identities(
vec![],
vec![EtcGroup {
name: "testgroup1".to_string(),
// Important! We set the GID to differ from what kanidm stores so we can
// tell we got the system version.
gid: 30001,
password: Default::default(),
members: Default::default(),
}],
)
.await; .await;
// Force offline. Show we have no groups.
cachelayer.mark_offline().await;
let gt = cachelayer
.get_nssgroup_name("testgroup1")
.await
.expect("Failed to get from cache");
assert!(gt.is_none());
// go online. Get the group // go online. Get the group
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
let gt = cachelayer let gt = cachelayer
.get_nssgroup_name("testgroup1") .get_nssgroup_name("testgroup1")
.await .await
.expect("Failed to get from cache"); .expect("Failed to get from cache");
assert!(gt.is_none());
// We get the group, it's the system version. Check the gid.
let gt = gt.unwrap();
assert_eq!(gt.gid, 30001);
// go offline. still works // go offline. still works
cachelayer.mark_offline().await; cachelayer.mark_offline().await;
@ -811,15 +844,16 @@ async fn test_cache_nxset_group() {
.get_nssgroup_name("testgroup1") .get_nssgroup_name("testgroup1")
.await .await
.expect("Failed to get from cache"); .expect("Failed to get from cache");
assert!(gt.is_none());
let gt = gt.unwrap();
assert_eq!(gt.gid, 30001);
// clear cache, go online // clear cache, go online
assert!(cachelayer.invalidate().await.is_ok()); assert!(cachelayer.invalidate().await.is_ok());
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
// get an account with the group // get a kanidm account with the kanidm equivalent group
// DO NOT get the group yet.
let ut = cachelayer let ut = cachelayer
.get_nssaccount_name("testaccount1") .get_nssaccount_name("testaccount1")
.await .await
@ -829,56 +863,31 @@ async fn test_cache_nxset_group() {
// go offline. // go offline.
cachelayer.mark_offline().await; cachelayer.mark_offline().await;
// show we have the group despite no direct calls // show that the group we have is still the system version, and lacks our
// member.
let gt = cachelayer let gt = cachelayer
.get_nssgroup_name("testgroup1") .get_nssgroup_name("testgroup1")
.await .await
.expect("Failed to get from cache"); .expect("Failed to get from cache");
assert!(gt.is_none());
// Finally, check we only have the upg in the list let gt = gt.unwrap();
assert_eq!(gt.gid, 30001);
assert!(gt.members.is_empty());
// Finally, check we only have the system group version in the list.
let gs = cachelayer let gs = cachelayer
.get_nssgroups() .get_nssgroups()
.await .await
.expect("failed to list all groups"); .expect("failed to list all groups");
assert!(gs.len() == 1);
assert!(gs[0].name == "testaccount1@idm.example.com");
}
#[tokio::test] let gs: Vec<_> = gs
async fn test_cache_nxset_allow_overrides() { .into_iter()
let (cachelayer, _adminclient) = setup_test(fixture(test_fixture)).await; .filter(|nss_group| nss_group.name == "testgroup1")
.collect();
// Important! masked_group is set as an allowed override group even though debug!("{:?}", gs);
// it's been "inserted" to the nxset. This means it will still resolve! assert_eq!(gs.len(), 1);
cachelayer assert_eq!(gs[0].gid, 30001);
.reload_nxset(vec![("masked_group".to_string(), 20003)].into_iter())
.await;
// Force offline. Show we have no groups.
cachelayer.mark_offline().await;
let gt = cachelayer
.get_nssgroup_name("masked_group")
.await
.expect("Failed to get from cache");
assert!(gt.is_none());
// go online. Get the group
cachelayer.attempt_online().await;
assert!(cachelayer.test_connection().await);
let gt = cachelayer
.get_nssgroup_name("masked_group")
.await
.expect("Failed to get from cache");
assert!(gt.is_some());
// go offline. still works
cachelayer.mark_offline().await;
let gt = cachelayer
.get_nssgroup_name("masked_group")
.await
.expect("Failed to get from cache");
assert!(gt.is_some());
} }
/// Issue 1830. If cache items expire where we have an account and a group, and we /// Issue 1830. If cache items expire where we have an account and a group, and we
@ -892,7 +901,7 @@ async fn test_cache_nxset_allow_overrides() {
async fn test_cache_group_fk_deferred() { async fn test_cache_group_fk_deferred() {
let (cachelayer, _adminclient) = setup_test(fixture(test_fixture)).await; let (cachelayer, _adminclient) = setup_test(fixture(test_fixture)).await;
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
// Get the account then the group. // Get the account then the group.
@ -912,7 +921,7 @@ async fn test_cache_group_fk_deferred() {
// Invalidate all items. // Invalidate all items.
cachelayer.mark_offline().await; cachelayer.mark_offline().await;
assert!(cachelayer.invalidate().await.is_ok()); assert!(cachelayer.invalidate().await.is_ok());
cachelayer.attempt_online().await; cachelayer.mark_next_check_now(SystemTime::now()).await;
assert!(cachelayer.test_connection().await); assert!(cachelayer.test_connection().await);
// Get the *group*. It *should* still have it's members. // Get the *group*. It *should* still have it's members.