mirror of
https://github.com/kanidm/kanidm.git
synced 2025-05-06 09:05:04 +02:00
Compare commits
4 commits
06a9740ce2
...
0958d05ba3
Author | SHA1 | Date | |
---|---|---|---|
|
0958d05ba3 | ||
|
b113262357 | ||
|
d025e8fff0 | ||
|
ee46216093 |
4
Cargo.lock
generated
4
Cargo.lock
generated
|
@ -5658,9 +5658,9 @@ dependencies = [
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tokio"
|
name = "tokio"
|
||||||
version = "1.44.1"
|
version = "1.44.2"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a"
|
checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"backtrace",
|
"backtrace",
|
||||||
"bytes",
|
"bytes",
|
||||||
|
|
|
@ -268,7 +268,7 @@ tempfile = "3.15.0"
|
||||||
testkit-macros = { path = "./server/testkit-macros" }
|
testkit-macros = { path = "./server/testkit-macros" }
|
||||||
time = { version = "^0.3.36", features = ["formatting", "local-offset"] }
|
time = { version = "^0.3.36", features = ["formatting", "local-offset"] }
|
||||||
|
|
||||||
tokio = "^1.43.0"
|
tokio = "^1.44.2"
|
||||||
tokio-openssl = "^0.6.5"
|
tokio-openssl = "^0.6.5"
|
||||||
tokio-util = "^0.7.13"
|
tokio-util = "^0.7.13"
|
||||||
|
|
||||||
|
|
|
@ -84,8 +84,11 @@
|
||||||
- [Cryptography Key Domains (2024)](developers/designs/cryptography_key_domains.md)
|
- [Cryptography Key Domains (2024)](developers/designs/cryptography_key_domains.md)
|
||||||
- [Domain Join - Machine Accounts](developers/designs/domain_join_machine_accounts.md)
|
- [Domain Join - Machine Accounts](developers/designs/domain_join_machine_accounts.md)
|
||||||
- [Elevated Priv Mode](developers/designs/elevated_priv_mode.md)
|
- [Elevated Priv Mode](developers/designs/elevated_priv_mode.md)
|
||||||
|
- [Ephemeral Entries](developers/designs/ephemeral_entries.md)
|
||||||
- [OAuth2 Device Flow](developers/designs/oauth2_device_flow.md)
|
- [OAuth2 Device Flow](developers/designs/oauth2_device_flow.md)
|
||||||
- [OAuth2 Refresh Tokens](developers/designs/oauth2_refresh_tokens.md)
|
- [OAuth2 Refresh Tokens](developers/designs/oauth2_refresh_tokens.md)
|
||||||
|
- [SubEntries (2024)](developers/designs/subentries.md)
|
||||||
|
- [Schema (2024)](developers/designs/schema.md)
|
||||||
- [Replication Coordinator](developers/designs/replication_coordinator.md)
|
- [Replication Coordinator](developers/designs/replication_coordinator.md)
|
||||||
- [Replication Design and Notes](developers/designs/replication_design_and_notes.md)
|
- [Replication Design and Notes](developers/designs/replication_design_and_notes.md)
|
||||||
- [REST Interface](developers/designs/rest_interface.md)
|
- [REST Interface](developers/designs/rest_interface.md)
|
||||||
|
|
24
book/src/developers/designs/ephemeral_entries.md
Normal file
24
book/src/developers/designs/ephemeral_entries.md
Normal file
|
@ -0,0 +1,24 @@
|
||||||
|
# Ephemeral Entries
|
||||||
|
|
||||||
|
We have a number of data types and entries that may need to be automatically deleted
|
||||||
|
after some time window has passed. This could be an event notification, a group for a
|
||||||
|
temporary group membership, a session token, or more.
|
||||||
|
|
||||||
|
To achieve this we need a way to mark entries as ephemeral. After a set time has passed
|
||||||
|
the entry will be automatically deleted.
|
||||||
|
|
||||||
|
## Class
|
||||||
|
|
||||||
|
A new class `EphemeralObject` will be added. It will have a must attribute of `removedAt`
|
||||||
|
which will contain a time at which the entry will be deleted.
|
||||||
|
|
||||||
|
## Automatic Deletion
|
||||||
|
|
||||||
|
A new interval task similar to the recycle/tombstone tasks will be added that checks for
|
||||||
|
and deletes ephemeral objects once removedAt has passed.
|
||||||
|
|
||||||
|
## Ordering Index
|
||||||
|
|
||||||
|
To make this efficient we should consider addition of an "ordering" index on the `removedAt`
|
||||||
|
attribute to improve searching for these. Initially this won't be needed as there will be
|
||||||
|
very few of these, but it should be added in future.
|
47
book/src/developers/designs/schema.md
Normal file
47
book/src/developers/designs/schema.md
Normal file
|
@ -0,0 +1,47 @@
|
||||||
|
# Schema Changes 2024 / 2025
|
||||||
|
|
||||||
|
Our current schema structure has served us very well, remaining almost unchanged since nearly 2018.
|
||||||
|
|
||||||
|
The current design is a heavily adapted LDAP/AD style structure with classes that define a set
|
||||||
|
of may and must attributes, and attributes that define properties like single value, multivalue,
|
||||||
|
the types of indexes to apply, and the syntax of the attribute.
|
||||||
|
|
||||||
|
However, after 6 years we are starting to finally run into some limits.
|
||||||
|
|
||||||
|
## Proposed Changes
|
||||||
|
|
||||||
|
### Removal of Multivalue
|
||||||
|
|
||||||
|
We currently have many types that have to be multivalue capable out of syntax compliance but are never
|
||||||
|
actually made to be multivalue types. This creates overheads in the server but also in how we code
|
||||||
|
the valuesets themselves.
|
||||||
|
|
||||||
|
The multivalue type should be removed. The syntax should imply if the type is single or multivalue.
|
||||||
|
For example, bool is always single value. utf8 is single value. utf8strings is multivalue.
|
||||||
|
|
||||||
|
This allows consistent handling with SCIM which has separate handling of multi/single value types.
|
||||||
|
|
||||||
|
### Indexing
|
||||||
|
|
||||||
|
Currently we have a number of indexing flags like equality, substring, presence. In the future we
|
||||||
|
would like to add ordering. However, these don't make sense on all types. How do you "order" certificates?
|
||||||
|
How do you "substring" an integer? How do you perform equality on two passkeys?
|
||||||
|
|
||||||
|
To resolve this schema should indicate a boolean for "indexed" or not based on if the value will be
|
||||||
|
queried. The syntax will then imply the class of indexes that will be emitted for the type.
|
||||||
|
|
||||||
|
### Migration Behaviour
|
||||||
|
|
||||||
|
Certain attributes for internal server migrations need to have their content asserted, merged, or
|
||||||
|
ignored. This behaviour should be flagged in the schema to make it more consistent and visible how
|
||||||
|
these types will be affected during a migration, and to prevent human error.
|
||||||
|
|
||||||
|
### SubAttributes and SubAttribute Syntax
|
||||||
|
|
||||||
|
SCIM allows complex structure types to exist. We could consider a schema syntax to allow generic
|
||||||
|
structures of these based on a set of limited and restricted SubAttributes. For example we might
|
||||||
|
have a SubAttribute of "Mail" and it allows two SubAttributeValues of "value": email, and "primary": bool.
|
||||||
|
|
||||||
|
We would need more thought here about this, and it's likely its own whole separate topic including
|
||||||
|
how to handle it with access controls.
|
||||||
|
|
131
book/src/developers/designs/subentries.md
Normal file
131
book/src/developers/designs/subentries.md
Normal file
|
@ -0,0 +1,131 @@
|
||||||
|
# Sub-Entries
|
||||||
|
|
||||||
|
As Kanidm has grown we have encountered issues with growing complexity of values and valuesets. These
|
||||||
|
can be hard to create and add, they touch a lot of the codebase, and they add complexity to new
|
||||||
|
features or changes.
|
||||||
|
|
||||||
|
These complex valueset types (such as authsession, oauth2session, application passwords) arose out
|
||||||
|
of a need to have data associated to an account, but that data required structure and nesting
|
||||||
|
of certain components.
|
||||||
|
|
||||||
|
Rather than continue to add more complex and unwieldy valuesets, we need a way to create entries
|
||||||
|
that refer to others.
|
||||||
|
|
||||||
|
## Existing Referential Code
|
||||||
|
|
||||||
|
The existing referential integrity code is designed to ensure that values from one entry are removed
|
||||||
|
cleanly if the referenced entry is deleted. As an example, a group with a member "ellie" should have
|
||||||
|
the reference deleted when the entry "ellie" is deleted.
|
||||||
|
|
||||||
|
If the group were deleted, this has no impact on ellie, since the reference is defining a weak
|
||||||
|
relationship - the user is a member of a group.
|
||||||
|
|
||||||
|
## What Is Required
|
||||||
|
|
||||||
|
What we need in a new reference type are the following properties.
|
||||||
|
|
||||||
|
* A sub-entry references an owning entry
|
||||||
|
* A sub-entry is deleted when the owning entry is deleted (aka recycled)
|
||||||
|
* Sub-entries can not exist without a related owning entry
|
||||||
|
* Deletion of the sub-entry does not delete the entry
|
||||||
|
* When an entry is searched, specific types of sub-entries can be fetched at the same time
|
||||||
|
* The owning entry can imply access controls to related sub-entries
|
||||||
|
* Conditional creation of sub-entries and adherence to certain rules (such as, "identity X can create sub-entry Y only if the owning entry is itself/X")
|
||||||
|
* Subentries may have a minimal / flattened representation that can inline to the owning entry via a phantomAttribute
|
||||||
|
|
||||||
|
Properties we can not maintain are
|
||||||
|
|
||||||
|
* An entry has a `must` relationship for a sub-entry to exist
|
||||||
|
* SubEntries may not have SubEntries
|
||||||
|
|
||||||
|
## Example SubEntry
|
||||||
|
|
||||||
|
Auth Sessions, OAuth2 Sessions, ApiTokens, Application Passwords, are examples of candidates to become SubEntries.
|
||||||
|
|
||||||
|
```
|
||||||
|
class: person
|
||||||
|
name: ellie
|
||||||
|
uuid: A
|
||||||
|
|
||||||
|
class: subentry
|
||||||
|
class: authsession
|
||||||
|
SubEntryOf: A
|
||||||
|
sessionStartTime: ...
|
||||||
|
sessionEndTime: ...
|
||||||
|
sessionId: ...
|
||||||
|
```
|
||||||
|
|
||||||
|
Good candidates are structured data that are logically independent from the owning entry and may not
|
||||||
|
always need presentation with the owning entry. Displaying a person does not always require its
|
||||||
|
subentries to be displayed.
|
||||||
|
|
||||||
|
## Non-Examples
|
||||||
|
|
||||||
|
Some attributes should not become subentries, generally things with minimal or small structures
|
||||||
|
that benefit from being present on the owning entry for human consumption.
|
||||||
|
|
||||||
|
* Mail
|
||||||
|
* Address
|
||||||
|
* Certificates
|
||||||
|
* Passkeys
|
||||||
|
|
||||||
|
## AccessControls
|
||||||
|
|
||||||
|
Access Controls need to be able to express a relationship between an owner and the subEntry. For
|
||||||
|
example we want rules that can express:
|
||||||
|
|
||||||
|
* Identity X can create an AuthSession where the AuthSession must reference Identity X
|
||||||
|
* `idm_admins` can delete/modify ApiTokens where the owning entries are persons and not members of `idm_high_priv`
|
||||||
|
|
||||||
|
We need to extend the `filter` type to support a `SubEntryOfSelf`. This
|
||||||
|
is similar to the `SelfUUID` type, but rather than expanding to `Uuid(...)` it would expand to
|
||||||
|
`SubEntryOf(...)`. As `create` access controls define that the resultant entry *must* match
|
||||||
|
the target filter, this achieves the goal.
|
||||||
|
|
||||||
|
We also need a new ACP Target Type. This new target type needs two filters - one
|
||||||
|
to express the relationship to the SubEntry, and the other to the relationship of the SubEntryOwner. This
|
||||||
|
would constitute two filters
|
||||||
|
|
||||||
|
```
|
||||||
|
SubEntryTarget: class eq apitokens
|
||||||
|
EntryTarget: person and not memberOf idm_high_priv
|
||||||
|
```
|
||||||
|
|
||||||
|
Both conditions must be met for the access control to apply. In the case of a `create`, the SubEntryTarget
|
||||||
|
is used for assertion of the SubEntry adherence to the filter. SubEntryTarget implies "class eq SubEntry". EntryTarget
|
||||||
|
implies `and not class eq SubEntry`.
|
||||||
|
|
||||||
|
## Search / Access
|
||||||
|
|
||||||
|
How to handle where we need to check the entryTarget if we don't have the entry? Do SubEntries need
|
||||||
|
to auto-dereference and link to their owning entry for filter application?
|
||||||
|
|
||||||
|
If we deref, we need to be careful to avoid ref-count loops, since we would need to embed Arc or Weak
|
||||||
|
references into the results.
|
||||||
|
|
||||||
|
|
||||||
|
Alternately, is this where we need pre-extraction of access controls?
|
||||||
|
|
||||||
|
Could SubEntries only be accessed via their Parent Entry via embedding?
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
## Deletion
|
||||||
|
|
||||||
|
During a deletion, all deleted entries will also imply the deletion of their SubEntries. These SubEntries
|
||||||
|
will be marked with a flag to distinguish them as an indirect delete.
|
||||||
|
|
||||||
|
## Reviving
|
||||||
|
|
||||||
|
During a revive, a revived entry implies the revival of its SubEntries that are marked as indirect
|
||||||
|
deleted.
|
||||||
|
|
||||||
|
## Replication / Consistency
|
||||||
|
|
||||||
|
If a SubEntry is created without an owner, or becomes orphaned due to a replication conflict of
|
||||||
|
its owning entry, the SubEntries are deleted.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -194,7 +194,8 @@ impl Into<PamAuthResponse> for AuthRequest {
|
||||||
}
|
}
|
||||||
|
|
||||||
pub enum AuthResult {
|
pub enum AuthResult {
|
||||||
Success { token: UserToken },
|
Success,
|
||||||
|
SuccessUpdate { new_token: UserToken },
|
||||||
Denied,
|
Denied,
|
||||||
Next(AuthRequest),
|
Next(AuthRequest),
|
||||||
}
|
}
|
||||||
|
@ -251,6 +252,7 @@ pub trait IdProvider {
|
||||||
async fn unix_user_online_auth_step(
|
async fn unix_user_online_auth_step(
|
||||||
&self,
|
&self,
|
||||||
_account_id: &str,
|
_account_id: &str,
|
||||||
|
_current_token: Option<&UserToken>,
|
||||||
_cred_handler: &mut AuthCredHandler,
|
_cred_handler: &mut AuthCredHandler,
|
||||||
_pam_next_req: PamAuthRequest,
|
_pam_next_req: PamAuthRequest,
|
||||||
_tpm: &mut tpm::BoxedDynTpm,
|
_tpm: &mut tpm::BoxedDynTpm,
|
||||||
|
@ -290,7 +292,8 @@ pub trait IdProvider {
|
||||||
// TPM key.
|
// TPM key.
|
||||||
async fn unix_user_offline_auth_step(
|
async fn unix_user_offline_auth_step(
|
||||||
&self,
|
&self,
|
||||||
_token: &UserToken,
|
_current_token: Option<&UserToken>,
|
||||||
|
_session_token: &UserToken,
|
||||||
_cred_handler: &mut AuthCredHandler,
|
_cred_handler: &mut AuthCredHandler,
|
||||||
_pam_next_req: PamAuthRequest,
|
_pam_next_req: PamAuthRequest,
|
||||||
_tpm: &mut tpm::BoxedDynTpm,
|
_tpm: &mut tpm::BoxedDynTpm,
|
||||||
|
|
|
@ -55,8 +55,6 @@ impl KanidmProvider {
|
||||||
tpm: &mut tpm::BoxedDynTpm,
|
tpm: &mut tpm::BoxedDynTpm,
|
||||||
machine_key: &tpm::MachineKey,
|
machine_key: &tpm::MachineKey,
|
||||||
) -> Result<Self, IdpError> {
|
) -> Result<Self, IdpError> {
|
||||||
// FUTURE: Randomised jitter on next check at startup.
|
|
||||||
|
|
||||||
// Initially retrieve our HMAC key.
|
// Initially retrieve our HMAC key.
|
||||||
let loadable_hmac_key: Option<tpm::LoadableHmacKey> = keystore
|
let loadable_hmac_key: Option<tpm::LoadableHmacKey> = keystore
|
||||||
.get_tagged_hsm_key(KANIDM_HMAC_KEY)
|
.get_tagged_hsm_key(KANIDM_HMAC_KEY)
|
||||||
|
@ -248,13 +246,25 @@ impl KanidmProviderInternal {
|
||||||
// Proceed
|
// Proceed
|
||||||
CacheState::Online => true,
|
CacheState::Online => true,
|
||||||
CacheState::OfflineNextCheck(at_time) if now >= at_time => {
|
CacheState::OfflineNextCheck(at_time) if now >= at_time => {
|
||||||
// Attempt online. If fails, return token.
|
|
||||||
self.attempt_online(tpm, now).await
|
self.attempt_online(tpm, now).await
|
||||||
}
|
}
|
||||||
CacheState::OfflineNextCheck(_) | CacheState::Offline => false,
|
CacheState::OfflineNextCheck(_) | CacheState::Offline => false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[instrument(level = "debug", skip_all)]
|
||||||
|
async fn check_online_right_meow(
|
||||||
|
&mut self,
|
||||||
|
tpm: &mut tpm::BoxedDynTpm,
|
||||||
|
now: SystemTime,
|
||||||
|
) -> bool {
|
||||||
|
match self.state {
|
||||||
|
CacheState::Online => true,
|
||||||
|
CacheState::OfflineNextCheck(_) => self.attempt_online(tpm, now).await,
|
||||||
|
CacheState::Offline => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[instrument(level = "debug", skip_all)]
|
#[instrument(level = "debug", skip_all)]
|
||||||
async fn attempt_online(&mut self, _tpm: &mut tpm::BoxedDynTpm, now: SystemTime) -> bool {
|
async fn attempt_online(&mut self, _tpm: &mut tpm::BoxedDynTpm, now: SystemTime) -> bool {
|
||||||
let mut max_attempts = 3;
|
let mut max_attempts = 3;
|
||||||
|
@ -295,7 +305,7 @@ impl IdProvider for KanidmProvider {
|
||||||
|
|
||||||
async fn attempt_online(&self, tpm: &mut tpm::BoxedDynTpm, now: SystemTime) -> bool {
|
async fn attempt_online(&self, tpm: &mut tpm::BoxedDynTpm, now: SystemTime) -> bool {
|
||||||
let mut inner = self.inner.lock().await;
|
let mut inner = self.inner.lock().await;
|
||||||
inner.check_online(tpm, now).await
|
inner.check_online_right_meow(tpm, now).await
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn mark_next_check(&self, now: SystemTime) {
|
async fn mark_next_check(&self, now: SystemTime) {
|
||||||
|
@ -431,6 +441,7 @@ impl IdProvider for KanidmProvider {
|
||||||
async fn unix_user_online_auth_step(
|
async fn unix_user_online_auth_step(
|
||||||
&self,
|
&self,
|
||||||
account_id: &str,
|
account_id: &str,
|
||||||
|
current_token: Option<&UserToken>,
|
||||||
cred_handler: &mut AuthCredHandler,
|
cred_handler: &mut AuthCredHandler,
|
||||||
pam_next_req: PamAuthRequest,
|
pam_next_req: PamAuthRequest,
|
||||||
tpm: &mut tpm::BoxedDynTpm,
|
tpm: &mut tpm::BoxedDynTpm,
|
||||||
|
@ -449,15 +460,23 @@ impl IdProvider for KanidmProvider {
|
||||||
|
|
||||||
match auth_result {
|
match auth_result {
|
||||||
Ok(Some(n_tok)) => {
|
Ok(Some(n_tok)) => {
|
||||||
let mut token = UserToken::from(n_tok);
|
let mut new_token = UserToken::from(n_tok);
|
||||||
token.kanidm_update_cached_password(
|
|
||||||
|
// Update any keys that may have been in the db in the current
|
||||||
|
// token.
|
||||||
|
if let Some(previous_token) = current_token {
|
||||||
|
new_token.extra_keys = previous_token.extra_keys.clone();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set any new keys that are relevant from this authentication
|
||||||
|
new_token.kanidm_update_cached_password(
|
||||||
&inner.crypto_policy,
|
&inner.crypto_policy,
|
||||||
cred.as_str(),
|
cred.as_str(),
|
||||||
tpm,
|
tpm,
|
||||||
&inner.hmac_key,
|
&inner.hmac_key,
|
||||||
);
|
);
|
||||||
|
|
||||||
Ok(AuthResult::Success { token })
|
Ok(AuthResult::SuccessUpdate { new_token })
|
||||||
}
|
}
|
||||||
Ok(None) => {
|
Ok(None) => {
|
||||||
// TODO: i'm not a huge fan of this rn, but currently the way we handle
|
// TODO: i'm not a huge fan of this rn, but currently the way we handle
|
||||||
|
@ -552,7 +571,8 @@ impl IdProvider for KanidmProvider {
|
||||||
|
|
||||||
async fn unix_user_offline_auth_step(
|
async fn unix_user_offline_auth_step(
|
||||||
&self,
|
&self,
|
||||||
token: &UserToken,
|
current_token: Option<&UserToken>,
|
||||||
|
session_token: &UserToken,
|
||||||
cred_handler: &mut AuthCredHandler,
|
cred_handler: &mut AuthCredHandler,
|
||||||
pam_next_req: PamAuthRequest,
|
pam_next_req: PamAuthRequest,
|
||||||
tpm: &mut tpm::BoxedDynTpm,
|
tpm: &mut tpm::BoxedDynTpm,
|
||||||
|
@ -561,11 +581,13 @@ impl IdProvider for KanidmProvider {
|
||||||
(AuthCredHandler::Password, PamAuthRequest::Password { cred }) => {
|
(AuthCredHandler::Password, PamAuthRequest::Password { cred }) => {
|
||||||
let inner = self.inner.lock().await;
|
let inner = self.inner.lock().await;
|
||||||
|
|
||||||
if token.kanidm_check_cached_password(cred.as_str(), tpm, &inner.hmac_key) {
|
if session_token.kanidm_check_cached_password(cred.as_str(), tpm, &inner.hmac_key) {
|
||||||
|
// Ensure we have either the latest token, or if none, at least the session token.
|
||||||
|
let new_token = current_token.unwrap_or(session_token).clone();
|
||||||
|
|
||||||
// TODO: We can update the token here and then do lockouts.
|
// TODO: We can update the token here and then do lockouts.
|
||||||
Ok(AuthResult::Success {
|
|
||||||
token: token.clone(),
|
Ok(AuthResult::SuccessUpdate { new_token })
|
||||||
})
|
|
||||||
} else {
|
} else {
|
||||||
Ok(AuthResult::Denied)
|
Ok(AuthResult::Denied)
|
||||||
}
|
}
|
||||||
|
|
|
@ -47,7 +47,6 @@ pub enum AuthSession {
|
||||||
client: Arc<dyn IdProvider + Sync + Send>,
|
client: Arc<dyn IdProvider + Sync + Send>,
|
||||||
account_id: String,
|
account_id: String,
|
||||||
id: Id,
|
id: Id,
|
||||||
token: Option<Box<UserToken>>,
|
|
||||||
cred_handler: AuthCredHandler,
|
cred_handler: AuthCredHandler,
|
||||||
/// Some authentication operations may need to spawn background tasks. These tasks need
|
/// Some authentication operations may need to spawn background tasks. These tasks need
|
||||||
/// to know when to stop as the caller has disconnected. This receiver allows that, so
|
/// to know when to stop as the caller has disconnected. This receiver allows that, so
|
||||||
|
@ -59,7 +58,7 @@ pub enum AuthSession {
|
||||||
account_id: String,
|
account_id: String,
|
||||||
id: Id,
|
id: Id,
|
||||||
client: Arc<dyn IdProvider + Sync + Send>,
|
client: Arc<dyn IdProvider + Sync + Send>,
|
||||||
token: Box<UserToken>,
|
session_token: Box<UserToken>,
|
||||||
cred_handler: AuthCredHandler,
|
cred_handler: AuthCredHandler,
|
||||||
},
|
},
|
||||||
System {
|
System {
|
||||||
|
@ -225,7 +224,7 @@ impl Resolver {
|
||||||
// Attempt to search these in the db.
|
// Attempt to search these in the db.
|
||||||
let mut dbtxn = self.db.write().await;
|
let mut dbtxn = self.db.write().await;
|
||||||
let r = dbtxn.get_account(account_id).map_err(|err| {
|
let r = dbtxn.get_account(account_id).map_err(|err| {
|
||||||
debug!("get_cached_usertoken {:?}", err);
|
debug!(?err, "get_cached_usertoken");
|
||||||
})?;
|
})?;
|
||||||
|
|
||||||
drop(dbtxn);
|
drop(dbtxn);
|
||||||
|
@ -318,7 +317,12 @@ impl Resolver {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn set_cache_usertoken(&self, token: &mut UserToken) -> Result<(), ()> {
|
async fn set_cache_usertoken(
|
||||||
|
&self,
|
||||||
|
token: &mut UserToken,
|
||||||
|
// This is just for proof that only one write can occur at a time.
|
||||||
|
_tpm: &mut BoxedDynTpm,
|
||||||
|
) -> Result<(), ()> {
|
||||||
// Set an expiry
|
// Set an expiry
|
||||||
let ex_time = SystemTime::now() + Duration::from_secs(self.timeout_seconds);
|
let ex_time = SystemTime::now() + Duration::from_secs(self.timeout_seconds);
|
||||||
let offset = ex_time
|
let offset = ex_time
|
||||||
|
@ -451,6 +455,22 @@ impl Resolver {
|
||||||
|
|
||||||
let mut hsm_lock = self.hsm.lock().await;
|
let mut hsm_lock = self.hsm.lock().await;
|
||||||
|
|
||||||
|
// We need to re-acquire the token now behind the hsmlock - this is so that
|
||||||
|
// we know that as we write the updated token, we know that no one else has
|
||||||
|
// written to this token, since we are now the only task that is allowed
|
||||||
|
// to be in a write phase.
|
||||||
|
let token = if token.is_some() {
|
||||||
|
self.get_cached_usertoken(account_id)
|
||||||
|
.await
|
||||||
|
.map(|(_expired, option_token)| option_token)
|
||||||
|
.map_err(|err| {
|
||||||
|
debug!(?err, "get_usertoken error");
|
||||||
|
})?
|
||||||
|
} else {
|
||||||
|
// Was already none, leave it that way.
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
let user_get_result = if let Some(tok) = token.as_ref() {
|
let user_get_result = if let Some(tok) = token.as_ref() {
|
||||||
// Re-use the provider that the token is from.
|
// Re-use the provider that the token is from.
|
||||||
match self.client_ids.get(&tok.provider) {
|
match self.client_ids.get(&tok.provider) {
|
||||||
|
@ -486,12 +506,11 @@ impl Resolver {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
drop(hsm_lock);
|
|
||||||
|
|
||||||
match user_get_result {
|
match user_get_result {
|
||||||
Ok(UserTokenState::Update(mut n_tok)) => {
|
Ok(UserTokenState::Update(mut n_tok)) => {
|
||||||
// We have the token!
|
// We have the token!
|
||||||
self.set_cache_usertoken(&mut n_tok).await?;
|
self.set_cache_usertoken(&mut n_tok, hsm_lock.deref_mut())
|
||||||
|
.await?;
|
||||||
Ok(Some(n_tok))
|
Ok(Some(n_tok))
|
||||||
}
|
}
|
||||||
Ok(UserTokenState::NotFound) => {
|
Ok(UserTokenState::NotFound) => {
|
||||||
|
@ -958,7 +977,6 @@ impl Resolver {
|
||||||
client,
|
client,
|
||||||
account_id: account_id.to_string(),
|
account_id: account_id.to_string(),
|
||||||
id,
|
id,
|
||||||
token: Some(Box::new(token)),
|
|
||||||
cred_handler,
|
cred_handler,
|
||||||
shutdown_rx,
|
shutdown_rx,
|
||||||
};
|
};
|
||||||
|
@ -979,7 +997,7 @@ impl Resolver {
|
||||||
account_id: account_id.to_string(),
|
account_id: account_id.to_string(),
|
||||||
id,
|
id,
|
||||||
client,
|
client,
|
||||||
token: Box::new(token),
|
session_token: Box::new(token),
|
||||||
cred_handler,
|
cred_handler,
|
||||||
};
|
};
|
||||||
Ok((auth_session, next_req.into()))
|
Ok((auth_session, next_req.into()))
|
||||||
|
@ -1022,7 +1040,6 @@ impl Resolver {
|
||||||
client: client.clone(),
|
client: client.clone(),
|
||||||
account_id: account_id.to_string(),
|
account_id: account_id.to_string(),
|
||||||
id,
|
id,
|
||||||
token: None,
|
|
||||||
cred_handler,
|
cred_handler,
|
||||||
shutdown_rx,
|
shutdown_rx,
|
||||||
};
|
};
|
||||||
|
@ -1050,19 +1067,32 @@ impl Resolver {
|
||||||
auth_session: &mut AuthSession,
|
auth_session: &mut AuthSession,
|
||||||
pam_next_req: PamAuthRequest,
|
pam_next_req: PamAuthRequest,
|
||||||
) -> Result<PamAuthResponse, ()> {
|
) -> Result<PamAuthResponse, ()> {
|
||||||
|
let mut hsm_lock = self.hsm.lock().await;
|
||||||
|
|
||||||
let maybe_err = match &mut *auth_session {
|
let maybe_err = match &mut *auth_session {
|
||||||
&mut AuthSession::Online {
|
&mut AuthSession::Online {
|
||||||
ref client,
|
ref client,
|
||||||
ref account_id,
|
ref account_id,
|
||||||
id: _,
|
ref id,
|
||||||
token: _,
|
|
||||||
ref mut cred_handler,
|
ref mut cred_handler,
|
||||||
ref shutdown_rx,
|
ref shutdown_rx,
|
||||||
} => {
|
} => {
|
||||||
let mut hsm_lock = self.hsm.lock().await;
|
// This is not used in the authentication, but is so that any new
|
||||||
|
// extra keys or data on the token are updated correctly if the authentication
|
||||||
|
// requests an update. Since we hold the hsm_lock, no other task can
|
||||||
|
// update this token between now and completion of the fn.
|
||||||
|
let current_token = self
|
||||||
|
.get_cached_usertoken(id)
|
||||||
|
.await
|
||||||
|
.map(|(_expired, option_token)| option_token)
|
||||||
|
.map_err(|err| {
|
||||||
|
debug!(?err, "get_usertoken error");
|
||||||
|
})?;
|
||||||
|
|
||||||
let result = client
|
let result = client
|
||||||
.unix_user_online_auth_step(
|
.unix_user_online_auth_step(
|
||||||
account_id,
|
account_id,
|
||||||
|
current_token.as_ref(),
|
||||||
cred_handler,
|
cred_handler,
|
||||||
pam_next_req,
|
pam_next_req,
|
||||||
hsm_lock.deref_mut(),
|
hsm_lock.deref_mut(),
|
||||||
|
@ -1071,7 +1101,7 @@ impl Resolver {
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
match result {
|
match result {
|
||||||
Ok(AuthResult::Success { .. }) => {
|
Ok(AuthResult::SuccessUpdate { .. } | AuthResult::Success) => {
|
||||||
info!(?account_id, "Authentication Success");
|
info!(?account_id, "Authentication Success");
|
||||||
}
|
}
|
||||||
Ok(AuthResult::Denied) => {
|
Ok(AuthResult::Denied) => {
|
||||||
|
@ -1087,17 +1117,29 @@ impl Resolver {
|
||||||
}
|
}
|
||||||
&mut AuthSession::Offline {
|
&mut AuthSession::Offline {
|
||||||
ref account_id,
|
ref account_id,
|
||||||
id: _,
|
ref id,
|
||||||
ref client,
|
ref client,
|
||||||
ref token,
|
ref session_token,
|
||||||
ref mut cred_handler,
|
ref mut cred_handler,
|
||||||
} => {
|
} => {
|
||||||
|
// This is not used in the authentication, but is so that any new
|
||||||
|
// extra keys or data on the token are updated correctly if the authentication
|
||||||
|
// requests an update. Since we hold the hsm_lock, no other task can
|
||||||
|
// update this token between now and completion of the fn.
|
||||||
|
let current_token = self
|
||||||
|
.get_cached_usertoken(id)
|
||||||
|
.await
|
||||||
|
.map(|(_expired, option_token)| option_token)
|
||||||
|
.map_err(|err| {
|
||||||
|
debug!(?err, "get_usertoken error");
|
||||||
|
})?;
|
||||||
|
|
||||||
// We are offline, continue. Remember, authsession should have
|
// We are offline, continue. Remember, authsession should have
|
||||||
// *everything you need* to proceed here!
|
// *everything you need* to proceed here!
|
||||||
let mut hsm_lock = self.hsm.lock().await;
|
|
||||||
let result = client
|
let result = client
|
||||||
.unix_user_offline_auth_step(
|
.unix_user_offline_auth_step(
|
||||||
token,
|
current_token.as_ref(),
|
||||||
|
session_token,
|
||||||
cred_handler,
|
cred_handler,
|
||||||
pam_next_req,
|
pam_next_req,
|
||||||
hsm_lock.deref_mut(),
|
hsm_lock.deref_mut(),
|
||||||
|
@ -1105,7 +1147,7 @@ impl Resolver {
|
||||||
.await;
|
.await;
|
||||||
|
|
||||||
match result {
|
match result {
|
||||||
Ok(AuthResult::Success { .. }) => {
|
Ok(AuthResult::SuccessUpdate { .. } | AuthResult::Success) => {
|
||||||
info!(?account_id, "Authentication Success");
|
info!(?account_id, "Authentication Success");
|
||||||
}
|
}
|
||||||
Ok(AuthResult::Denied) => {
|
Ok(AuthResult::Denied) => {
|
||||||
|
@ -1156,8 +1198,13 @@ impl Resolver {
|
||||||
|
|
||||||
match maybe_err {
|
match maybe_err {
|
||||||
// What did the provider direct us to do next?
|
// What did the provider direct us to do next?
|
||||||
Ok(AuthResult::Success { mut token }) => {
|
Ok(AuthResult::Success) => {
|
||||||
self.set_cache_usertoken(&mut token).await?;
|
*auth_session = AuthSession::Success;
|
||||||
|
Ok(PamAuthResponse::Success)
|
||||||
|
}
|
||||||
|
Ok(AuthResult::SuccessUpdate { mut new_token }) => {
|
||||||
|
self.set_cache_usertoken(&mut new_token, hsm_lock.deref_mut())
|
||||||
|
.await?;
|
||||||
*auth_session = AuthSession::Success;
|
*auth_session = AuthSession::Success;
|
||||||
|
|
||||||
Ok(PamAuthResponse::Success)
|
Ok(PamAuthResponse::Success)
|
||||||
|
|
Loading…
Reference in a new issue