Mirror of https://github.com/kanidm/kanidm.git (synced 2025-05-06 17:15:04 +02:00)

Compare commits: 13 commits, 0fbc25eb65 ... 6e3783d9a2
Commits: 6e3783d9a2, 9bf17c4846, ed88b72080, d0b0b163fd, ce410f440c, 77271c1720, e838da9a08, 94b7285cbb, 998e56d648, 2e3f4f30ae, 15410a7830, 8af51175f5, 685746796e
Changed paths: .github/workflows, book/src, examples, server/lib/src (constants, idm, plugins, server), unix_integration
.github/workflows/docker_build_kanidm.yml (vendored, 17 lines changed)

@@ -35,9 +35,15 @@ jobs:
 needs:
 - set_tag_values
 steps:
-- uses: actions/checkout@v4
+- name: Checkout repository
+uses: actions/checkout@v4
+with:
+persist-credentials: false
 - name: Set up Docker Buildx
 uses: docker/setup-buildx-action@v3
+- name: Docker metadata
+id: meta
+uses: docker/metadata-action@v5
 - name: Build kanidm
 uses: docker/build-push-action@v6
 with:

@@ -47,6 +53,9 @@ jobs:
 build-args: |
 "KANIDM_FEATURES="
 file: tools/Dockerfile
+context: .
+labels: ${{ steps.meta.outputs.labels }}
+annotations: ${{ steps.meta.outputs.annotations }}
 # Must use OCI exporter for multi-arch: https://github.com/docker/buildx/pull/1813
 outputs: type=oci,dest=/tmp/kanidm-docker.tar
 - name: Upload artifact

@@ -60,8 +69,8 @@ jobs:
 # This step is split so that we don't apply "packages: write" permission
 # except when uploading the final Docker image to GHCR.
 runs-on: ubuntu-latest
-if: ( github.ref_type == 'tag' || github.ref == 'refs/heads/master' ) && github.repository == 'kanidm/kanidm'
-needs: kanidm_build
+if: ( github.ref_type == 'tag' || github.ref == 'refs/heads/master' )
+needs: [kanidm_build, set_tag_values]
 permissions:
 packages: write

@@ -78,4 +87,4 @@ jobs:
 echo "${{ secrets.GITHUB_TOKEN }}" | \
 oras login -u "${{ github.actor }}" --password-stdin ghcr.io
 oras copy --from-oci-layout "/tmp/kanidm-docker.tar:devel" \
-"ghcr.io/${{ github.repository_owner }}/kanidm:devel"
+"ghcr.io/${{ needs.set_tag_values.outputs.owner_lc }}/kanidm:devel"
29
.github/workflows/docker_build_kanidmd.yml
vendored
29
.github/workflows/docker_build_kanidmd.yml
vendored
|
@ -35,27 +35,15 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: set_tag_values
|
needs: set_tag_values
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v4
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
with:
|
||||||
|
persist-credentials: false
|
||||||
- name: Set up Docker Buildx
|
- name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v3
|
uses: docker/setup-buildx-action@v3
|
||||||
- name: Docker metadata
|
- name: Docker metadata
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v5
|
uses: docker/metadata-action@v5
|
||||||
with:
|
|
||||||
# list of Docker images to use as base name for tags
|
|
||||||
# images: |
|
|
||||||
# kanidm/kanidmd
|
|
||||||
# ghcr.io/username/app
|
|
||||||
# generate Docker tags based on the following events/attributes
|
|
||||||
tags: |
|
|
||||||
type=schedule
|
|
||||||
type=ref,event=branch
|
|
||||||
type=ref,event=pr
|
|
||||||
type=semver,pattern={{version}}
|
|
||||||
type=semver,pattern={{major}}.{{minor}}
|
|
||||||
type=semver,pattern={{major}}
|
|
||||||
type=sha
|
|
||||||
|
|
||||||
- name: Build kanidmd
|
- name: Build kanidmd
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
|
@ -64,6 +52,9 @@ jobs:
|
||||||
# build-args: |
|
# build-args: |
|
||||||
# "KANIDM_BUILD_OPTIONS=-j1"
|
# "KANIDM_BUILD_OPTIONS=-j1"
|
||||||
file: server/Dockerfile
|
file: server/Dockerfile
|
||||||
|
context: .
|
||||||
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
|
annotations: ${{ steps.meta.outputs.annotations }}
|
||||||
# Must use OCI exporter for multi-arch: https://github.com/docker/buildx/pull/1813
|
# Must use OCI exporter for multi-arch: https://github.com/docker/buildx/pull/1813
|
||||||
outputs: type=oci,dest=/tmp/kanidmd-docker.tar
|
outputs: type=oci,dest=/tmp/kanidmd-docker.tar
|
||||||
- name: Upload artifact
|
- name: Upload artifact
|
||||||
|
@ -77,8 +68,8 @@ jobs:
|
||||||
# This step is split so that we don't apply "packages: write" permission
|
# This step is split so that we don't apply "packages: write" permission
|
||||||
# except when uploading the final Docker image to GHCR.
|
# except when uploading the final Docker image to GHCR.
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
if: ( github.ref_type== 'tag' || github.ref == 'refs/heads/master' ) && github.repository == 'kanidm/kanidm'
|
if: ( github.ref_type== 'tag' || github.ref == 'refs/heads/master' )
|
||||||
needs: kanidmd_build
|
needs: [kanidmd_build, set_tag_values]
|
||||||
permissions:
|
permissions:
|
||||||
packages: write
|
packages: write
|
||||||
|
|
||||||
|
@ -95,4 +86,4 @@ jobs:
|
||||||
echo "${{ secrets.GITHUB_TOKEN }}" | \
|
echo "${{ secrets.GITHUB_TOKEN }}" | \
|
||||||
oras login -u "${{ github.actor }}" --password-stdin ghcr.io
|
oras login -u "${{ github.actor }}" --password-stdin ghcr.io
|
||||||
oras copy --from-oci-layout "/tmp/kanidmd-docker.tar:devel" \
|
oras copy --from-oci-layout "/tmp/kanidmd-docker.tar:devel" \
|
||||||
"ghcr.io/${{ github.repository_owner }}/kanidmd:devel"
|
"ghcr.io/${{ needs.set_tag_values.outputs.owner_lc }}/kanidmd:devel"
|
.github/workflows/docker_build_radiusd.yml (vendored, 17 lines changed)

@@ -35,17 +35,26 @@ jobs:
 runs-on: ubuntu-latest
 needs: set_tag_values
 steps:
-- uses: actions/checkout@v4
+- name: Checkout repository
+uses: actions/checkout@v4
+with:
+persist-credentials: false
 - name: Set up QEMU
 uses: docker/setup-qemu-action@v3
 - name: Set up Docker Buildx
 uses: docker/setup-buildx-action@v3
+- name: Docker metadata
+id: meta
+uses: docker/metadata-action@v5
 - name: Build radius
 uses: docker/build-push-action@v6
 with:
 platforms: linux/arm64,linux/amd64
 tags: ghcr.io/${{ needs.set_tag_values.outputs.owner_lc }}/radius:devel,ghcr.io/${{ needs.set_tag_values.outputs.owner_lc }}/radius:${{ needs.set_tag_values.outputs.ref_name}}
 file: rlm_python/Dockerfile
+context: .
+labels: ${{ steps.meta.outputs.labels }}
+annotations: ${{ steps.meta.outputs.annotations }}
 # Must use OCI exporter for multi-arch: https://github.com/docker/buildx/pull/1813
 outputs: type=oci,dest=/tmp/radius-docker.tar
 - name: Upload artifact

@@ -59,8 +68,8 @@ jobs:
 # This step is split so that we don't apply "packages: write" permission
 # except when uploading the final Docker image to GHCR.
 runs-on: ubuntu-latest
-if: ( github.ref_type == 'tag' || github.ref == 'refs/heads/master' ) && github.repository == 'kanidm/kanidm'
-needs: radius_build
+if: ( github.ref_type == 'tag' || github.ref == 'refs/heads/master' )
+needs: [radius_build, set_tag_values]
 permissions:
 packages: write

@@ -79,4 +88,4 @@ jobs:
 echo "${{ secrets.GITHUB_TOKEN }}" | \
 oras login -u "${{ github.actor }}" --password-stdin ghcr.io
 oras copy --from-oci-layout "/tmp/radius-docker.tar:devel" \
-"ghcr.io/${{ github.repository_owner }}/radius:devel"
+"ghcr.io/${{ needs.set_tag_values.outputs.owner_lc }}/radius:devel"
book/src (OAuth2 integration guide)

@@ -215,7 +215,7 @@ Token signing public key

 ### Create the Kanidm Configuration

-By default, members of the `system_admins` or `idm_hp_oauth2_manage_priv` groups are able to create
+By default, members of the `idm_admins` or `idm_oauth2_admins` groups are able to create
 or manage OAuth2 client integrations.

 You can create a new client by specifying its client name, application display name and the landing

@@ -556,6 +556,65 @@ php occ config:app:set --value=0 user_oidc allow_multiple_user_backends
 You can login directly by appending `?direct=1` to your login page. You can re-enable other backends
 by setting the value to `1`

+## OAuth2 Proxy
+
+OAuth2 Proxy is a reverse proxy that provides authentication with OpenID Connect identity providers.
+It is typically used to secure web applications without native OpenID Connect support.
+
+Prepare the environment.
+Due to a [lack of public client support](https://github.com/oauth2-proxy/oauth2-proxy/issues/1714) we have to set it up as a basic client.
+
+```bash
+kanidm system oauth2 create webapp 'webapp.example.com' 'https://webapp.example.com'
+kanidm system add-redirect-url webapp 'https://webapp.example.com/oauth2/callback'
+kanidm system oauth2 update-scope-map webapp email openid
+kanidm system oauth2 get webapp
+kanidm system oauth2 show-basic-secret webapp
+<SECRET>
+```
+
+Create a user group.
+
+```bash
+kanidm group create 'webapp_admin'
+```
+
+Setup the claim-map to add `webapp_group` to the userinfo claim.
+
+```bash
+kanidm system oauth2 update-claim-map-join 'webapp' 'webapp_group' array
+kanidm system oauth2 update-claim-map 'webapp' 'webapp_group' 'webapp_admin' 'webapp_admin'
+```
+
+Authorize users for the application.
+Additionally OAuth2 Proxy requires all users have an email, reference this issue for more details:
+
+- <https://github.com/oauth2-proxy/oauth2-proxy/issues/2667>
+
+```bash
+kanidm person update '<user>' --legalname 'Personal Name' --mail 'user@example.com'
+kanidm group add-members 'webapp_admin' '<user>'
+```
+
+And add the following to your OAuth2 Proxy config.
+
+```toml
+provider = "oidc"
+scope = "openid email"
+# change to match your kanidm domain and client id
+oidc_issuer_url = "https://idm.example.com/oauth2/openid/webapp"
+# client ID from `kanidm system oauth2 create`
+client_id = "webapp"
+# redirect URL from `kanidm system add-redirect-url webapp`
+redirect_url = "https://webapp.example.com/oauth2/callback"
+# claim name from `kanidm system oauth2 update-claim-map-join`
+oidc_groups_claim = "webapp_group"
+# user group from `kanidm group create`
+allowed_groups = ["webapp_admin"]
+# secret from `kanidm system oauth2 show-basic-secret webapp`
+client_secret = "<SECRET>"
+```
+
 ## Outline

 > These instructions were tested with self-hosted Outline 0.80.2.
book/src (supported features)

@@ -22,6 +22,7 @@ This is a list of supported features and standards within Kanidm.
 - [RFC4519 LDAP Schema](https://www.rfc-editor.org/rfc/rfc4519)
 - FreeIPA User Schema
 - [RFC7644 SCIM Bulk Data Import](https://www.rfc-editor.org/rfc/rfc7644)
+  - NOTE: SCIM is only supported for synchronisation from another IDP at this time.

 # Database

examples (kanidm unix client configuration example)

@@ -1,5 +1,5 @@
 # Kanidm minimal Service Configuration - /etc/kanidm/config
-# For a full example and documentation, see /usr/share/kanidm/kanidm
+# For a full example and documentation, see /usr/share/kanidm/config
 # or `example/kanidm` in the source repository.

 # Replace this with your kanidmd URI and uncomment the line
server/lib/src/constants (builtin groups)

@@ -289,15 +289,6 @@ lazy_static! {
 ..Default::default()
 };

-/// Self-write of mail
-pub static ref IDM_PEOPLE_SELF_WRITE_MAIL_V1: BuiltinGroup = BuiltinGroup {
-name: "idm_people_self_write_mail",
-description: "Builtin IDM Group for people accounts to update their own mail.",
-uuid: UUID_IDM_PEOPLE_SELF_MAIL_WRITE,
-members: Vec::with_capacity(0),
-..Default::default()
-};
 /// Self-write of mail
 pub static ref IDM_PEOPLE_SELF_MAIL_WRITE_DL7: BuiltinGroup = BuiltinGroup {
 name: "idm_people_self_mail_write",

@@ -373,36 +364,7 @@ lazy_static! {
 };

 /// This must be the last group to init to include the UUID of the other high priv groups.
-pub static ref IDM_HIGH_PRIVILEGE_V1: BuiltinGroup = BuiltinGroup {
-name: "idm_high_privilege",
-uuid: UUID_IDM_HIGH_PRIVILEGE,
-entry_managed_by: Some(UUID_IDM_ACCESS_CONTROL_ADMINS),
-description: "Builtin IDM provided groups with high levels of access that should be audited and limited in modification.",
-members: vec![
-UUID_SYSTEM_ADMINS,
-UUID_IDM_ADMINS,
-UUID_DOMAIN_ADMINS,
-UUID_IDM_SERVICE_DESK,
-UUID_IDM_RECYCLE_BIN_ADMINS,
-UUID_IDM_SCHEMA_ADMINS,
-UUID_IDM_ACCESS_CONTROL_ADMINS,
-UUID_IDM_OAUTH2_ADMINS,
-UUID_IDM_RADIUS_ADMINS,
-UUID_IDM_ACCOUNT_POLICY_ADMINS,
-UUID_IDM_RADIUS_SERVERS,
-UUID_IDM_GROUP_ADMINS,
-UUID_IDM_UNIX_ADMINS,
-UUID_IDM_PEOPLE_PII_READ,
-UUID_IDM_PEOPLE_ADMINS,
-UUID_IDM_PEOPLE_ON_BOARDING,
-UUID_IDM_SERVICE_ACCOUNT_ADMINS,
-UUID_IDM_HIGH_PRIVILEGE,
-],
-..Default::default()
-};
-
-/// This must be the last group to init to include the UUID of the other high priv groups.
-pub static ref IDM_HIGH_PRIVILEGE_DL7: BuiltinGroup = BuiltinGroup {
+pub static ref IDM_HIGH_PRIVILEGE_DL8: BuiltinGroup = BuiltinGroup {
 name: "idm_high_privilege",
 uuid: UUID_IDM_HIGH_PRIVILEGE,
 entry_managed_by: Some(UUID_IDM_ACCESS_CONTROL_ADMINS),

@@ -426,12 +388,14 @@ lazy_static! {
 UUID_IDM_PEOPLE_ON_BOARDING,
 UUID_IDM_SERVICE_ACCOUNT_ADMINS,
 UUID_IDM_CLIENT_CERTIFICATE_ADMINS,
+UUID_IDM_APPLICATION_ADMINS,
+UUID_IDM_MAIL_ADMINS,
 UUID_IDM_HIGH_PRIVILEGE,
 ],
 ..Default::default()
 };

-pub static ref BUILTIN_GROUP_APPLICATION_ADMINS: BuiltinGroup = BuiltinGroup {
+pub static ref BUILTIN_GROUP_APPLICATION_ADMINS_DL8: BuiltinGroup = BuiltinGroup {
 name: "idm_application_admins",
 uuid: UUID_IDM_APPLICATION_ADMINS,
 description: "Builtin Application Administration Group.",

@@ -458,17 +422,19 @@ pub fn idm_builtin_non_admin_groups() -> Vec<&'static BuiltinGroup> {
 &BUILTIN_GROUP_PEOPLE_PII_READ,
 &BUILTIN_GROUP_PEOPLE_ON_BOARDING,
 &BUILTIN_GROUP_SERVICE_ACCOUNT_ADMINS,
-&BUILTIN_GROUP_APPLICATION_ADMINS,
 &BUILTIN_GROUP_MAIL_SERVICE_ADMINS_DL8,
 &IDM_GROUP_ADMINS_V1,
 &IDM_ALL_PERSONS,
 &IDM_ALL_ACCOUNTS,
 &BUILTIN_IDM_RADIUS_SERVERS_V1,
 &BUILTIN_IDM_MAIL_SERVERS_DL8,
-&IDM_PEOPLE_SELF_WRITE_MAIL_V1,
+&BUILTIN_GROUP_PEOPLE_SELF_NAME_WRITE_DL7,
+&IDM_PEOPLE_SELF_MAIL_WRITE_DL7,
+&BUILTIN_GROUP_CLIENT_CERTIFICATE_ADMINS_DL7,
+&BUILTIN_GROUP_APPLICATION_ADMINS_DL8,
 // Write deps on read, so write must be added first.
 // All members must exist before we write HP
-&IDM_HIGH_PRIVILEGE_V1,
+&IDM_HIGH_PRIVILEGE_DL8,
 // other things
 &IDM_UI_ENABLE_EXPERIMENTAL_FEATURES,
 &IDM_ACCOUNT_MAIL_READ,
server/lib/src/constants (domain version levels)

@@ -54,14 +54,6 @@ pub type DomainVersion = u32;
 /// previously.
 pub const DOMAIN_LEVEL_0: DomainVersion = 0;

-/// Deprecated as of 1.3.0
-pub const DOMAIN_LEVEL_5: DomainVersion = 5;
-
-/// Domain Level introduced with 1.2.0.
-/// Deprecated as of 1.4.0
-pub const DOMAIN_LEVEL_6: DomainVersion = 6;
-pub const PATCH_LEVEL_1: u32 = 1;
-
 /// Domain Level introduced with 1.3.0.
 /// Deprecated as of 1.5.0
 pub const DOMAIN_LEVEL_7: DomainVersion = 7;

@@ -85,7 +77,7 @@ pub const DOMAIN_LEVEL_11: DomainVersion = 11;

 // The minimum level that we can re-migrate from.
 // This should be DOMAIN_TGT_LEVEL minus 2
-pub const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_TGT_LEVEL - 2;
+pub const DOMAIN_MIN_REMIGRATION_LEVEL: DomainVersion = DOMAIN_LEVEL_8;
 // The minimum supported domain functional level (for replication)
 pub const DOMAIN_MIN_LEVEL: DomainVersion = DOMAIN_TGT_LEVEL;
 // The previous releases domain functional level
server/lib/src/constants (schema definitions)

@@ -208,6 +208,16 @@ pub static ref SCHEMA_ATTR_DENIED_NAME: SchemaAttribute = SchemaAttribute {
 ..Default::default()
 };

+pub static ref SCHEMA_ATTR_DENIED_NAME_DL10: SchemaAttribute = SchemaAttribute {
+uuid: UUID_SCHEMA_ATTR_DENIED_NAME,
+name: Attribute::DeniedName,
+description: "Iname values that are not allowed to be used in 'name'.".to_string(),
+syntax: SyntaxType::Utf8StringIname,
+multivalue: true,
+..Default::default()
+};
+
 pub static ref SCHEMA_ATTR_DOMAIN_TOKEN_KEY: SchemaAttribute = SchemaAttribute {
 uuid: UUID_SCHEMA_ATTR_DOMAIN_TOKEN_KEY,
 name: Attribute::DomainTokenKey,

@@ -1209,6 +1219,30 @@ pub static ref SCHEMA_CLASS_DOMAIN_INFO_DL9: SchemaClass = SchemaClass {
 ..Default::default()
 };

+pub static ref SCHEMA_CLASS_DOMAIN_INFO_DL10: SchemaClass = SchemaClass {
+uuid: UUID_SCHEMA_CLASS_DOMAIN_INFO,
+name: EntryClass::DomainInfo.into(),
+description: "Local domain information and configuration".to_string(),
+systemmay: vec![
+Attribute::DomainSsid,
+Attribute::DomainLdapBasedn,
+Attribute::LdapAllowUnixPwBind,
+Attribute::Image,
+Attribute::PatchLevel,
+Attribute::DomainDevelopmentTaint,
+Attribute::DomainAllowEasterEggs,
+Attribute::DomainDisplayName,
+],
+systemmust: vec![
+Attribute::Name,
+Attribute::DomainUuid,
+Attribute::DomainName,
+Attribute::Version,
+],
+..Default::default()
+};
+
 pub static ref SCHEMA_CLASS_POSIXGROUP: SchemaClass = SchemaClass {
 uuid: UUID_SCHEMA_CLASS_POSIXGROUP,
 name: EntryClass::PosixGroup.into(),
server/lib/src/idm (OAuth2 authorisation)

@@ -248,24 +248,32 @@ impl AuthorisePermitSuccess {
 pub fn build_redirect_uri(&self) -> Url {
 let mut redirect_uri = self.redirect_uri.clone();

-// Always clear query and fragment, regardless of the response mode
-redirect_uri.set_query(None);
+// Always clear the fragment per RFC
 redirect_uri.set_fragment(None);

-// We can't set query pairs on fragments, only query.
-let mut uri_builder = url::form_urlencoded::Serializer::new(String::new());
-uri_builder.append_pair("code", &self.code);
-if let Some(state) = self.state.as_ref() {
-uri_builder.append_pair("state", state);
-};
-let encoded = uri_builder.finish();
-
 match self.response_mode {
-ResponseMode::Query => redirect_uri.set_query(Some(&encoded)),
-ResponseMode::Fragment => redirect_uri.set_fragment(Some(&encoded)),
+ResponseMode::Query => {
+redirect_uri
+.query_pairs_mut()
+.append_pair("code", &self.code);
+
+if let Some(state) = self.state.as_ref() {
+redirect_uri.query_pairs_mut().append_pair("state", state);
+};
+}
+ResponseMode::Fragment => {
+redirect_uri.set_query(None);
+
+// Per [the RFC](https://www.rfc-editor.org/rfc/rfc6749#section-3.1.2), we can't set query pairs on fragment-containing redirects, only query ones.
+let mut uri_builder = url::form_urlencoded::Serializer::new(String::new());
+uri_builder.append_pair("code", &self.code);
+if let Some(state) = self.state.as_ref() {
+uri_builder.append_pair("state", state);
+};
+let encoded = uri_builder.finish();
+
+redirect_uri.set_fragment(Some(&encoded))
+}
 }

 redirect_uri

@@ -3020,7 +3028,7 @@ fn check_is_loopback(redirect_uri: &Url) -> bool {
 #[cfg(test)]
 mod tests {
 use base64::{engine::general_purpose, Engine as _};
-use std::collections::BTreeSet;
+use std::collections::{BTreeMap, BTreeSet};
 use std::convert::TryFrom;
 use std::str::FromStr;
 use std::time::Duration;

@@ -3144,7 +3152,7 @@ mod tests {
 ),
 (
 Attribute::OAuth2RsOrigin,
-Value::new_url_s("https://portal.example.com").unwrap()
+Value::new_url_s("https://portal.example.com/?custom=foo").unwrap()
 ),
 (
 Attribute::OAuth2RsOrigin,

@@ -3680,6 +3688,70 @@ mod tests {
 == Oauth2Error::InvalidOrigin
 );

+// * invalid uri (doesn't match query params)
+let auth_req = AuthorisationRequest {
+response_type: ResponseType::Code,
+response_mode: None,
+client_id: "test_resource_server".to_string(),
+state: Some("123".to_string()),
+pkce_request: pkce_request.clone(),
+redirect_uri: Url::parse("https://portal.example.com/?custom=foo&too=many").unwrap(),
+scope: btreeset![OAUTH2_SCOPE_OPENID.to_string()],
+nonce: None,
+oidc_ext: Default::default(),
+max_age: None,
+unknown_keys: Default::default(),
+};
+
+assert!(
+idms_prox_read
+.check_oauth2_authorisation(Some(&ident), &auth_req, ct)
+.unwrap_err()
+== Oauth2Error::InvalidOrigin
+);
+
+let auth_req = AuthorisationRequest {
+response_type: ResponseType::Code,
+response_mode: None,
+client_id: "test_resource_server".to_string(),
+state: Some("123".to_string()),
+pkce_request: pkce_request.clone(),
+redirect_uri: Url::parse("https://portal.example.com").unwrap(),
+scope: btreeset![OAUTH2_SCOPE_OPENID.to_string()],
+nonce: None,
+oidc_ext: Default::default(),
+max_age: None,
+unknown_keys: Default::default(),
+};
+
+assert!(
+idms_prox_read
+.check_oauth2_authorisation(Some(&ident), &auth_req, ct)
+.unwrap_err()
+== Oauth2Error::InvalidOrigin
+);
+
+let auth_req = AuthorisationRequest {
+response_type: ResponseType::Code,
+response_mode: None,
+client_id: "test_resource_server".to_string(),
+state: Some("123".to_string()),
+pkce_request: pkce_request.clone(),
+redirect_uri: Url::parse("https://portal.example.com/?wrong=queryparam").unwrap(),
+scope: btreeset![OAUTH2_SCOPE_OPENID.to_string()],
+nonce: None,
+oidc_ext: Default::default(),
+max_age: None,
+unknown_keys: Default::default(),
+};
+
+assert!(
+idms_prox_read
+.check_oauth2_authorisation(Some(&ident), &auth_req, ct)
+.unwrap_err()
+== Oauth2Error::InvalidOrigin
+);
+
 // Not Authenticated
 let auth_req = AuthorisationRequest {
 response_type: ResponseType::Code,

@@ -4041,6 +4113,8 @@ mod tests {
 // == Setup the authorisation request
 let (code_verifier, code_challenge) = create_code_verifier!("Whar Garble");

+let redirect_uri = Url::parse("https://portal.example.com/?custom=foo").unwrap();
+
 let auth_req = AuthorisationRequest {
 response_type: ResponseType::Code,
 response_mode: None,

@@ -4050,7 +4124,7 @@ mod tests {
 code_challenge: code_challenge.clone(),
 code_challenge_method: CodeChallengeMethod::S256,
 }),
-redirect_uri: Url::parse("https://portal.example.com").unwrap(),
+redirect_uri: redirect_uri.clone(),
 scope: btreeset![OAUTH2_SCOPE_GROUPS.to_string()],
 nonce: Some("abcdef".to_string()),
 oidc_ext: Default::default(),

@@ -4080,12 +4154,22 @@ mod tests {
 // Check we are reflecting the CSRF properly.
 assert_eq!(permit_success.state.as_deref(), None);

+// Assert we followed the redirect uri including the query elements
+// we have in the url.
+let permit_redirect_uri = permit_success.build_redirect_uri();
+
+assert_eq!(permit_redirect_uri.origin(), redirect_uri.origin());
+assert_eq!(permit_redirect_uri.path(), redirect_uri.path());
+let query = BTreeMap::from_iter(permit_redirect_uri.query_pairs().into_owned());
+// Assert the query pair wasn't changed
+assert_eq!(query.get("custom").map(|s| s.as_str()), Some("foo"));
+
 // == Submit the token exchange code.
 // ⚠️ This is where we submit a different origin!
 let token_req = AccessTokenRequest {
 grant_type: GrantTypeReq::AuthorizationCode {
 code: permit_success.code,
-redirect_uri: Url::parse("https://portal.example.com").unwrap(),
+redirect_uri,
 // From the first step.
 code_verifier: code_verifier.clone(),
 },
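The rewritten `build_redirect_uri` above only strips the fragment up front, so query parameters that are registered as part of the client's redirect URL survive in query response mode, while fragment mode still clears the query and packs `code`/`state` into the fragment. A minimal, standalone sketch of that behaviour, assuming only the `url` crate; the URLs and the `AUTH_CODE`/`123` values are illustrative, not taken from the code above:

```rust
// Sketch only: shows how url::Url preserves existing query pairs when appending,
// mirroring the ResponseMode::Query branch above. Assumes the `url` crate.
use url::Url;

fn main() {
    // A registered redirect URI that already carries a custom query parameter.
    let mut redirect_uri = Url::parse("https://portal.example.com/?custom=foo").unwrap();

    // Query mode: leave the existing query untouched, only append code/state.
    redirect_uri.set_fragment(None);
    redirect_uri
        .query_pairs_mut()
        .append_pair("code", "AUTH_CODE")
        .append_pair("state", "123");

    // The pre-existing pair is still present alongside the appended ones.
    assert_eq!(
        redirect_uri.as_str(),
        "https://portal.example.com/?custom=foo&code=AUTH_CODE&state=123"
    );

    // Fragment mode: the query is cleared and code/state are carried in the fragment.
    let mut fragment_uri = Url::parse("https://portal.example.com/?custom=foo").unwrap();
    fragment_uri.set_query(None);
    let encoded = url::form_urlencoded::Serializer::new(String::new())
        .append_pair("code", "AUTH_CODE")
        .append_pair("state", "123")
        .finish();
    fragment_uri.set_fragment(Some(&encoded));

    assert_eq!(
        fragment_uri.as_str(),
        "https://portal.example.com/#code=AUTH_CODE&state=123"
    );
}
```

This is why the updated test asserts that the `custom=foo` pair is still present on the permit redirect.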
server/lib/src/idm (SCIM sync account)

@@ -5,7 +5,7 @@ use base64::{
 Engine as _,
 };

-use compact_jwt::{Jws, JwsCompact, JwsEs256Signer, JwsSigner};
+use compact_jwt::{Jws, JwsCompact};
 use kanidm_proto::internal::{ApiTokenPurpose, ScimSyncToken};
 use kanidm_proto::scim_v1::*;
 use std::collections::{BTreeMap, BTreeSet};

@@ -25,7 +25,6 @@ pub(crate) struct SyncAccount {
 pub name: String,
 pub uuid: Uuid,
 pub sync_tokens: BTreeMap<Uuid, ApiToken>,
-pub jws_key: Option<JwsEs256Signer>,
 }

 macro_rules! try_from_entry {

@@ -40,15 +39,6 @@ macro_rules! try_from_entry {
 .map(|s| s.to_string())
 .ok_or(OperationError::MissingAttribute(Attribute::Name))?;

-let jws_key = $value
-.get_ava_single_jws_key_es256(Attribute::JwsEs256PrivateKey)
-.cloned()
-.map(|jws_key| {
-jws_key
-.set_sign_option_embed_jwk(true)
-.set_sign_option_legacy_kid(true)
-});
-
 let sync_tokens = $value
 .get_ava_as_apitoken_map(Attribute::SyncTokenSession)
 .cloned()

@@ -60,7 +50,6 @@ macro_rules! try_from_entry {
 name,
 uuid,
 sync_tokens,
-jws_key,
 })
 }};
 }

@@ -123,16 +112,6 @@ impl IdmServerProxyWriteTransaction<'_> {
 gte: &GenerateScimSyncTokenEvent,
 ct: Duration,
 ) -> Result<JwsCompact, OperationError> {
-// Get the target signing key.
-let sync_account = self
-.qs_write
-.internal_search_uuid(gte.target)
-.and_then(|entry| SyncAccount::try_from_entry_rw(&entry))
-.map_err(|e| {
-admin_error!(?e, "Failed to search service account");
-e
-})?;
-
 let session_id = Uuid::new_v4();
 let issued_at = time::OffsetDateTime::UNIX_EPOCH + ct;

@@ -185,25 +164,9 @@ impl IdmServerProxyWriteTransaction<'_> {
 })?;

 // The modify succeeded and was allowed, now sign the token for return.
-if self.qs_write.get_domain_version() < DOMAIN_LEVEL_6 {
-sync_account
-.jws_key
-.as_ref()
-.ok_or_else(|| {
-admin_error!("Unable to sign sync token, no sync keys available");
-OperationError::CryptographyError
-})
-.and_then(|jws_key| {
-jws_key.sign(&token).map_err(|err| {
-admin_error!(?err, "Unable to sign sync token");
-OperationError::CryptographyError
-})
-})
-} else {
-self.qs_write
-.get_domain_key_object_handle()?
-.jws_es256_sign(&token, ct)
-}
+self.qs_write
+.get_domain_key_object_handle()?
+.jws_es256_sign(&token, ct)
 // Done!
 }
server/lib/src/idm (service account)

@@ -1,7 +1,7 @@
 use std::collections::BTreeMap;
 use std::time::Duration;

-use compact_jwt::{Jws, JwsCompact, JwsEs256Signer, JwsSigner};
+use compact_jwt::{Jws, JwsCompact};
 use kanidm_proto::internal::ApiToken as ProtoApiToken;
 use time::OffsetDateTime;

@@ -23,15 +23,6 @@ macro_rules! try_from_entry {
 ));
 }

-let jws_key = $value
-.get_ava_single_jws_key_es256(Attribute::JwsEs256PrivateKey)
-.cloned()
-.map(|jws_key| {
-jws_key
-.set_sign_option_embed_jwk(true)
-.set_sign_option_legacy_kid(true)
-});
-
 let api_tokens = $value
 .get_ava_as_apitoken_map(Attribute::ApiTokenSession)
 .cloned()

@@ -48,7 +39,6 @@ macro_rules! try_from_entry {
 valid_from,
 expire,
 api_tokens,
-jws_key,
 })
 }};
 }

@@ -60,8 +50,6 @@ pub struct ServiceAccount {
 pub expire: Option<OffsetDateTime>,

 pub api_tokens: BTreeMap<Uuid, ApiToken>,

-pub jws_key: Option<JwsEs256Signer>,
 }

 impl ServiceAccount {

@@ -253,25 +241,9 @@ impl IdmServerProxyWriteTransaction<'_> {
 err
 })?;

-if self.qs_write.get_domain_version() < DOMAIN_LEVEL_6 {
-service_account
-.jws_key
-.as_ref()
-.ok_or_else(|| {
-admin_error!("Unable to sign sync token, no sync keys available");
-OperationError::CryptographyError
-})
-.and_then(|jws_key| {
-jws_key.sign(&token).map_err(|err| {
-admin_error!(?err, "Unable to sign sync token");
-OperationError::CryptographyError
-})
-})
-} else {
-self.qs_write
-.get_domain_key_object_handle()?
-.jws_es256_sign(&token, ct)
-}
+self.qs_write
+.get_domain_key_object_handle()?
+.jws_es256_sign(&token, ct)
 }

 pub fn service_account_destroy_api_token(
server/lib/src/plugins (domain plugin)

@@ -7,8 +7,6 @@
 use std::iter::once;
 use std::sync::Arc;

-use compact_jwt::JwsEs256Signer;
-use rand::prelude::*;
 use regex::Regex;
 use tracing::trace;

@@ -61,13 +59,6 @@ impl Plugin for Domain {
 }
 }

-fn generate_domain_cookie_key() -> Value {
-let mut key = [0; 64];
-let mut rng = StdRng::from_entropy();
-rng.fill(&mut key);
-Value::new_privatebinary(&key)
-}
-
 impl Domain {
 /// Generates the cookie key for the domain.
 fn modify_inner<T: Clone + std::fmt::Debug>(

@@ -79,11 +70,14 @@ impl Domain {
 && e.attribute_equality(Attribute::Uuid, &PVUUID_DOMAIN_INFO)
 {
 // Validate the domain ldap basedn syntax.
-if let Some(basedn) = e
-.get_ava_single_iutf8(Attribute::DomainLdapBasedn) {
+if let Some(basedn) = e.get_ava_single_iutf8(Attribute::DomainLdapBasedn) {

 if !DOMAIN_LDAP_BASEDN_RE.is_match(basedn) {
-error!("Invalid {} '{}'. Must pass regex \"{}\"", Attribute::DomainLdapBasedn,basedn, *DOMAIN_LDAP_BASEDN_RE);
+error!(
+"Invalid {} '{}'. Must pass regex \"{}\"",
+Attribute::DomainLdapBasedn,
+basedn,
+*DOMAIN_LDAP_BASEDN_RE
+);
 return Err(OperationError::InvalidState);
 }
 }

@@ -109,39 +103,26 @@ impl Domain {
 debug!("plugin_domain: NOT Applying domain version transform");
 };

-// create the domain_display_name if it's missing
-if !e.attribute_pres(Attribute::DomainDisplayName) {
-let domain_display_name = Value::new_utf8(format!("Kanidm {}", qs.get_domain_name()));
-security_info!("plugin_domain: setting default domain_display_name to {:?}", domain_display_name);
+// create the domain_display_name if it's missing. This was the behaviour in versions
+// prior to DL10. Rather than checking the domain version itself, the issue is we
+// have to check the min remigration level. This is because during a server setup
+// we start from the MIN remigration level and work up, and the domain version == 0.
+//
+// So effectively we only skip setting this value after we know that we are at DL12
+// since we could never go back to anything lower than 10 at that point.
+if DOMAIN_MIN_REMIGRATION_LEVEL < DOMAIN_LEVEL_10
+&& !e.attribute_pres(Attribute::DomainDisplayName)
+{
+let domain_display_name =
+Value::new_utf8(format!("Kanidm {}", qs.get_domain_name()));
+security_info!(
+"plugin_domain: setting default domain_display_name to {:?}",
+domain_display_name
+);
+
 e.set_ava(&Attribute::DomainDisplayName, once(domain_display_name));
 }

-if qs.get_domain_version() < DOMAIN_LEVEL_6 && !e.attribute_pres(Attribute::FernetPrivateKeyStr) {
-security_info!("regenerating domain token encryption key");
-let k = fernet::Fernet::generate_key();
-let v = Value::new_secret_str(&k);
-e.add_ava(Attribute::FernetPrivateKeyStr, v);
-}
-
-if qs.get_domain_version() < DOMAIN_LEVEL_6 && !e.attribute_pres(Attribute::Es256PrivateKeyDer) {
-security_info!("regenerating domain es256 private key");
-let der = JwsEs256Signer::generate_es256()
-.and_then(|jws| jws.private_key_to_der())
-.map_err(|e| {
-admin_error!(err = ?e, "Unable to generate ES256 JwsSigner private key");
-OperationError::CryptographyError
-})?;
-let v = Value::new_privatebinary(&der);
-e.add_ava(Attribute::Es256PrivateKeyDer, v);
-}
-
-if qs.get_domain_version() < DOMAIN_LEVEL_6 && !e.attribute_pres(Attribute::PrivateCookieKey) {
-security_info!("regenerating domain cookie key");
-e.add_ava(Attribute::PrivateCookieKey, generate_domain_cookie_key());
-}
-
-trace!(?e);
 Ok(())
 } else {
 Ok(())
server/lib/src/plugins (gidnumber plugin)

@@ -62,10 +62,7 @@ pub const GID_UNUSED_D_MAX: u32 = 0x7fff_ffff;

 pub struct GidNumber {}

-fn apply_gidnumber<T: Clone>(
-e: &mut Entry<EntryInvalid, T>,
-domain_version: DomainVersion,
-) -> Result<(), OperationError> {
+fn apply_gidnumber<T: Clone>(e: &mut Entry<EntryInvalid, T>) -> Result<(), OperationError> {
 if (e.attribute_equality(Attribute::Class, &EntryClass::PosixGroup.into())
 || e.attribute_equality(Attribute::Class, &EntryClass::PosixAccount.into()))
 && !e.attribute_pres(Attribute::GidNumber)

@@ -89,48 +86,33 @@ fn apply_gidnumber<T: Clone>(
 e.set_ava(&Attribute::GidNumber, once(gid_v));
 Ok(())
 } else if let Some(gid) = e.get_ava_single_uint32(Attribute::GidNumber) {
-if domain_version <= DOMAIN_LEVEL_6 {
-if gid < GID_REGULAR_USER_MIN {
-error!(
-"Requested GID ({}) overlaps a system range. Allowed ranges are {} to {}, {} to {} and {} to {}",
-gid,
-GID_REGULAR_USER_MIN, GID_REGULAR_USER_MAX,
-GID_UNUSED_C_MIN, GID_UNUSED_C_MAX,
-GID_UNUSED_D_MIN, GID_UNUSED_D_MAX
-);
-Err(OperationError::PL0001GidOverlapsSystemRange)
-} else {
-Ok(())
-}
-} else {
-// If they provided us with a gid number, ensure it's in a safe range.
-if (GID_REGULAR_USER_MIN..=GID_REGULAR_USER_MAX).contains(&gid)
-|| (GID_UNUSED_A_MIN..=GID_UNUSED_A_MAX).contains(&gid)
-|| (GID_UNUSED_B_MIN..= GID_UNUSED_B_MAX).contains(&gid)
-|| (GID_UNUSED_C_MIN..=GID_UNUSED_C_MAX).contains(&gid)
-// We won't ever generate an id in the nspawn range, but we do secretly allow
-// it to be set for compatibility with services like freeipa or openldap. TBH
-// most people don't even use systemd nspawn anyway ...
-//
-// I made this design choice to avoid a tunable that may confuse people to
-// its purpose. This way things "just work" for imports and existing systems
-// but we do the right thing in the future.
-|| (GID_NSPAWN_MIN..=GID_NSPAWN_MAX).contains(&gid)
-|| (GID_UNUSED_D_MIN..=GID_UNUSED_D_MAX).contains(&gid)
-{
-Ok(())
-} else {
-// Note that here we don't advertise that we allow the nspawn range to be set, even
-// though we do allow it.
-error!(
-"Requested GID ({}) overlaps a system range. Allowed ranges are {} to {}, {} to {} and {} to {}",
-gid,
-GID_REGULAR_USER_MIN, GID_REGULAR_USER_MAX,
-GID_UNUSED_C_MIN, GID_UNUSED_C_MAX,
-GID_UNUSED_D_MIN, GID_UNUSED_D_MAX
-);
-Err(OperationError::PL0001GidOverlapsSystemRange)
-}
-}
+// If they provided us with a gid number, ensure it's in a safe range.
+if (GID_REGULAR_USER_MIN..=GID_REGULAR_USER_MAX).contains(&gid)
+|| (GID_UNUSED_A_MIN..=GID_UNUSED_A_MAX).contains(&gid)
+|| (GID_UNUSED_B_MIN..= GID_UNUSED_B_MAX).contains(&gid)
+|| (GID_UNUSED_C_MIN..=GID_UNUSED_C_MAX).contains(&gid)
+// We won't ever generate an id in the nspawn range, but we do secretly allow
+// it to be set for compatibility with services like freeipa or openldap. TBH
+// most people don't even use systemd nspawn anyway ...
+//
+// I made this design choice to avoid a tunable that may confuse people to
+// its purpose. This way things "just work" for imports and existing systems
+// but we do the right thing in the future.
+|| (GID_NSPAWN_MIN..=GID_NSPAWN_MAX).contains(&gid)
+|| (GID_UNUSED_D_MIN..=GID_UNUSED_D_MAX).contains(&gid)
+{
+Ok(())
+} else {
+// Note that here we don't advertise that we allow the nspawn range to be set, even
+// though we do allow it.
+error!(
+"Requested GID ({}) overlaps a system range. Allowed ranges are {} to {}, {} to {} and {} to {}",
+gid,
+GID_REGULAR_USER_MIN, GID_REGULAR_USER_MAX,
+GID_UNUSED_C_MIN, GID_UNUSED_C_MAX,
+GID_UNUSED_D_MIN, GID_UNUSED_D_MAX
+);
+Err(OperationError::PL0001GidOverlapsSystemRange)
+}
 }
 } else {
 Ok(())

@@ -144,37 +126,31 @@ impl Plugin for GidNumber {

 #[instrument(level = "debug", name = "gidnumber_pre_create_transform", skip_all)]
 fn pre_create_transform(
-qs: &mut QueryServerWriteTransaction,
+_qs: &mut QueryServerWriteTransaction,
 cand: &mut Vec<Entry<EntryInvalid, EntryNew>>,
 _ce: &CreateEvent,
 ) -> Result<(), OperationError> {
-let dv = qs.get_domain_version();
-cand.iter_mut()
-.try_for_each(|cand| apply_gidnumber(cand, dv))
+cand.iter_mut().try_for_each(apply_gidnumber)
 }

 #[instrument(level = "debug", name = "gidnumber_pre_modify", skip_all)]
 fn pre_modify(
-qs: &mut QueryServerWriteTransaction,
+_qs: &mut QueryServerWriteTransaction,
 _pre_cand: &[Arc<EntrySealedCommitted>],
 cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
 _me: &ModifyEvent,
 ) -> Result<(), OperationError> {
-let dv = qs.get_domain_version();
-cand.iter_mut()
-.try_for_each(|cand| apply_gidnumber(cand, dv))
+cand.iter_mut().try_for_each(apply_gidnumber)
 }

 #[instrument(level = "debug", name = "gidnumber_pre_batch_modify", skip_all)]
 fn pre_batch_modify(
-qs: &mut QueryServerWriteTransaction,
+_qs: &mut QueryServerWriteTransaction,
 _pre_cand: &[Arc<EntrySealedCommitted>],
 cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
 _me: &BatchModifyEvent,
 ) -> Result<(), OperationError> {
-let dv = qs.get_domain_version();
-cand.iter_mut()
-.try_for_each(|cand| apply_gidnumber(cand, dv))
+cand.iter_mut().try_for_each(apply_gidnumber)
 }
 }

@@ -186,9 +162,7 @@ mod tests {
 };
 use crate::prelude::*;

-use kanidm_proto::internal::DomainUpgradeCheckStatus as ProtoDomainUpgradeCheckStatus;
-
-#[qs_test(domain_level=DOMAIN_LEVEL_7)]
+#[qs_test]
 async fn test_gidnumber_generate(server: &QueryServer) {
 let mut server_txn = server.write(duration_from_epoch_now()).await.expect("txn");

@@ -423,85 +397,4 @@ mod tests {

 assert!(server_txn.commit().is_ok());
 }
-
-#[qs_test(domain_level=DOMAIN_LEVEL_6)]
-async fn test_gidnumber_domain_level_6(server: &QueryServer) {
-let mut server_txn = server.write(duration_from_epoch_now()).await.expect("txn");
-
-// This will be INVALID in DL 7 but it's allowed for DL6
-let user_a_uuid = uuid!("d90fb0cb-6785-4f36-94cb-e364d9c13255");
-{
-let op_result = server_txn.internal_create(vec![entry_init!(
-(Attribute::Class, EntryClass::Account.to_value()),
-(Attribute::Class, EntryClass::PosixAccount.to_value()),
-(Attribute::Name, Value::new_iname("testperson_2")),
-(Attribute::Uuid, Value::Uuid(user_a_uuid)),
-// NOTE HERE: We do GID_UNUSED_A_MIN minus 1 which isn't accepted
-// on DL7
-(Attribute::GidNumber, Value::Uint32(GID_UNUSED_A_MIN - 1)),
-(Attribute::Description, Value::new_utf8s("testperson")),
-(Attribute::DisplayName, Value::new_utf8s("testperson"))
-)]);
-
-assert!(op_result.is_ok());
-
-let user_a = server_txn
-.internal_search_uuid(user_a_uuid)
-.expect("Unable to access user");
-
-let user_a_uid = user_a
-.get_ava_single_uint32(Attribute::GidNumber)
-.expect("gidnumber not present on account");
-
-assert_eq!(user_a_uid, GID_UNUSED_A_MIN - 1);
-}
-
-assert!(server_txn.commit().is_ok());
-
-// Now, do the DL6 upgrade check - will FAIL because the above user has an invalid ID.
-let mut server_txn = server.read().await.unwrap();
-
-let check_item = server_txn
-.domain_upgrade_check_6_to_7_gidnumber()
-.expect("Failed to perform migration check.");
-
-assert_eq!(
-check_item.status,
-ProtoDomainUpgradeCheckStatus::Fail6To7Gidnumber
-);
-
-drop(server_txn);
-
-let mut server_txn = server.write(duration_from_epoch_now()).await.expect("txn");
-
-// Test rejection of important gid values.
-let user_b_uuid = uuid!("33afc396-2434-47e5-b143-05176148b50e");
-// Test that an entry when modified to have posix attributes, if a gidnumber
-// is provided then it is respected.
-{
-let op_result = server_txn.internal_create(vec![entry_init!(
-(Attribute::Class, EntryClass::Account.to_value()),
-(Attribute::Class, EntryClass::Person.to_value()),
-(Attribute::Name, Value::new_iname("testperson_6")),
-(Attribute::Uuid, Value::Uuid(user_b_uuid)),
-(Attribute::Description, Value::new_utf8s("testperson")),
-(Attribute::DisplayName, Value::new_utf8s("testperson"))
-)]);
-
-assert!(op_result.is_ok());
-
-for id in [0, 500, GID_REGULAR_USER_MIN - 1] {
-let modlist = modlist!([
-m_pres(Attribute::Class, &EntryClass::PosixAccount.to_value()),
-m_pres(Attribute::GidNumber, &Value::Uint32(id))
-]);
-let op_result = server_txn.internal_modify_uuid(user_b_uuid, &modlist);
-
-trace!(?id);
-assert_eq!(op_result, Err(OperationError::PL0001GidOverlapsSystemRange));
-}
-}
-
-assert!(server_txn.commit().is_ok());
-}
 }
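The comments kept in the new `apply_gidnumber` carry the design reasoning: a caller-supplied GID is accepted only when it falls inside the user or unused ranges (and, quietly, the nspawn range), and anything else is rejected as overlapping a system range, with no domain-version special case any more. A condensed, standalone sketch of that acceptance predicate; apart from `GID_UNUSED_D_MAX` (0x7fff_ffff, visible in the hunk header above) the bounds here are illustrative placeholders and cover only a subset of the real ranges:

```rust
// Standalone sketch of the range check the reworked apply_gidnumber performs.
// Only GID_UNUSED_D_MAX comes from the diff above; the other bounds are
// illustrative placeholders for the real plugin constants.
const GID_REGULAR_USER_MIN: u32 = 65_536; // placeholder
const GID_REGULAR_USER_MAX: u32 = 524_287; // placeholder
const GID_UNUSED_A_MIN: u32 = 1_000; // placeholder
const GID_UNUSED_A_MAX: u32 = 1_999; // placeholder
const GID_NSPAWN_MIN: u32 = 524_288; // placeholder
const GID_NSPAWN_MAX: u32 = 589_823; // placeholder
const GID_UNUSED_D_MIN: u32 = 0x7000_0000; // placeholder
const GID_UNUSED_D_MAX: u32 = 0x7fff_ffff;

/// Returns true when a caller-provided gid falls in a range the plugin accepts.
/// The nspawn range is accepted for compatibility but never generated.
fn gid_is_acceptable(gid: u32) -> bool {
    (GID_REGULAR_USER_MIN..=GID_REGULAR_USER_MAX).contains(&gid)
        || (GID_UNUSED_A_MIN..=GID_UNUSED_A_MAX).contains(&gid)
        || (GID_NSPAWN_MIN..=GID_NSPAWN_MAX).contains(&gid)
        || (GID_UNUSED_D_MIN..=GID_UNUSED_D_MAX).contains(&gid)
}

fn main() {
    // A gid inside the regular user range is accepted ...
    assert!(gid_is_acceptable(100_000));
    // ... while 0 (root) and other low system gids overlap a reserved range and are rejected.
    assert!(!gid_is_acceptable(0));
    assert!(!gid_is_acceptable(500));
}
```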
server/lib/src/plugins (jwskeygen plugin)

@@ -45,7 +45,7 @@ impl Plugin for JwsKeygen {

 impl JwsKeygen {
 fn modify_inner<T: Clone>(
-qs: &mut QueryServerWriteTransaction,
+_qs: &mut QueryServerWriteTransaction,
 cand: &mut [Entry<EntryInvalid, T>],
 ) -> Result<(), OperationError> {
 cand.iter_mut().try_for_each(|e| {

@@ -88,20 +88,6 @@ impl JwsKeygen {
 }
 }

-if qs.get_domain_version() < DOMAIN_LEVEL_6 &&
-(e.attribute_equality(Attribute::Class, &EntryClass::ServiceAccount.into()) ||
-e.attribute_equality(Attribute::Class, &EntryClass::SyncAccount.into())) &&
-!e.attribute_pres(Attribute::JwsEs256PrivateKey) {
-security_info!("regenerating jws es256 private key");
-let jwssigner = JwsEs256Signer::generate_es256()
-.map_err(|e| {
-admin_error!(err = ?e, "Unable to generate ES256 JwsSigner private key");
-OperationError::CryptographyError
-})?;
-let v = Value::JwsKeyEs256(jwssigner);
-e.add_ava(Attribute::JwsEs256PrivateKey, v);
-}
-
 Ok(())
 })
 }
@@ -10,7 +10,7 @@ impl Plugin for ValueDeny
         "plugin_value_deny"
     }

-    #[instrument(level = "debug", name = "base_pre_create_transform", skip_all)]
+    #[instrument(level = "debug", name = "denied_names_pre_create_transform", skip_all)]
     #[allow(clippy::cognitive_complexity)]
     fn pre_create_transform(
         qs: &mut QueryServerWriteTransaction,
@@ -19,9 +19,25 @@ impl Plugin for ValueDeny
     ) -> Result<(), OperationError> {
         let denied_names = qs.denied_names();

+        if denied_names.is_empty() {
+            // Nothing to check.
+            return Ok(());
+        }
+
         let mut pass = true;

         for entry in cand {
+            // If the entry doesn't have a uuid, it's invalid anyway and will fail schema.
+            if let Some(e_uuid) = entry.get_uuid() {
+                // SAFETY - Thanks to JpWarren blowing his nipper clean off, we need to
+                // assert that the break glass and system accounts are NOT subject to
+                // this process.
+                if e_uuid < DYNAMIC_RANGE_MINIMUM_UUID {
+                    // These entries are exempt
+                    continue;
+                }
+            }
+
             if let Some(name) = entry.get_ava_single_iname(Attribute::Name) {
                 if denied_names.contains(name) {
                     pass = false;
@@ -37,27 +53,24 @@ impl Plugin for ValueDeny
             }
         }

-    #[instrument(level = "debug", name = "base_pre_modify", skip_all)]
     fn pre_modify(
         qs: &mut QueryServerWriteTransaction,
-        _pre_cand: &[Arc<EntrySealedCommitted>],
+        pre_cand: &[Arc<EntrySealedCommitted>],
         cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
         _me: &ModifyEvent,
     ) -> Result<(), OperationError> {
-        Self::modify(qs, cand)
+        Self::modify(qs, pre_cand, cand)
     }

-    #[instrument(level = "debug", name = "base_pre_modify", skip_all)]
     fn pre_batch_modify(
         qs: &mut QueryServerWriteTransaction,
-        _pre_cand: &[Arc<EntrySealedCommitted>],
+        pre_cand: &[Arc<EntrySealedCommitted>],
         cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
         _me: &BatchModifyEvent,
     ) -> Result<(), OperationError> {
-        Self::modify(qs, cand)
+        Self::modify(qs, pre_cand, cand)
     }

-    #[instrument(level = "debug", name = "base::verify", skip_all)]
     fn verify(qs: &mut QueryServerReadTransaction) -> Vec<Result<(), ConsistencyError>> {
         let denied_names = qs.denied_names().clone();

@@ -68,7 +81,15 @@ impl Plugin for ValueDeny
         match qs.internal_search(filt) {
             Ok(entries) => {
                 for entry in entries {
-                    results.push(Err(ConsistencyError::DeniedName(entry.get_uuid())));
+                    let e_uuid = entry.get_uuid();
+                    // SAFETY - Thanks to JpWarren blowing his nipper clean off, we need to
+                    // assert that the break glass accounts are NOT subject to this process.
+                    if e_uuid < DYNAMIC_RANGE_MINIMUM_UUID {
+                        // These entries are exempt
+                        continue;
+                    }
+
+                    results.push(Err(ConsistencyError::DeniedName(e_uuid)));
                 }
             }
             Err(err) => {
@@ -83,17 +104,37 @@ impl Plugin for ValueDeny
 }

 impl ValueDeny {
+    #[instrument(level = "debug", name = "denied_names_modify", skip_all)]
     fn modify(
         qs: &mut QueryServerWriteTransaction,
-        cand: &mut Vec<Entry<EntryInvalid, EntryCommitted>>,
+        pre_cand: &[Arc<EntrySealedCommitted>],
+        cand: &mut [EntryInvalidCommitted],
     ) -> Result<(), OperationError> {
         let denied_names = qs.denied_names();

+        if denied_names.is_empty() {
+            // Nothing to check.
+            return Ok(());
+        }
+
         let mut pass = true;

-        for entry in cand {
-            if let Some(name) = entry.get_ava_single_iname(Attribute::Name) {
-                if denied_names.contains(name) {
+        for (pre_entry, post_entry) in pre_cand.iter().zip(cand.iter()) {
+            // If the entry doesn't have a uuid, it's invalid anyway and will fail schema.
+            let e_uuid = pre_entry.get_uuid();
+            // SAFETY - Thanks to JpWarren blowing his nipper clean off, we need to
+            // assert that the break glass accounts are NOT subject to this process.
+            if e_uuid < DYNAMIC_RANGE_MINIMUM_UUID {
+                // These entries are exempt
+                continue;
+            }
+
+            let pre_name = pre_entry.get_ava_single_iname(Attribute::Name);
+            let post_name = post_entry.get_ava_single_iname(Attribute::Name);
+
+            if let Some(name) = post_name {
+                // Only if the name is changing, and is denied.
+                if pre_name != post_name && denied_names.contains(name) {
                     pass = false;
                     error!(?name, "name denied by system configuration");
                 }
@@ -117,10 +158,10 @@ mod tests {

         let me_inv_m = ModifyEvent::new_internal_invalid(
             filter!(f_eq(Attribute::Uuid, PVUUID_SYSTEM_CONFIG.clone())),
-            ModifyList::new_list(vec![Modify::Present(
-                Attribute::DeniedName,
-                Value::new_iname("tobias"),
-            )]),
+            ModifyList::new_list(vec![
+                Modify::Present(Attribute::DeniedName, Value::new_iname("tobias")),
+                Modify::Present(Attribute::DeniedName, Value::new_iname("ellie")),
+            ]),
         );
         assert!(server_txn.modify(&me_inv_m).is_ok());

@@ -148,30 +189,103 @@ mod tests {

     #[qs_test]
     async fn test_valuedeny_modify(server: &QueryServer) {
-        setup_name_deny(server).await;
+        // Create an entry that has a name which will become denied to test how it
+        // interacts.
         let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
-        let t_uuid = Uuid::new_v4();
+        let e_uuid = Uuid::new_v4();
         assert!(server_txn
             .internal_create(vec![entry_init!(
                 (Attribute::Class, EntryClass::Object.to_value()),
                 (Attribute::Class, EntryClass::Account.to_value()),
                 (Attribute::Class, EntryClass::Person.to_value()),
-                (Attribute::Name, Value::new_iname("newname")),
-                (Attribute::Uuid, Value::Uuid(t_uuid)),
-                (Attribute::Description, Value::new_utf8s("Tobias")),
-                (Attribute::DisplayName, Value::new_utf8s("Tobias"))
+                (Attribute::Name, Value::new_iname("ellie")),
+                (Attribute::Uuid, Value::Uuid(e_uuid)),
+                (Attribute::Description, Value::new_utf8s("Ellie Meow")),
+                (Attribute::DisplayName, Value::new_utf8s("Ellie Meow"))
             ),])
             .is_ok());

-        // Now mod it
+        assert!(server_txn.commit().is_ok());
+
+        setup_name_deny(server).await;
+
+        let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        // Attempt to mod ellie.
+
+        // Can mod a different attribute
         assert!(server_txn
             .internal_modify_uuid(
-                t_uuid,
+                e_uuid,
+                &ModifyList::new_purge_and_set(Attribute::DisplayName, Value::new_utf8s("tobias"))
+            )
+            .is_ok());
+
+        // Can't mod to another invalid name.
+        assert!(server_txn
+            .internal_modify_uuid(
+                e_uuid,
                 &ModifyList::new_purge_and_set(Attribute::Name, Value::new_iname("tobias"))
             )
             .is_err());

+        // Can mod to a valid name.
+        assert!(server_txn
+            .internal_modify_uuid(
+                e_uuid,
+                &ModifyList::new_purge_and_set(
+                    Attribute::Name,
+                    Value::new_iname("miss_meowington")
+                )
+            )
+            .is_ok());
+
+        // Now mod from the valid name to an invalid one.
+        assert!(server_txn
+            .internal_modify_uuid(
+                e_uuid,
+                &ModifyList::new_purge_and_set(Attribute::Name, Value::new_iname("tobias"))
+            )
+            .is_err());
+
+        assert!(server_txn.commit().is_ok());
+    }
+
+    #[qs_test]
+    async fn test_valuedeny_jpwarren_special(server: &QueryServer) {
+        // Assert that our break glass accounts are exempt from this processing.
+        let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        let me_inv_m = ModifyEvent::new_internal_invalid(
+            filter!(f_eq(Attribute::Uuid, PVUUID_SYSTEM_CONFIG.clone())),
+            ModifyList::new_list(vec![
+                Modify::Present(Attribute::DeniedName, Value::new_iname("admin")),
+                Modify::Present(Attribute::DeniedName, Value::new_iname("idm_admin")),
+            ]),
+        );
+        assert!(server_txn.modify(&me_inv_m).is_ok());
+        assert!(server_txn.commit().is_ok());
+
+        let mut server_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        assert!(server_txn
+            .internal_modify_uuid(
+                UUID_IDM_ADMIN,
+                &ModifyList::new_purge_and_set(
+                    Attribute::DisplayName,
+                    Value::new_utf8s("Idm Admin")
+                )
+            )
+            .is_ok());
+
+        assert!(server_txn
+            .internal_modify_uuid(
+                UUID_ADMIN,
+                &ModifyList::new_purge_and_set(Attribute::DisplayName, Value::new_utf8s("Admin"))
+            )
+            .is_ok());
+
+        assert!(server_txn.commit().is_ok());
     }

     #[qs_test]
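Note on the checks above: they reduce to two predicates. Entries whose UUID sits below the dynamic allocation range (system and break glass accounts) are exempt, and a modification is only rejected when the name actually changes to a denied value. The following is a minimal standalone sketch of that decision, not the plugin itself; the constant value is hypothetical and it uses plain std and uuid (1.x) types in place of the server's own.

use std::collections::HashSet;
use uuid::Uuid;

// Hypothetical stand-in value; the real DYNAMIC_RANGE_MINIMUM_UUID lives in the constants module.
const DYNAMIC_RANGE_MINIMUM_UUID: Uuid = Uuid::from_u128(0x0000_0000_0000_0000_0000_ffff_0000_0000);

// Returns true when the denied-names rule should reject this modification.
fn name_change_denied(
    entry_uuid: Uuid,
    pre_name: Option<&str>,
    post_name: Option<&str>,
    denied_names: &HashSet<String>,
) -> bool {
    // Entries below the dynamic range (break glass / system accounts) are exempt.
    if entry_uuid < DYNAMIC_RANGE_MINIMUM_UUID {
        return false;
    }
    match post_name {
        // Only reject when the name is actually changing to a denied value.
        Some(name) => pre_name != post_name && denied_names.contains(name),
        None => false,
    }
}

fn main() {
    let denied: HashSet<String> = HashSet::from(["tobias".to_string()]);
    let user = Uuid::from_u128(u128::MAX); // well above the reserved range
    assert!(name_change_denied(user, Some("ellie"), Some("tobias"), &denied));
    // No change to the name: allowed, even though the value is denied.
    assert!(!name_change_denied(user, Some("tobias"), Some("tobias"), &denied));
    // Below the reserved range: exempt.
    assert!(!name_change_denied(Uuid::nil(), Some("x"), Some("tobias"), &denied));
}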
@@ -158,7 +158,7 @@ impl QueryServer

         // If we are new enough to support patches, and we are lower than the target patch level
         // then a reload will be applied after we raise the patch level.
-        if domain_target_level >= DOMAIN_LEVEL_7 && domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
+        if domain_patch_level < DOMAIN_TGT_PATCH_LEVEL {
             write_txn
                 .internal_modify_uuid(
                     UUID_DOMAIN_INFO,
@@ -294,346 +294,6 @@ impl QueryServerWriteTransaction<'_> {
         }
     }

/// Migration domain level 6 to 7
|
|
||||||
#[instrument(level = "info", skip_all)]
|
|
||||||
pub(crate) fn migrate_domain_6_to_7(&mut self) -> Result<(), OperationError> {
|
|
||||||
if !cfg!(test) && DOMAIN_MAX_LEVEL < DOMAIN_LEVEL_7 {
|
|
||||||
error!("Unable to raise domain level from 6 to 7.");
|
|
||||||
return Err(OperationError::MG0004DomainLevelInDevelopment);
|
|
||||||
}
|
|
||||||
|
|
||||||
// ============== Apply constraints ===============
|
|
||||||
|
|
||||||
// Due to changes in gidnumber allocation, in the *extremely* unlikely
|
|
||||||
// case that a user's ID was generated outside the valid range, we re-request
|
|
||||||
// the creation of their gid number to proceed.
|
|
||||||
let filter = filter!(f_and!([
|
|
||||||
f_or!([
|
|
||||||
f_eq(Attribute::Class, EntryClass::PosixAccount.into()),
|
|
||||||
f_eq(Attribute::Class, EntryClass::PosixGroup.into())
|
|
||||||
]),
|
|
||||||
// This logic gets a bit messy but it would be:
|
|
||||||
// If ! (
|
|
||||||
// (GID_REGULAR_USER_MIN < value < GID_REGULAR_USER_MAX) ||
|
|
||||||
// (GID_UNUSED_A_MIN < value < GID_UNUSED_A_MAX) ||
|
|
||||||
// (GID_UNUSED_B_MIN < value < GID_UNUSED_B_MAX) ||
|
|
||||||
// (GID_UNUSED_C_MIN < value < GID_UNUSED_D_MAX)
|
|
||||||
// )
|
|
||||||
f_andnot(f_or!([
|
|
||||||
f_and!([
|
|
||||||
// The gid value must be less than GID_REGULAR_USER_MAX
|
|
||||||
f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_REGULAR_USER_MAX)
|
|
||||||
),
|
|
||||||
// This bit of mental gymnastics is "greater than".
|
|
||||||
// The gid value must not be less than USER_MIN
|
|
||||||
f_andnot(f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_REGULAR_USER_MIN)
|
|
||||||
))
|
|
||||||
]),
|
|
||||||
f_and!([
|
|
||||||
f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_A_MAX)
|
|
||||||
),
|
|
||||||
f_andnot(f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_A_MIN)
|
|
||||||
))
|
|
||||||
]),
|
|
||||||
f_and!([
|
|
||||||
f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_B_MAX)
|
|
||||||
),
|
|
||||||
f_andnot(f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_B_MIN)
|
|
||||||
))
|
|
||||||
]),
|
|
||||||
// If both of these conditions are true we get:
|
|
||||||
// C_MIN < value < D_MAX, which the outer and-not inverts.
|
|
||||||
f_and!([
|
|
||||||
// The gid value must be less than GID_UNUSED_D_MAX
|
|
||||||
f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_D_MAX)
|
|
||||||
),
|
|
||||||
// This bit of mental gymnastics is "greater than".
|
|
||||||
// The gid value must not be less than C_MIN
|
|
||||||
f_andnot(f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_C_MIN)
|
|
||||||
))
|
|
||||||
]),
|
|
||||||
]))
|
|
||||||
]));
|
|
||||||
|
|
||||||
let results = self.internal_search(filter).map_err(|err| {
|
|
||||||
error!(?err, "migrate_domain_6_to_7 -> Error");
|
|
||||||
err
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if !results.is_empty() {
|
|
||||||
error!("Unable to proceed. Not all entries meet gid/uid constraints.");
|
|
||||||
for entry in results {
|
|
||||||
error!(gid_invalid = ?entry.get_display_id());
|
|
||||||
}
|
|
||||||
return Err(OperationError::MG0005GidConstraintsNotMet);
|
|
||||||
}
|
|
||||||
|
|
||||||
// =========== Apply changes ==============
|
|
||||||
|
|
||||||
// For each oauth2 client, if it is missing a landing page then we clone the origin
|
|
||||||
// into landing. This is because previously we implied the landing to be origin if
|
|
||||||
// unset, but now landing is the primary url and implies an origin.
|
|
||||||
let filter = filter!(f_and!([
|
|
||||||
f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
|
|
||||||
f_pres(Attribute::OAuth2RsOrigin),
|
|
||||||
f_andnot(f_pres(Attribute::OAuth2RsOriginLanding)),
|
|
||||||
]));
|
|
||||||
|
|
||||||
let pre_candidates = self.internal_search(filter).map_err(|err| {
|
|
||||||
error!(?err, "migrate_domain_6_to_7 internal search failure");
|
|
||||||
err
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let modset: Vec<_> = pre_candidates
|
|
||||||
.into_iter()
|
|
||||||
.filter_map(|ent| {
|
|
||||||
ent.get_ava_single_url(Attribute::OAuth2RsOrigin)
|
|
||||||
.map(|origin_url| {
|
|
||||||
// Copy the origin url to the landing.
|
|
||||||
let modlist = vec![Modify::Present(
|
|
||||||
Attribute::OAuth2RsOriginLanding,
|
|
||||||
Value::Url(origin_url.clone()),
|
|
||||||
)];
|
|
||||||
|
|
||||||
(ent.get_uuid(), ModifyList::new_list(modlist))
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
// If there is nothing, we don't need to do anything.
|
|
||||||
if !modset.is_empty() {
|
|
||||||
self.internal_batch_modify(modset.into_iter())?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do this before schema change since domain info has cookie key
|
|
||||||
// as may at this point.
|
|
||||||
//
|
|
||||||
// Domain info should have the attribute private cookie key removed.
|
|
||||||
let modlist = ModifyList::new_list(vec![
|
|
||||||
Modify::Purged(Attribute::PrivateCookieKey),
|
|
||||||
Modify::Purged(Attribute::Es256PrivateKeyDer),
|
|
||||||
Modify::Purged(Attribute::FernetPrivateKeyStr),
|
|
||||||
]);
|
|
||||||
|
|
||||||
self.internal_modify_uuid(UUID_DOMAIN_INFO, &modlist)?;
|
|
||||||
|
|
||||||
let filter = filter!(f_or!([
|
|
||||||
f_eq(Attribute::Class, EntryClass::ServiceAccount.into()),
|
|
||||||
f_eq(Attribute::Class, EntryClass::SyncAccount.into())
|
|
||||||
]));
|
|
||||||
|
|
||||||
let modlist = ModifyList::new_list(vec![Modify::Purged(Attribute::JwsEs256PrivateKey)]);
|
|
||||||
|
|
||||||
self.internal_modify(&filter, &modlist)?;
|
|
||||||
|
|
||||||
// Now update schema
|
|
||||||
let idm_schema_classes = [
|
|
||||||
SCHEMA_ATTR_PATCH_LEVEL_DL7.clone().into(),
|
|
||||||
SCHEMA_ATTR_DOMAIN_DEVELOPMENT_TAINT_DL7.clone().into(),
|
|
||||||
SCHEMA_ATTR_REFERS_DL7.clone().into(),
|
|
||||||
SCHEMA_ATTR_CERTIFICATE_DL7.clone().into(),
|
|
||||||
SCHEMA_ATTR_OAUTH2_RS_ORIGIN_DL7.clone().into(),
|
|
||||||
SCHEMA_ATTR_OAUTH2_STRICT_REDIRECT_URI_DL7.clone().into(),
|
|
||||||
SCHEMA_ATTR_MAIL_DL7.clone().into(),
|
|
||||||
SCHEMA_ATTR_LEGALNAME_DL7.clone().into(),
|
|
||||||
SCHEMA_ATTR_DISPLAYNAME_DL7.clone().into(),
|
|
||||||
SCHEMA_CLASS_DOMAIN_INFO_DL7.clone().into(),
|
|
||||||
SCHEMA_CLASS_SERVICE_ACCOUNT_DL7.clone().into(),
|
|
||||||
SCHEMA_CLASS_SYNC_ACCOUNT_DL7.clone().into(),
|
|
||||||
SCHEMA_CLASS_CLIENT_CERTIFICATE_DL7.clone().into(),
|
|
||||||
SCHEMA_CLASS_OAUTH2_RS_DL7.clone().into(),
|
|
||||||
];
|
|
||||||
|
|
||||||
idm_schema_classes
|
|
||||||
.into_iter()
|
|
||||||
.try_for_each(|entry| self.internal_migrate_or_create(entry))
|
|
||||||
.map_err(|err| {
|
|
||||||
error!(?err, "migrate_domain_6_to_7 -> Error");
|
|
||||||
err
|
|
||||||
})?;
|
|
||||||
|
|
||||||
self.reload()?;
|
|
||||||
|
|
||||||
// Update access controls
|
|
||||||
let idm_data = [
|
|
||||||
BUILTIN_GROUP_PEOPLE_SELF_NAME_WRITE_DL7
|
|
||||||
.clone()
|
|
||||||
.try_into()?,
|
|
||||||
IDM_PEOPLE_SELF_MAIL_WRITE_DL7.clone().try_into()?,
|
|
||||||
BUILTIN_GROUP_CLIENT_CERTIFICATE_ADMINS_DL7
|
|
||||||
.clone()
|
|
||||||
.try_into()?,
|
|
||||||
IDM_HIGH_PRIVILEGE_DL7.clone().try_into()?,
|
|
||||||
];
|
|
||||||
|
|
||||||
idm_data
|
|
||||||
.into_iter()
|
|
||||||
.try_for_each(|entry| {
|
|
||||||
self.internal_migrate_or_create_ignore_attrs(entry, &[Attribute::Member])
|
|
||||||
})
|
|
||||||
.map_err(|err| {
|
|
||||||
error!(?err, "migrate_domain_6_to_7 -> Error");
|
|
||||||
err
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let idm_data = [
|
|
||||||
IDM_ACP_SELF_WRITE_DL7.clone().into(),
|
|
||||||
IDM_ACP_SELF_NAME_WRITE_DL7.clone().into(),
|
|
||||||
IDM_ACP_HP_CLIENT_CERTIFICATE_MANAGER_DL7.clone().into(),
|
|
||||||
IDM_ACP_OAUTH2_MANAGE_DL7.clone().into(),
|
|
||||||
];
|
|
||||||
|
|
||||||
idm_data
|
|
||||||
.into_iter()
|
|
||||||
.try_for_each(|entry| self.internal_migrate_or_create(entry))
|
|
||||||
.map_err(|err| {
|
|
||||||
error!(?err, "migrate_domain_6_to_7 -> Error");
|
|
||||||
err
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Patch Application - This triggers a one-shot fixup task for issue #2756
|
|
||||||
/// to correct the content of dyngroups after the dyngroups are now loaded.
|
|
||||||
#[instrument(level = "info", skip_all)]
|
|
||||||
pub(crate) fn migrate_domain_patch_level_1(&mut self) -> Result<(), OperationError> {
|
|
||||||
admin_warn!("applying domain patch 1.");
|
|
||||||
|
|
||||||
debug_assert!(*self.phase >= ServerPhase::SchemaReady);
|
|
||||||
|
|
||||||
let filter = filter!(f_eq(Attribute::Class, EntryClass::DynGroup.into()));
|
|
||||||
let modlist = modlist!([m_pres(Attribute::Class, &EntryClass::DynGroup.into())]);
|
|
||||||
|
|
||||||
self.internal_modify(&filter, &modlist).map(|()| {
|
|
||||||
info!("forced dyngroups to re-calculate memberships");
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Migration domain level 7 to 8
|
|
||||||
#[instrument(level = "info", skip_all)]
|
|
||||||
pub(crate) fn migrate_domain_7_to_8(&mut self) -> Result<(), OperationError> {
|
|
||||||
if !cfg!(test) && DOMAIN_MAX_LEVEL < DOMAIN_LEVEL_8 {
|
|
||||||
error!("Unable to raise domain level from 7 to 8.");
|
|
||||||
return Err(OperationError::MG0004DomainLevelInDevelopment);
|
|
||||||
}
|
|
||||||
|
|
||||||
// ============== Apply constraints ===============
|
|
||||||
let filter = filter!(f_and!([
|
|
||||||
f_eq(Attribute::Class, EntryClass::Account.into()),
|
|
||||||
f_pres(Attribute::PrimaryCredential),
|
|
||||||
]));
|
|
||||||
|
|
||||||
let results = self.internal_search(filter)?;
|
|
||||||
|
|
||||||
let affected_entries = results
|
|
||||||
.into_iter()
|
|
||||||
.filter_map(|entry| {
|
|
||||||
if entry
|
|
||||||
.get_ava_single_credential(Attribute::PrimaryCredential)
|
|
||||||
.map(|cred| cred.has_securitykey())
|
|
||||||
.unwrap_or_default()
|
|
||||||
{
|
|
||||||
Some(entry.get_display_id())
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
if !affected_entries.is_empty() {
|
|
||||||
error!("Unable to proceed. Some accounts still use legacy security keys, which need to be removed.");
|
|
||||||
for sk_present in affected_entries {
|
|
||||||
error!(%sk_present);
|
|
||||||
}
|
|
||||||
return Err(OperationError::MG0006SKConstraintsNotMet);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check oauth2 strict uri
|
|
||||||
let filter = filter!(f_and!([
|
|
||||||
f_eq(Attribute::Class, EntryClass::OAuth2ResourceServer.into()),
|
|
||||||
f_andnot(f_pres(Attribute::OAuth2StrictRedirectUri)),
|
|
||||||
]));
|
|
||||||
|
|
||||||
let results = self.internal_search(filter)?;
|
|
||||||
|
|
||||||
let affected_entries = results
|
|
||||||
.into_iter()
|
|
||||||
.map(|entry| entry.get_display_id())
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
if !affected_entries.is_empty() {
|
|
||||||
error!("Unable to proceed. Not all oauth2 clients have strict redirect verification enabled.");
|
|
||||||
for missing_oauth2_strict_redirect_uri in affected_entries {
|
|
||||||
error!(%missing_oauth2_strict_redirect_uri);
|
|
||||||
}
|
|
||||||
return Err(OperationError::MG0007Oauth2StrictConstraintsNotMet);
|
|
||||||
}
|
|
||||||
|
|
||||||
// =========== Apply changes ==============
|
|
||||||
|
|
||||||
let idm_schema_classes = [
|
|
||||||
SCHEMA_ATTR_LINKED_GROUP_DL8.clone().into(),
|
|
||||||
SCHEMA_ATTR_APPLICATION_PASSWORD_DL8.clone().into(),
|
|
||||||
SCHEMA_CLASS_APPLICATION_DL8.clone().into(),
|
|
||||||
SCHEMA_CLASS_PERSON_DL8.clone().into(),
|
|
||||||
SCHEMA_CLASS_DOMAIN_INFO_DL8.clone().into(),
|
|
||||||
SCHEMA_ATTR_ALLOW_PRIMARY_CRED_FALLBACK_DL8.clone().into(),
|
|
||||||
SCHEMA_CLASS_ACCOUNT_POLICY_DL8.clone().into(),
|
|
||||||
];
|
|
||||||
|
|
||||||
idm_schema_classes
|
|
||||||
.into_iter()
|
|
||||||
.try_for_each(|entry| self.internal_migrate_or_create(entry))
|
|
||||||
.map_err(|err| {
|
|
||||||
error!(?err, "migrate_domain_6_to_7 -> Error");
|
|
||||||
err
|
|
||||||
})?;
|
|
||||||
|
|
||||||
self.reload()?;
|
|
||||||
|
|
||||||
// Update access controls.
|
|
||||||
let idm_data = [
|
|
||||||
BUILTIN_GROUP_APPLICATION_ADMINS.clone().try_into()?,
|
|
||||||
IDM_ACP_SELF_READ_DL8.clone().into(),
|
|
||||||
IDM_ACP_SELF_WRITE_DL8.clone().into(),
|
|
||||||
IDM_ACP_APPLICATION_MANAGE_DL8.clone().into(),
|
|
||||||
IDM_ACP_APPLICATION_ENTRY_MANAGER_DL8.clone().into(),
|
|
||||||
// Add the new types for mail server
|
|
||||||
BUILTIN_GROUP_MAIL_SERVICE_ADMINS_DL8.clone().try_into()?,
|
|
||||||
BUILTIN_IDM_MAIL_SERVERS_DL8.clone().try_into()?,
|
|
||||||
IDM_ACP_MAIL_SERVERS_DL8.clone().into(),
|
|
||||||
IDM_ACP_DOMAIN_ADMIN_DL8.clone().into(),
|
|
||||||
IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL8.clone().into(),
|
|
||||||
];
|
|
||||||
|
|
||||||
idm_data
|
|
||||||
.into_iter()
|
|
||||||
.try_for_each(|entry| self.internal_migrate_or_create(entry))
|
|
||||||
.map_err(|err| {
|
|
||||||
error!(?err, "migrate_domain_7_to_8 -> Error");
|
|
||||||
err
|
|
||||||
})?;
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
     /// Migration domain level 8 to 9 (1.5.0)
     #[instrument(level = "info", skip_all)]
     pub(crate) fn migrate_domain_8_to_9(&mut self) -> Result<(), OperationError> {
@@ -764,6 +424,24 @@ impl QueryServerWriteTransaction<'_> {
             return Err(OperationError::MG0004DomainLevelInDevelopment);
         }

+        // =========== Apply changes ==============
+
+        // Now update schema
+        let idm_schema_changes = [
+            SCHEMA_ATTR_DENIED_NAME_DL10.clone().into(),
+            SCHEMA_CLASS_DOMAIN_INFO_DL10.clone().into(),
+        ];
+
+        idm_schema_changes
+            .into_iter()
+            .try_for_each(|entry| self.internal_migrate_or_create(entry))
+            .map_err(|err| {
+                error!(?err, "migrate_domain_9_to_10 -> Error");
+                err
+            })?;
+
+        self.reload()?;
+
         Ok(())
     }

@ -828,7 +506,7 @@ impl QueryServerWriteTransaction<'_> {
|
||||||
//
|
//
|
||||||
// DO NOT MODIFY THIS DEFINITION
|
// DO NOT MODIFY THIS DEFINITION
|
||||||
let idm_schema: Vec<EntryInitNew> = vec![
|
let idm_schema: Vec<EntryInitNew> = vec![
|
||||||
SCHEMA_ATTR_MAIL.clone().into(),
|
// SCHEMA_ATTR_MAIL.clone().into(),
|
||||||
SCHEMA_ATTR_ACCOUNT_EXPIRE.clone().into(),
|
SCHEMA_ATTR_ACCOUNT_EXPIRE.clone().into(),
|
||||||
SCHEMA_ATTR_ACCOUNT_VALID_FROM.clone().into(),
|
SCHEMA_ATTR_ACCOUNT_VALID_FROM.clone().into(),
|
||||||
SCHEMA_ATTR_API_TOKEN_SESSION.clone().into(),
|
SCHEMA_ATTR_API_TOKEN_SESSION.clone().into(),
|
||||||
|
@ -838,7 +516,7 @@ impl QueryServerWriteTransaction<'_> {
|
||||||
SCHEMA_ATTR_BADLIST_PASSWORD.clone().into(),
|
SCHEMA_ATTR_BADLIST_PASSWORD.clone().into(),
|
||||||
SCHEMA_ATTR_CREDENTIAL_UPDATE_INTENT_TOKEN.clone().into(),
|
SCHEMA_ATTR_CREDENTIAL_UPDATE_INTENT_TOKEN.clone().into(),
|
||||||
SCHEMA_ATTR_ATTESTED_PASSKEYS.clone().into(),
|
SCHEMA_ATTR_ATTESTED_PASSKEYS.clone().into(),
|
||||||
SCHEMA_ATTR_DISPLAYNAME.clone().into(),
|
// SCHEMA_ATTR_DISPLAYNAME.clone().into(),
|
||||||
SCHEMA_ATTR_DOMAIN_DISPLAY_NAME.clone().into(),
|
SCHEMA_ATTR_DOMAIN_DISPLAY_NAME.clone().into(),
|
||||||
SCHEMA_ATTR_DOMAIN_LDAP_BASEDN.clone().into(),
|
SCHEMA_ATTR_DOMAIN_LDAP_BASEDN.clone().into(),
|
||||||
SCHEMA_ATTR_DOMAIN_NAME.clone().into(),
|
SCHEMA_ATTR_DOMAIN_NAME.clone().into(),
|
||||||
|
@ -853,7 +531,7 @@ impl QueryServerWriteTransaction<'_> {
|
||||||
SCHEMA_ATTR_GIDNUMBER.clone().into(),
|
SCHEMA_ATTR_GIDNUMBER.clone().into(),
|
||||||
SCHEMA_ATTR_GRANT_UI_HINT.clone().into(),
|
SCHEMA_ATTR_GRANT_UI_HINT.clone().into(),
|
||||||
SCHEMA_ATTR_JWS_ES256_PRIVATE_KEY.clone().into(),
|
SCHEMA_ATTR_JWS_ES256_PRIVATE_KEY.clone().into(),
|
||||||
SCHEMA_ATTR_LEGALNAME.clone().into(),
|
// SCHEMA_ATTR_LEGALNAME.clone().into(),
|
||||||
SCHEMA_ATTR_LOGINSHELL.clone().into(),
|
SCHEMA_ATTR_LOGINSHELL.clone().into(),
|
||||||
SCHEMA_ATTR_NAME_HISTORY.clone().into(),
|
SCHEMA_ATTR_NAME_HISTORY.clone().into(),
|
||||||
SCHEMA_ATTR_NSUNIQUEID.clone().into(),
|
SCHEMA_ATTR_NSUNIQUEID.clone().into(),
|
||||||
|
@ -867,7 +545,7 @@ impl QueryServerWriteTransaction<'_> {
|
||||||
SCHEMA_ATTR_OAUTH2_RS_IMPLICIT_SCOPES.clone().into(),
|
SCHEMA_ATTR_OAUTH2_RS_IMPLICIT_SCOPES.clone().into(),
|
||||||
SCHEMA_ATTR_OAUTH2_RS_NAME.clone().into(),
|
SCHEMA_ATTR_OAUTH2_RS_NAME.clone().into(),
|
||||||
SCHEMA_ATTR_OAUTH2_RS_ORIGIN_LANDING.clone().into(),
|
SCHEMA_ATTR_OAUTH2_RS_ORIGIN_LANDING.clone().into(),
|
||||||
SCHEMA_ATTR_OAUTH2_RS_ORIGIN.clone().into(),
|
// SCHEMA_ATTR_OAUTH2_RS_ORIGIN.clone().into(),
|
||||||
SCHEMA_ATTR_OAUTH2_RS_SCOPE_MAP.clone().into(),
|
SCHEMA_ATTR_OAUTH2_RS_SCOPE_MAP.clone().into(),
|
||||||
SCHEMA_ATTR_OAUTH2_RS_SUP_SCOPE_MAP.clone().into(),
|
SCHEMA_ATTR_OAUTH2_RS_SUP_SCOPE_MAP.clone().into(),
|
||||||
SCHEMA_ATTR_OAUTH2_RS_TOKEN_KEY.clone().into(),
|
SCHEMA_ATTR_OAUTH2_RS_TOKEN_KEY.clone().into(),
|
||||||
|
@ -902,6 +580,17 @@ impl QueryServerWriteTransaction<'_> {
|
||||||
// DL7
|
// DL7
|
||||||
SCHEMA_ATTR_PATCH_LEVEL_DL7.clone().into(),
|
SCHEMA_ATTR_PATCH_LEVEL_DL7.clone().into(),
|
||||||
SCHEMA_ATTR_DOMAIN_DEVELOPMENT_TAINT_DL7.clone().into(),
|
SCHEMA_ATTR_DOMAIN_DEVELOPMENT_TAINT_DL7.clone().into(),
|
||||||
|
SCHEMA_ATTR_REFERS_DL7.clone().into(),
|
||||||
|
SCHEMA_ATTR_CERTIFICATE_DL7.clone().into(),
|
||||||
|
SCHEMA_ATTR_OAUTH2_RS_ORIGIN_DL7.clone().into(),
|
||||||
|
SCHEMA_ATTR_OAUTH2_STRICT_REDIRECT_URI_DL7.clone().into(),
|
||||||
|
SCHEMA_ATTR_MAIL_DL7.clone().into(),
|
||||||
|
SCHEMA_ATTR_LEGALNAME_DL7.clone().into(),
|
||||||
|
SCHEMA_ATTR_DISPLAYNAME_DL7.clone().into(),
|
||||||
|
// DL8
|
||||||
|
SCHEMA_ATTR_LINKED_GROUP_DL8.clone().into(),
|
||||||
|
SCHEMA_ATTR_APPLICATION_PASSWORD_DL8.clone().into(),
|
||||||
|
SCHEMA_ATTR_ALLOW_PRIMARY_CRED_FALLBACK_DL8.clone().into(),
|
||||||
];
|
];
|
||||||
|
|
||||||
let r = idm_schema
|
let r = idm_schema
|
||||||
|
@ -928,14 +617,14 @@ impl QueryServerWriteTransaction<'_> {
|
||||||
// DL4
|
// DL4
|
||||||
SCHEMA_CLASS_OAUTH2_RS_PUBLIC_DL4.clone().into(),
|
SCHEMA_CLASS_OAUTH2_RS_PUBLIC_DL4.clone().into(),
|
||||||
// DL5
|
// DL5
|
||||||
SCHEMA_CLASS_PERSON_DL5.clone().into(),
|
// SCHEMA_CLASS_PERSON_DL5.clone().into(),
|
||||||
SCHEMA_CLASS_ACCOUNT_DL5.clone().into(),
|
SCHEMA_CLASS_ACCOUNT_DL5.clone().into(),
|
||||||
SCHEMA_CLASS_OAUTH2_RS_DL5.clone().into(),
|
// SCHEMA_CLASS_OAUTH2_RS_DL5.clone().into(),
|
||||||
SCHEMA_CLASS_OAUTH2_RS_BASIC_DL5.clone().into(),
|
SCHEMA_CLASS_OAUTH2_RS_BASIC_DL5.clone().into(),
|
||||||
// DL6
|
// DL6
|
||||||
SCHEMA_CLASS_ACCOUNT_POLICY_DL6.clone().into(),
|
// SCHEMA_CLASS_ACCOUNT_POLICY_DL6.clone().into(),
|
||||||
SCHEMA_CLASS_SERVICE_ACCOUNT_DL6.clone().into(),
|
// SCHEMA_CLASS_SERVICE_ACCOUNT_DL6.clone().into(),
|
||||||
SCHEMA_CLASS_SYNC_ACCOUNT_DL6.clone().into(),
|
// SCHEMA_CLASS_SYNC_ACCOUNT_DL6.clone().into(),
|
||||||
SCHEMA_CLASS_GROUP_DL6.clone().into(),
|
SCHEMA_CLASS_GROUP_DL6.clone().into(),
|
||||||
SCHEMA_CLASS_KEY_PROVIDER_DL6.clone().into(),
|
SCHEMA_CLASS_KEY_PROVIDER_DL6.clone().into(),
|
||||||
SCHEMA_CLASS_KEY_PROVIDER_INTERNAL_DL6.clone().into(),
|
SCHEMA_CLASS_KEY_PROVIDER_INTERNAL_DL6.clone().into(),
|
||||||
|
@ -943,7 +632,18 @@ impl QueryServerWriteTransaction<'_> {
|
||||||
SCHEMA_CLASS_KEY_OBJECT_JWT_ES256_DL6.clone().into(),
|
SCHEMA_CLASS_KEY_OBJECT_JWT_ES256_DL6.clone().into(),
|
||||||
SCHEMA_CLASS_KEY_OBJECT_JWE_A128GCM_DL6.clone().into(),
|
SCHEMA_CLASS_KEY_OBJECT_JWE_A128GCM_DL6.clone().into(),
|
||||||
SCHEMA_CLASS_KEY_OBJECT_INTERNAL_DL6.clone().into(),
|
SCHEMA_CLASS_KEY_OBJECT_INTERNAL_DL6.clone().into(),
|
||||||
SCHEMA_CLASS_DOMAIN_INFO_DL6.clone().into(),
|
// SCHEMA_CLASS_DOMAIN_INFO_DL6.clone().into(),
|
||||||
|
// DL7
|
||||||
|
// SCHEMA_CLASS_DOMAIN_INFO_DL7.clone().into(),
|
||||||
|
SCHEMA_CLASS_SERVICE_ACCOUNT_DL7.clone().into(),
|
||||||
|
SCHEMA_CLASS_SYNC_ACCOUNT_DL7.clone().into(),
|
||||||
|
SCHEMA_CLASS_CLIENT_CERTIFICATE_DL7.clone().into(),
|
||||||
|
SCHEMA_CLASS_OAUTH2_RS_DL7.clone().into(),
|
||||||
|
// DL8
|
||||||
|
SCHEMA_CLASS_ACCOUNT_POLICY_DL8.clone().into(),
|
||||||
|
SCHEMA_CLASS_APPLICATION_DL8.clone().into(),
|
||||||
|
SCHEMA_CLASS_PERSON_DL8.clone().into(),
|
||||||
|
SCHEMA_CLASS_DOMAIN_INFO_DL8.clone().into(),
|
||||||
];
|
];
|
||||||
|
|
||||||
let r: Result<(), _> = idm_schema_classes_dl1
|
let r: Result<(), _> = idm_schema_classes_dl1
|
||||||
|
@ -1034,10 +734,10 @@ impl QueryServerWriteTransaction<'_> {
|
||||||
IDM_ACP_RADIUS_SERVERS_V1.clone(),
|
IDM_ACP_RADIUS_SERVERS_V1.clone(),
|
||||||
IDM_ACP_RADIUS_SECRET_MANAGE_V1.clone(),
|
IDM_ACP_RADIUS_SECRET_MANAGE_V1.clone(),
|
||||||
IDM_ACP_PEOPLE_SELF_WRITE_MAIL_V1.clone(),
|
IDM_ACP_PEOPLE_SELF_WRITE_MAIL_V1.clone(),
|
||||||
IDM_ACP_SELF_READ_V1.clone(),
|
// IDM_ACP_SELF_READ_V1.clone(),
|
||||||
IDM_ACP_SELF_WRITE_V1.clone(),
|
// IDM_ACP_SELF_WRITE_V1.clone(),
|
||||||
IDM_ACP_ACCOUNT_SELF_WRITE_V1.clone(),
|
IDM_ACP_ACCOUNT_SELF_WRITE_V1.clone(),
|
||||||
IDM_ACP_SELF_NAME_WRITE_V1.clone(),
|
// IDM_ACP_SELF_NAME_WRITE_V1.clone(),
|
||||||
IDM_ACP_ALL_ACCOUNTS_POSIX_READ_V1.clone(),
|
IDM_ACP_ALL_ACCOUNTS_POSIX_READ_V1.clone(),
|
||||||
IDM_ACP_SYSTEM_CONFIG_ACCOUNT_POLICY_MANAGE_V1.clone(),
|
IDM_ACP_SYSTEM_CONFIG_ACCOUNT_POLICY_MANAGE_V1.clone(),
|
||||||
IDM_ACP_GROUP_UNIX_MANAGE_V1.clone(),
|
IDM_ACP_GROUP_UNIX_MANAGE_V1.clone(),
|
||||||
|
@ -1059,13 +759,26 @@ impl QueryServerWriteTransaction<'_> {
|
||||||
IDM_ACP_SERVICE_ACCOUNT_MANAGE_V1.clone(),
|
IDM_ACP_SERVICE_ACCOUNT_MANAGE_V1.clone(),
|
||||||
// DL4
|
// DL4
|
||||||
// DL5
|
// DL5
|
||||||
IDM_ACP_OAUTH2_MANAGE_DL5.clone(),
|
// IDM_ACP_OAUTH2_MANAGE_DL5.clone(),
|
||||||
// DL6
|
// DL6
|
||||||
IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL6.clone(),
|
// IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL6.clone(),
|
||||||
IDM_ACP_PEOPLE_CREATE_DL6.clone(),
|
IDM_ACP_PEOPLE_CREATE_DL6.clone(),
|
||||||
IDM_ACP_GROUP_MANAGE_DL6.clone(),
|
IDM_ACP_GROUP_MANAGE_DL6.clone(),
|
||||||
IDM_ACP_ACCOUNT_MAIL_READ_DL6.clone(),
|
IDM_ACP_ACCOUNT_MAIL_READ_DL6.clone(),
|
||||||
IDM_ACP_DOMAIN_ADMIN_DL6.clone(),
|
// IDM_ACP_DOMAIN_ADMIN_DL6.clone(),
|
||||||
|
// DL7
|
||||||
|
// IDM_ACP_SELF_WRITE_DL7.clone(),
|
||||||
|
IDM_ACP_SELF_NAME_WRITE_DL7.clone(),
|
||||||
|
IDM_ACP_HP_CLIENT_CERTIFICATE_MANAGER_DL7.clone(),
|
||||||
|
IDM_ACP_OAUTH2_MANAGE_DL7.clone(),
|
||||||
|
// DL8
|
||||||
|
IDM_ACP_SELF_READ_DL8.clone(),
|
||||||
|
IDM_ACP_SELF_WRITE_DL8.clone(),
|
||||||
|
IDM_ACP_APPLICATION_MANAGE_DL8.clone(),
|
||||||
|
IDM_ACP_APPLICATION_ENTRY_MANAGER_DL8.clone(),
|
||||||
|
IDM_ACP_MAIL_SERVERS_DL8.clone(),
|
||||||
|
IDM_ACP_DOMAIN_ADMIN_DL8.clone(),
|
||||||
|
IDM_ACP_GROUP_ACCOUNT_POLICY_MANAGE_DL8.clone(),
|
||||||
];
|
];
|
||||||
|
|
||||||
let res: Result<(), _> = idm_entries
|
let res: Result<(), _> = idm_entries
|
||||||
|
@@ -1095,19 +808,6 @@ impl QueryServerReadTransaction<'_> {

         let mut report_items = Vec::with_capacity(1);

-        if current_level <= DOMAIN_LEVEL_6 && upgrade_level >= DOMAIN_LEVEL_7 {
-            let item = self
-                .domain_upgrade_check_6_to_7_gidnumber()
-                .map_err(|err| {
-                    error!(
-                        ?err,
-                        "Failed to perform domain upgrade check 6 to 7 - gidnumber"
-                    );
-                    err
-                })?;
-            report_items.push(item);
-        }
-
         if current_level <= DOMAIN_LEVEL_7 && upgrade_level >= DOMAIN_LEVEL_8 {
             let item = self
                 .domain_upgrade_check_7_to_8_security_keys()
@@ -1141,94 +841,6 @@ impl QueryServerReadTransaction<'_> {
         })
     }
|
|
||||||
pub(crate) fn domain_upgrade_check_6_to_7_gidnumber(
|
|
||||||
&mut self,
|
|
||||||
) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
|
|
||||||
let filter = filter!(f_and!([
|
|
||||||
f_or!([
|
|
||||||
f_eq(Attribute::Class, EntryClass::PosixAccount.into()),
|
|
||||||
f_eq(Attribute::Class, EntryClass::PosixGroup.into())
|
|
||||||
]),
|
|
||||||
// This logic gets a bit messy but it would be:
|
|
||||||
// If ! (
|
|
||||||
// (GID_REGULAR_USER_MIN < value < GID_REGULAR_USER_MAX) ||
|
|
||||||
// (GID_UNUSED_A_MIN < value < GID_UNUSED_A_MAX) ||
|
|
||||||
// (GID_UNUSED_B_MIN < value < GID_UNUSED_B_MAX) ||
|
|
||||||
// (GID_UNUSED_C_MIN < value < GID_UNUSED_D_MAX)
|
|
||||||
// )
|
|
||||||
f_andnot(f_or!([
|
|
||||||
f_and!([
|
|
||||||
// The gid value must be less than GID_REGULAR_USER_MAX
|
|
||||||
f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_REGULAR_USER_MAX)
|
|
||||||
),
|
|
||||||
// This bit of mental gymnastics is "greater than".
|
|
||||||
// The gid value must not be less than USER_MIN
|
|
||||||
f_andnot(f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_REGULAR_USER_MIN)
|
|
||||||
))
|
|
||||||
]),
|
|
||||||
f_and!([
|
|
||||||
f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_A_MAX)
|
|
||||||
),
|
|
||||||
f_andnot(f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_A_MIN)
|
|
||||||
))
|
|
||||||
]),
|
|
||||||
f_and!([
|
|
||||||
f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_B_MAX)
|
|
||||||
),
|
|
||||||
f_andnot(f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_B_MIN)
|
|
||||||
))
|
|
||||||
]),
|
|
||||||
// If both of these conditions are true we get:
|
|
||||||
// C_MIN < value < D_MAX, which the outer and-not inverts.
|
|
||||||
f_and!([
|
|
||||||
// The gid value must be less than GID_UNUSED_D_MAX
|
|
||||||
f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_D_MAX)
|
|
||||||
),
|
|
||||||
// This bit of mental gymnastics is "greater than".
|
|
||||||
// The gid value must not be less than C_MIN
|
|
||||||
f_andnot(f_lt(
|
|
||||||
Attribute::GidNumber,
|
|
||||||
PartialValue::Uint32(crate::plugins::gidnumber::GID_UNUSED_C_MIN)
|
|
||||||
))
|
|
||||||
]),
|
|
||||||
]))
|
|
||||||
]));
|
|
||||||
|
|
||||||
let results = self.internal_search(filter)?;
|
|
||||||
|
|
||||||
let affected_entries = results
|
|
||||||
.into_iter()
|
|
||||||
.map(|entry| entry.get_display_id())
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
let status = if affected_entries.is_empty() {
|
|
||||||
ProtoDomainUpgradeCheckStatus::Pass6To7Gidnumber
|
|
||||||
} else {
|
|
||||||
ProtoDomainUpgradeCheckStatus::Fail6To7Gidnumber
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(ProtoDomainUpgradeCheckItem {
|
|
||||||
status,
|
|
||||||
from_level: DOMAIN_LEVEL_6,
|
|
||||||
to_level: DOMAIN_LEVEL_7,
|
|
||||||
affected_entries,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn domain_upgrade_check_7_to_8_security_keys(
|
pub(crate) fn domain_upgrade_check_7_to_8_security_keys(
|
||||||
&mut self,
|
&mut self,
|
||||||
) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
|
) -> Result<ProtoDomainUpgradeCheckItem, OperationError> {
|
||||||
|
@ -1300,7 +912,7 @@ impl QueryServerReadTransaction<'_> {
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
|
// use super::{ProtoDomainUpgradeCheckItem, ProtoDomainUpgradeCheckStatus};
|
||||||
use crate::prelude::*;
|
use crate::prelude::*;
|
||||||
|
|
||||||
#[qs_test]
|
#[qs_test]
|
||||||
|
@ -1329,9 +941,8 @@ mod tests {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[qs_test(domain_level=DOMAIN_LEVEL_6)]
|
#[qs_test(domain_level=DOMAIN_LEVEL_8)]
|
||||||
async fn test_migrations_dl6_dl7(server: &QueryServer) {
|
async fn test_migrations_dl8_dl9(server: &QueryServer) {
|
||||||
// Assert our instance was setup to version 6
|
|
||||||
let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
|
let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
|
||||||
|
|
||||||
let db_domain_version = write_txn
|
let db_domain_version = write_txn
|
||||||
|
@ -1340,167 +951,95 @@ mod tests {
|
||||||
.get_ava_single_uint32(Attribute::Version)
|
.get_ava_single_uint32(Attribute::Version)
|
||||||
.expect("Attribute Version not present");
|
.expect("Attribute Version not present");
|
||||||
|
|
||||||
assert_eq!(db_domain_version, DOMAIN_LEVEL_6);
|
assert_eq!(db_domain_version, DOMAIN_LEVEL_8);
|
||||||
|
|
||||||
// Create an oauth2 client that doesn't have a landing url set.
|
|
||||||
let oauth2_client_uuid = Uuid::new_v4();
|
|
||||||
|
|
||||||
let ea: Entry<EntryInit, EntryNew> = entry_init!(
|
|
||||||
(Attribute::Class, EntryClass::Object.to_value()),
|
|
||||||
(Attribute::Class, EntryClass::Account.to_value()),
|
|
||||||
(Attribute::Uuid, Value::Uuid(oauth2_client_uuid)),
|
|
||||||
(
|
|
||||||
Attribute::Class,
|
|
||||||
EntryClass::OAuth2ResourceServer.to_value()
|
|
||||||
),
|
|
||||||
(
|
|
||||||
Attribute::Class,
|
|
||||||
EntryClass::OAuth2ResourceServerPublic.to_value()
|
|
||||||
),
|
|
||||||
(Attribute::Name, Value::new_iname("test_resource_server")),
|
|
||||||
(
|
|
||||||
Attribute::DisplayName,
|
|
||||||
Value::new_utf8s("test_resource_server")
|
|
||||||
),
|
|
||||||
(
|
|
||||||
Attribute::OAuth2RsOrigin,
|
|
||||||
Value::new_url_s("https://demo.example.com").unwrap()
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
write_txn
|
|
||||||
.internal_create(vec![ea])
|
|
||||||
.expect("Unable to create oauth2 client");
|
|
||||||
|
|
||||||
// Set the version to 7.
|
|
||||||
write_txn
|
|
||||||
.internal_apply_domain_migration(DOMAIN_LEVEL_7)
|
|
||||||
.expect("Unable to set domain level to version 7");
|
|
||||||
|
|
||||||
// post migration verification.
|
|
||||||
let domain_entry = write_txn
|
|
||||||
.internal_search_uuid(UUID_DOMAIN_INFO)
|
|
||||||
.expect("Unable to access domain entry");
|
|
||||||
|
|
||||||
assert!(!domain_entry.attribute_pres(Attribute::PrivateCookieKey));
|
|
||||||
|
|
||||||
let oauth2_entry = write_txn
|
|
||||||
.internal_search_uuid(oauth2_client_uuid)
|
|
||||||
.expect("Unable to access oauth2 client entry");
|
|
||||||
|
|
||||||
let origin = oauth2_entry
|
|
||||||
.get_ava_single_url(Attribute::OAuth2RsOrigin)
|
|
||||||
.expect("Unable to access oauth2 client origin");
|
|
||||||
|
|
||||||
// The origin should have been cloned to the landing.
|
|
||||||
let landing = oauth2_entry
|
|
||||||
.get_ava_single_url(Attribute::OAuth2RsOriginLanding)
|
|
||||||
.expect("Unable to access oauth2 client landing");
|
|
||||||
|
|
||||||
assert_eq!(origin, landing);
|
|
||||||
|
|
||||||
write_txn.commit().expect("Unable to commit");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[qs_test(domain_level=DOMAIN_LEVEL_7)]
|
|
||||||
async fn test_migrations_dl7_dl8(server: &QueryServer) {
|
|
||||||
// Assert our instance was setup to version 7
|
|
||||||
let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
|
|
||||||
|
|
||||||
let db_domain_version = write_txn
|
|
||||||
.internal_search_uuid(UUID_DOMAIN_INFO)
|
|
||||||
.expect("unable to access domain entry")
|
|
||||||
.get_ava_single_uint32(Attribute::Version)
|
|
||||||
.expect("Attribute Version not present");
|
|
||||||
|
|
||||||
assert_eq!(db_domain_version, DOMAIN_LEVEL_7);
|
|
||||||
|
|
||||||
// Create an oauth2 client that doesn't have a landing url set.
|
|
||||||
let oauth2_client_uuid = Uuid::new_v4();
|
|
||||||
|
|
||||||
let ea: Entry<EntryInit, EntryNew> = entry_init!(
|
|
||||||
(Attribute::Class, EntryClass::Object.to_value()),
|
|
||||||
(Attribute::Class, EntryClass::Account.to_value()),
|
|
||||||
(Attribute::Uuid, Value::Uuid(oauth2_client_uuid)),
|
|
||||||
(
|
|
||||||
Attribute::Class,
|
|
||||||
EntryClass::OAuth2ResourceServer.to_value()
|
|
||||||
),
|
|
||||||
(
|
|
||||||
Attribute::Class,
|
|
||||||
EntryClass::OAuth2ResourceServerPublic.to_value()
|
|
||||||
),
|
|
||||||
(Attribute::Name, Value::new_iname("test_resource_server")),
|
|
||||||
(
|
|
||||||
Attribute::DisplayName,
|
|
||||||
Value::new_utf8s("test_resource_server")
|
|
||||||
),
|
|
||||||
(
|
|
||||||
Attribute::OAuth2RsOriginLanding,
|
|
||||||
Value::new_url_s("https://demo.example.com/oauth2").unwrap()
|
|
||||||
),
|
|
||||||
(
|
|
||||||
Attribute::OAuth2RsOrigin,
|
|
||||||
Value::new_url_s("https://demo.example.com").unwrap()
|
|
||||||
)
|
|
||||||
);
|
|
||||||
|
|
||||||
write_txn
|
|
||||||
.internal_create(vec![ea])
|
|
||||||
.expect("Unable to create oauth2 client");
|
|
||||||
|
|
||||||
write_txn.commit().expect("Unable to commit");
|
write_txn.commit().expect("Unable to commit");
|
||||||
|
|
||||||
// pre migration verification.
|
// == pre migration verification. ==
|
||||||
// check we currently would fail a migration.
|
// check we currently would fail a migration.
|
||||||
|
|
||||||
let mut read_txn = server.read().await.unwrap();
|
// let mut read_txn = server.read().await.unwrap();
|
||||||
|
// drop(read_txn);
|
||||||
match read_txn.domain_upgrade_check_7_to_8_oauth2_strict_redirect_uri() {
|
|
||||||
Ok(ProtoDomainUpgradeCheckItem {
|
|
||||||
status: ProtoDomainUpgradeCheckStatus::Fail7To8Oauth2StrictRedirectUri,
|
|
||||||
..
|
|
||||||
}) => {
|
|
||||||
trace!("Failed as expected, very good.");
|
|
||||||
}
|
|
||||||
other => {
|
|
||||||
error!(?other);
|
|
||||||
unreachable!();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
drop(read_txn);
|
|
||||||
|
|
||||||
// Okay, fix the problem.
|
|
||||||
|
|
||||||
let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
|
let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
|
||||||
|
|
||||||
write_txn
|
// Fix any issues
|
||||||
.internal_modify_uuid(
|
|
||||||
oauth2_client_uuid,
|
|
||||||
&ModifyList::new_purge_and_set(
|
|
||||||
Attribute::OAuth2StrictRedirectUri,
|
|
||||||
Value::Bool(true),
|
|
||||||
),
|
|
||||||
)
|
|
||||||
.expect("Unable to enforce strict mode.");
|
|
||||||
|
|
||||||
// Set the version to 8.
|
// == Increase the version ==
|
||||||
write_txn
|
write_txn
|
||||||
.internal_apply_domain_migration(DOMAIN_LEVEL_8)
|
.internal_apply_domain_migration(DOMAIN_LEVEL_9)
|
||||||
.expect("Unable to set domain level to version 8");
|
.expect("Unable to set domain level to version 9");
|
||||||
|
|
||||||
// post migration verification.
|
// post migration verification.
|
||||||
|
|
||||||
write_txn.commit().expect("Unable to commit");
|
write_txn.commit().expect("Unable to commit");
|
||||||
}
|
}
|
||||||
|
|
||||||
-    #[qs_test(domain_level=DOMAIN_LEVEL_8)]
-    async fn test_migrations_dl8_dl9(_server: &QueryServer) {}
-
     #[qs_test(domain_level=DOMAIN_LEVEL_9)]
-    async fn test_migrations_dl9_dl10(_server: &QueryServer) {}
+    async fn test_migrations_dl9_dl10(server: &QueryServer) {
+        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        let db_domain_version = write_txn
+            .internal_search_uuid(UUID_DOMAIN_INFO)
+            .expect("unable to access domain entry")
+            .get_ava_single_uint32(Attribute::Version)
+            .expect("Attribute Version not present");
+
+        assert_eq!(db_domain_version, DOMAIN_LEVEL_9);
+
+        write_txn.commit().expect("Unable to commit");
+
+        // == pre migration verification. ==
+        // check we currently would fail a migration.
+
+        // let mut read_txn = server.read().await.unwrap();
+        // drop(read_txn);
+
+        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        // Fix any issues
+
+        // == Increase the version ==
+        write_txn
+            .internal_apply_domain_migration(DOMAIN_LEVEL_10)
+            .expect("Unable to set domain level to version 10");
+
+        // post migration verification.
+
+        write_txn.commit().expect("Unable to commit");
+    }
+
     #[qs_test(domain_level=DOMAIN_LEVEL_10)]
-    async fn test_migrations_dl10_dl11(_server: &QueryServer) {}
+    async fn test_migrations_dl10_dl11(server: &QueryServer) {
+        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        let db_domain_version = write_txn
+            .internal_search_uuid(UUID_DOMAIN_INFO)
+            .expect("unable to access domain entry")
+            .get_ava_single_uint32(Attribute::Version)
+            .expect("Attribute Version not present");
+
+        assert_eq!(db_domain_version, DOMAIN_LEVEL_10);
+
+        write_txn.commit().expect("Unable to commit");
+
+        // == pre migration verification. ==
+        // check we currently would fail a migration.
+
+        // let mut read_txn = server.read().await.unwrap();
+        // drop(read_txn);
+
+        let mut write_txn = server.write(duration_from_epoch_now()).await.unwrap();
+
+        // Fix any issues
+
+        // == Increase the version ==
+        write_txn
+            .internal_apply_domain_migration(DOMAIN_LEVEL_11)
+            .expect("Unable to set domain level to version 11");
+
+        // post migration verification.
+
+        write_txn.commit().expect("Unable to commit");
+    }
 }
@@ -1238,13 +1238,6 @@ pub trait QueryServerTransaction<'a> {
     }

     fn get_domain_key_object_handle(&self) -> Result<Arc<KeyObject>, OperationError> {
-        #[cfg(test)]
-        if self.get_domain_version() < DOMAIN_LEVEL_6 {
-            // We must be in tests, and this is a DL5 to 6 test. For this we'll just make
-            // an ephemeral provider.
-            return Ok(crate::server::keys::KeyObjectInternal::new_test());
-        };
-
         self.get_key_providers()
             .get_key_object_handle(UUID_DOMAIN_INFO)
             .ok_or(OperationError::KP0031KeyObjectNotFound)
@@ -2335,7 +2328,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         debug!(domain_previous_patch_level = ?previous_patch_level, domain_target_patch_level = ?domain_info_patch_level);

         // We have to check for DL0 since that's the initialisation level.
-        if previous_version <= DOMAIN_LEVEL_5 && previous_version != DOMAIN_LEVEL_0 {
+        if previous_version < DOMAIN_MIN_REMIGRATION_LEVEL && previous_version != DOMAIN_LEVEL_0 {
             error!("UNABLE TO PROCEED. You are attempting a Skip update which is NOT SUPPORTED. You must upgrade one-version of Kanidm at a time.");
             error!("For more see: https://kanidm.github.io/kanidm/stable/support.html#upgrade-policy and https://kanidm.github.io/kanidm/stable/server_updates.html");
             error!(domain_previous_version = ?previous_version, domain_target_version = ?domain_info_version);
@@ -2343,21 +2336,8 @@ impl<'a> QueryServerWriteTransaction<'a> {
             return Err(OperationError::MG0008SkipUpgradeAttempted);
         }

-        if previous_version <= DOMAIN_LEVEL_6 && domain_info_version >= DOMAIN_LEVEL_7 {
-            self.migrate_domain_6_to_7()?;
-        }
-
-        // Similar to the older system info migration handler, these allow "one shot" fixes
-        // to be issued and run by bumping the patch level.
-        if previous_patch_level < PATCH_LEVEL_1 && domain_info_patch_level >= PATCH_LEVEL_1 {
-            self.migrate_domain_patch_level_1()?;
-        }
-
-        if previous_version <= DOMAIN_LEVEL_7 && domain_info_version >= DOMAIN_LEVEL_8 {
-            self.migrate_domain_7_to_8()?;
-        }
-
         if previous_version <= DOMAIN_LEVEL_8 && domain_info_version >= DOMAIN_LEVEL_9 {
+            // 1.4 -> 1.5
             self.migrate_domain_8_to_9()?;
         }

@@ -2366,10 +2346,12 @@ impl<'a> QueryServerWriteTransaction<'a> {
         }

         if previous_version <= DOMAIN_LEVEL_9 && domain_info_version >= DOMAIN_LEVEL_10 {
+            // 1.5 -> 1.6
             self.migrate_domain_9_to_10()?;
         }

         if previous_version <= DOMAIN_LEVEL_10 && domain_info_version >= DOMAIN_LEVEL_11 {
+            // 1.6 -> 1.7
             self.migrate_domain_10_to_11()?;
         }

@@ -2394,7 +2376,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
         let display_name = domain_entry
             .get_ava_single_utf8(Attribute::DomainDisplayName)
             .map(str::to_string)
-            .ok_or(OperationError::InvalidEntryState)?;
+            .unwrap_or_else(|| format!("Kanidm {}", domain_name));

         let domain_ldap_allow_unix_pw_bind = domain_entry
             .get_ava_single_bool(Attribute::LdapAllowUnixPwBind)
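Note on the hunk above: the upgrade path is a ladder of level gates. Each step fires when the previously stored version is at or below its source level and the requested target is at or above its destination, so a multi-release bump replays every intermediate migration in order, while the guard at the top rejects jumps from below DOMAIN_MIN_REMIGRATION_LEVEL. The following is a rough sketch of that gating pattern only, with stand-in constants and the migration calls reduced to labels; it is not the server's implementation.

// Stand-in level constants; the real DOMAIN_LEVEL_* values live in the server constants module.
const DOMAIN_LEVEL_8: u32 = 8;
const DOMAIN_LEVEL_9: u32 = 9;
const DOMAIN_LEVEL_10: u32 = 10;
const DOMAIN_LEVEL_11: u32 = 11;

// Collect every migration between the stored version and the requested target, in order.
fn migrations_to_apply(previous: u32, target: u32) -> Vec<&'static str> {
    let mut applied = Vec::new();
    if previous <= DOMAIN_LEVEL_8 && target >= DOMAIN_LEVEL_9 {
        applied.push("migrate_domain_8_to_9"); // 1.4 -> 1.5
    }
    if previous <= DOMAIN_LEVEL_9 && target >= DOMAIN_LEVEL_10 {
        applied.push("migrate_domain_9_to_10"); // 1.5 -> 1.6
    }
    if previous <= DOMAIN_LEVEL_10 && target >= DOMAIN_LEVEL_11 {
        applied.push("migrate_domain_10_to_11"); // 1.6 -> 1.7
    }
    applied
}

fn main() {
    // Jumping two levels replays both intermediate steps in order.
    assert_eq!(
        migrations_to_apply(DOMAIN_LEVEL_9, DOMAIN_LEVEL_11),
        vec!["migrate_domain_9_to_10", "migrate_domain_10_to_11"]
    );
}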
@@ -121,6 +121,7 @@ pub enum ClientRequest {
     NssGroups,
     NssGroupByGid(u32),
     NssGroupByName(String),
+    NssGroupsByMember(String),
     PamAuthenticateInit {
         account_id: String,
         info: PamServiceInfo,
@@ -144,6 +145,7 @@ impl ClientRequest {
             ClientRequest::NssGroups => "NssGroups".to_string(),
             ClientRequest::NssGroupByGid(id) => format!("NssGroupByGid({})", id),
             ClientRequest::NssGroupByName(id) => format!("NssGroupByName({})", id),
+            ClientRequest::NssGroupsByMember(id) => format!("NssGroupsByMember({})", id),
             ClientRequest::PamAuthenticateInit { account_id, info } => format!(
                 "PamAuthenticateInit{{ account_id={} tty={} pam_secvice{} rhost={} }}",
                 account_id,
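The new NssGroupsByMember variant is the wire request the NSS initgroups path sends to the resolver daemon, and it is answered with the existing NssGroups response. A self-contained sketch of that round trip; the enums mirror the variants visible in this diff, but the transport and the group payload are simplified stand-ins for the daemon's real unix-socket codec and NssGroup type:

    enum ClientRequest {
        NssGroupsByMember(String),
    }

    enum ClientResponse {
        NssGroups(Vec<String>), // stand-in: the real response carries NssGroup structs
        Error,
    }

    // Stand-in for the daemon: resolve the groups that list the account as a member.
    fn send(req: ClientRequest) -> ClientResponse {
        match req {
            ClientRequest::NssGroupsByMember(member) if member == "alice" => {
                ClientResponse::NssGroups(vec!["wheel".to_string(), "idm_admins".to_string()])
            }
            _ => ClientResponse::Error,
        }
    }

    fn main() {
        match send(ClientRequest::NssGroupsByMember("alice".to_string())) {
            ClientResponse::NssGroups(groups) => println!("member of: {:?}", groups),
            ClientResponse::Error => eprintln!("daemon unavailable"),
        }
    }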
@@ -285,6 +285,42 @@ pub fn get_group_entry_by_name(name: String, req_options: RequestOptions) -> Res
     }
 }
 
+pub fn get_group_entries_by_member(member: String, req_options: RequestOptions) -> Response<Vec<Group>> {
+    match req_options.connect_to_daemon() {
+        Source::Daemon(mut daemon_client) => {
+            let req = ClientRequest::NssGroupsByMember(member);
+            daemon_client
+                .call_and_wait(&req, None)
+                .map(|r| match r {
+                    ClientResponse::NssGroups(l) => {
+                        l.into_iter().map(group_from_nssgroup).collect()
+                    }
+                    _ => Vec::new(),
+                })
+                .map(Response::Success)
+                .unwrap_or_else(|_| Response::Success(vec![]))
+        }
+        Source::Fallback { users: _, groups } => {
+            if groups.is_empty() {
+                return Response::Unavail;
+            }
+
+            let membergroups = groups
+                .into_iter()
+                .filter_map(|etcgroup| {
+                    if etcgroup.members.contains(&member) {
+                        Some(group_from_etcgroup(etcgroup))
+                    } else {
+                        None
+                    }
+                })
+                .collect();
+
+            Response::Success(membergroups)
+        }
+    }
+}
+
 fn passwd_from_etcuser(etc: EtcUser) -> Passwd {
     Passwd {
         name: etc.name,
@@ -3,6 +3,7 @@ use kanidm_unix_common::constants::DEFAULT_CONFIG_PATH;
 use libnss::group::{Group, GroupHooks};
 use libnss::interop::Response;
 use libnss::passwd::{Passwd, PasswdHooks};
+use libnss::initgroups::{InitgroupsHooks};
 
 struct KanidmPasswd;
 libnss_passwd_hooks!(kanidm, KanidmPasswd);
@@ -61,3 +62,16 @@ impl GroupHooks for KanidmGroup {
         core::get_group_entry_by_name(name, req_opt)
     }
 }
+
+struct KanidmInitgroups;
+libnss_initgroups_hooks!(kanidm, KanidmInitgroups);
+
+impl InitgroupsHooks for KanidmInitgroups {
+    fn get_entries_by_user(user: String) -> Response<Vec<Group>> {
+        let req_opt = RequestOptions::Main {
+            config_path: DEFAULT_CONFIG_PATH,
+        };
+
+        core::get_group_entries_by_member(user, req_opt)
+    }
+}
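With the initgroups hook registered, the kanidm NSS module also serves glibc's supplementary-group lookup (the initgroups(3) path used at login), not just the by-gid and by-name group queries above. Assuming the module is enabled for the relevant databases in /etc/nsswitch.conf, running getent initgroups <username> should exercise this new code path and report the account's Kanidm groups alongside any local ones.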
@@ -283,6 +283,14 @@ async fn handle_client(
                 error!("unable to load group, returning empty.");
                 ClientResponse::NssGroup(None)
             }),
+        ClientRequest::NssGroupsByMember(account_id) => cachelayer
+            .get_nssgroups_member_name(account_id.as_str())
+            .await
+            .map(ClientResponse::NssGroups)
+            .unwrap_or_else(|_| {
+                error!("unable to enum groups");
+                ClientResponse::NssGroups(Vec::new())
+            }),
         ClientRequest::PamAuthenticateInit { account_id, info } => {
             match &pam_auth_session_state {
                 Some(_auth_session) => {
@@ -792,6 +792,37 @@ impl DbTxn<'_> {
         }
     }
 
+    pub fn get_user_groups(&mut self, a_uuid: Uuid) -> Result<Vec<GroupToken>, CacheError> {
+        let mut stmt = self
+            .conn
+            .prepare("SELECT group_t.token FROM (group_t, memberof_t) WHERE group_t.uuid = memberof_t.g_uuid AND memberof_t.a_uuid = :a_uuid")
+            .map_err(|e| {
+                self.sqlite_error("select prepare", &e)
+            })?;
+
+        let data_iter = stmt
+            .query_map([a_uuid.as_hyphenated().to_string()], |row| row.get(0))
+            .map_err(|e| self.sqlite_error("query_map", &e))?;
+        let data: Result<Vec<Vec<u8>>, _> = data_iter
+            .map(|v| v.map_err(|e| self.sqlite_error("map", &e)))
+            .collect();
+
+        let data = data?;
+
+        Ok(data
+            .iter()
+            .filter_map(|token| {
+                // token convert with json.
+                // trace!("{:?}", token);
+                serde_json::from_slice(token.as_slice())
+                    .map_err(|e| {
+                        error!("json error -> {:?}", e);
+                    })
+                    .ok()
+            })
+            .collect())
+    }
+
     pub fn get_group_members(&mut self, g_uuid: Uuid) -> Result<Vec<UserToken>, CacheError> {
         let mut stmt = self
             .conn
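get_user_groups is the inverse of the existing get_group_members just below it: instead of fanning out from a group's uuid, it joins memberof_t back to group_t to collect every cached group token that references the account. A minimal, self-contained rusqlite sketch of that join; the table contents and uuids are made up, and error handling is collapsed for brevity:

    use rusqlite::Connection;

    fn main() -> rusqlite::Result<()> {
        let conn = Connection::open_in_memory()?;

        // Toy versions of the daemon's cache tables.
        conn.execute_batch(
            "CREATE TABLE group_t (uuid TEXT PRIMARY KEY, token BLOB);
             CREATE TABLE memberof_t (g_uuid TEXT, a_uuid TEXT);
             INSERT INTO group_t VALUES ('g-1', x'7b7d'); -- '{}' as a stand-in token
             INSERT INTO memberof_t VALUES ('g-1', 'a-1');",
        )?;

        // Same shape as the query above: every group token whose membership row
        // points at the requested account uuid.
        let mut stmt = conn.prepare(
            "SELECT group_t.token FROM (group_t, memberof_t) \
             WHERE group_t.uuid = memberof_t.g_uuid AND memberof_t.a_uuid = :a_uuid",
        )?;

        let tokens: Vec<Vec<u8>> = stmt
            .query_map(["a-1"], |row| row.get(0))?
            .collect::<Result<_, _>>()?;

        assert_eq!(tokens.len(), 1);
        println!("found {} group token(s) for a-1", tokens.len());
        Ok(())
    }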
@@ -620,6 +620,17 @@ impl Resolver {
         })
     }
 
+    async fn get_usergroups(&self, g_uuid: Uuid) -> Vec<String> {
+        let mut dbtxn = self.db.write().await;
+
+        dbtxn
+            .get_user_groups(g_uuid)
+            .unwrap_or_else(|_| Vec::new())
+            .into_iter()
+            .map(|gt| self.token_gidattr(&gt))
+            .collect()
+    }
+
     async fn get_groupmembers(&self, g_uuid: Uuid) -> Vec<String> {
         let mut dbtxn = self.db.write().await;
 
@@ -780,6 +791,17 @@ impl Resolver {
         Ok(r)
     }
 
+    pub async fn get_nssgroups_member_name(&self, account_id: &str) -> Result<Vec<NssGroup>, ()> {
+        if let Some(nss_user) = self.get_nssaccount(&account_id).await {
+            Ok(self.get_usergroups(nss_user).await
+                .into_iter()
+                .map(|g| self.token_gidattr(&g))
+                .collect())
+        } else {
+            Ok(Vec::new())
+        }
+    }
+
     async fn get_nssgroup(&self, grp_id: Id) -> Result<Option<NssGroup>, ()> {
         if let Some(mut nss_group) = self.system_provider.get_nssgroup(&grp_id).await {
             debug!("system provider satisfied request");
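Taken together, the unix integration hunks wire a complete path for supplementary-group resolution: the NSS initgroups hook issues NssGroupsByMember, handle_client in the daemon forwards it to the resolver's get_nssgroups_member_name, which calls get_usergroups and, through DbTxn::get_user_groups, reads the cached memberof relationships, mirroring the existing group-member lookup in the opposite direction.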