20230224 2437 orca remodel (#2591)

This commit is contained in:
Firstyear 2024-03-09 16:09:15 +10:00 committed by GitHub
parent 1887daa76a
commit 285f4362b2
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
36 changed files with 119644 additions and 4879 deletions

7
Cargo.lock generated
View file

@ -4184,25 +4184,22 @@ checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d"
name = "orca"
version = "1.2.0-dev"
dependencies = [
"async-trait",
"clap",
"crossbeam",
"csv",
"dialoguer",
"futures-util",
"hashbrown 0.14.3",
"kanidm_build_profiles",
"kanidm_client",
"kanidm_proto",
"ldap3_proto",
"mathru",
"openssl",
"rand",
"rand_chacha",
"serde",
"serde_json",
"tikv-jemallocator",
"tokio",
"tokio-openssl",
"tokio-util",
"toml",
"tracing",
"tracing-subscriber",

View file

@ -189,6 +189,7 @@ proc-macro2 = "1.0.69"
qrcode = "^0.12.0"
quote = "1"
rand = "^0.8.5"
rand_chacha = "0.3.1"
regex = "1.10.2"
reqwest = { version = "0.11.20", default-features = false, features = [
"cookies",

View file

@ -23,6 +23,9 @@ cargo install wasm-bindgen-cli
- [ ] cargo audit
- [ ] cargo test
- [ ] setup a local instance and run orca (TBD)
- [ ] store a copy of an example db (TBD)
### Code Changes
- [ ] upgrade crypto policy values if required

View file

@ -76,11 +76,19 @@ impl SessionConsistency {
let invalidate: Option<BTreeSet<_>> = entry.get_ava_as_session_map(Attribute::UserAuthTokenSession)
.map(|sessions| {
sessions.iter().filter_map(|(session_id, session)| {
if !cred_ids.contains(&session.cred_id) {
info!(%session_id, "Removing auth session whose issuing credential no longer exists");
Some(PartialValue::Refer(*session_id))
} else {
None
match &session.state {
SessionState::RevokedAt(_) => {
// Ignore, it's already revoked.
None
}
SessionState::ExpiresAt(_) |
SessionState::NeverExpires =>
if !cred_ids.contains(&session.cred_id) {
info!(%session_id, "Revoking auth session whose issuing credential no longer exists");
Some(PartialValue::Refer(*session_id))
} else {
None
},
}
})
.collect()

View file

@ -18,23 +18,20 @@ test = true
doctest = false
[dependencies]
async-trait = { workspace = true }
clap = { workspace = true }
crossbeam = { workspace = true }
csv = { workspace = true }
dialoguer = { workspace = true }
futures-util = { workspace = true, features = ["sink"] }
hashbrown = { workspace = true }
kanidm_client = { workspace = true }
kanidm_proto = { workspace = true }
ldap3_proto = { workspace = true }
mathru = { workspace = true }
openssl = { workspace = true }
rand = { workspace = true }
rand_chacha = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tokio = { workspace = true, features = ["rt-multi-thread"] }
tokio-openssl = { workspace = true }
tokio-util = { workspace = true, features = ["codec"] }
tokio = { workspace = true, features = ["rt-multi-thread", "sync"] }
toml = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
@ -45,3 +42,4 @@ tikv-jemallocator = { workspace = true }
[build-dependencies]
kanidm_build_profiles = { workspace = true }

56
tools/orca/README.md Normal file
View file

@ -0,0 +1,56 @@
# Orca - A Kanidm Load Testing Tool
Make a profile
```shell
orca setup-wizard --idm-admin-password ... \
--admin-password ... \
--control-uri 'https://localhost:8443' \
--profile ./profile.toml
```
Test the connection
```shell
orca conntest --profile ./profile.toml
```
Generate a State File
```shell
orca generate --profile ./profile.toml --state ./state.json
```
Run the test preflight to populate the sample data
```shell
orca populate --state ./state.json
```
Run the load test
```shell
orca run --state ./state.json
```
## Design Choices
### What is a profile?
A profile defines the connection parameters and test randomisation seed. From a profile you define
the parameters of the test you wish to perform.
### What is a state file?
A statefile is the fully generated state of all entries that will be created and then used in the
load test. The state file can be recreated from a profile and its seed at any time. The reason to
separate these is that state files may get quite large, when what you really just need is the
ability to recreate them when needed.
This state file also contains all the details about accounts and entries so that during test
execution orca knows what it can and can not interact with.
### Why have a separate generate and preflight?
Because generating the data is single thread limited, this would also bottleneck entry creation. By
generating the data first, we can then execute preflight entry creation in parallel.

View file

@ -1,459 +0,0 @@
[
{
"conn": "-1",
"etime": "0",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c",
"dbde6c65-4404-4f1d-8627-9a0956c37959"
],
"nentries": 4,
"rtime": "0:00:00",
"type": "precreate"
},
{
"conn": "1",
"etime": "0.000528091",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "0:00:00",
"type": "bind"
},
{
"conn": "2",
"etime": "0.000397756",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "0:00:00.038973",
"type": "bind"
},
{
"conn": "1",
"etime": "0.000585359",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "0:00:06.139358",
"type": "bind"
},
{
"conn": "2",
"etime": "0.000431519",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "0:00:06.148107",
"type": "bind"
},
{
"conn": "3",
"etime": "0.000508000",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:33:54.277936",
"type": "bind"
},
{
"conn": "4",
"etime": "0.000486000",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:34:00.790073",
"type": "bind"
},
{
"conn": "4",
"etime": "0.000401700",
"ids": [
"81e63faf-538a-4660-b327-7bcdea83f0aa"
],
"nentries": 0,
"rtime": "1:34:00.790858",
"type": "srch"
},
{
"conn": "5",
"etime": "0.000700300",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:34:14.651418",
"type": "bind"
},
{
"conn": "6",
"etime": "0.000602600",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:34:33.585998",
"type": "bind"
},
{
"conn": "7",
"etime": "0.000649300",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:34:51.648221",
"type": "bind"
},
{
"conn": "7",
"etime": "0.025628300",
"ids": [
"81e63faf-538a-4660-b327-7bcdea83f0aa"
],
"nentries": 1,
"rtime": "1:34:51.667005",
"type": "add"
},
{
"conn": "7",
"etime": "0.005475700",
"ids": [
"81e63faf-538a-4660-b327-7bcdea83f0aa"
],
"nentries": 1,
"rtime": "1:34:51.692743",
"type": "mod"
},
{
"conn": "7",
"etime": "0.015649500",
"ids": [
"5defe265-8a47-4f3f-9cd8-344061f1b071"
],
"nentries": 1,
"rtime": "1:34:51.698652",
"type": "add"
},
{
"conn": "7",
"etime": "0.002385200",
"ids": [
"169a8db1-d484-4519-ac6f-a19b70e6e3f8"
],
"nentries": 1,
"rtime": "1:34:51.714690",
"type": "add"
},
{
"conn": "7",
"etime": "0.004335000",
"ids": [
"37cc595b-fa48-4822-903f-e5c0a0bd79b5"
],
"nentries": 1,
"rtime": "1:34:51.717484",
"type": "add"
},
{
"conn": "7",
"etime": "0.003308400",
"ids": [
"0c6bfac0-9214-44f7-86f6-90a3cba18a54"
],
"nentries": 1,
"rtime": "1:34:51.721901",
"type": "add"
},
{
"conn": "7",
"etime": "0.002083600",
"ids": [
"543a641e-8667-4460-9536-65d7e5e1132b"
],
"nentries": 1,
"rtime": "1:34:51.725496",
"type": "add"
},
{
"conn": "7",
"etime": "0.010227700",
"ids": [
"34e53981-a403-4283-9d73-37cb30386e04"
],
"nentries": 1,
"rtime": "1:34:51.729333",
"type": "add"
},
{
"conn": "7",
"etime": "0.003348700",
"ids": [
"c0e4e2f6-5d16-4550-ae49-b30164545b89"
],
"nentries": 1,
"rtime": "1:34:51.740695",
"type": "add"
},
{
"conn": "7",
"etime": "0.003131700",
"ids": [
"83c088bf-8c78-4344-8726-b0f0ab4c2c9b"
],
"nentries": 1,
"rtime": "1:34:51.745069",
"type": "add"
},
{
"conn": "7",
"etime": "0.003571300",
"ids": [
"4a15deef-c31c-4a0d-ab3f-6ccd8d2f4e38"
],
"nentries": 1,
"rtime": "1:34:51.749149",
"type": "add"
},
{
"conn": "7",
"etime": "0.003427600",
"ids": [
"b88c12da-8c6a-497c-8ca9-832db87d10de"
],
"nentries": 1,
"rtime": "1:34:51.753605",
"type": "add"
},
{
"conn": "7",
"etime": "0.003846700",
"ids": [
"4c155ed6-a358-4a1b-bd37-b092e134fbb3"
],
"nentries": 1,
"rtime": "1:34:51.757942",
"type": "add"
},
{
"conn": "7",
"etime": "0.003432100",
"ids": [
"ef1a6e8f-2e8a-49a7-8a95-88ee1feee02e"
],
"nentries": 1,
"rtime": "1:34:51.762748",
"type": "add"
},
{
"conn": "7",
"etime": "0.003518100",
"ids": [
"59770fb1-b160-4c54-816a-87dcd85ca392"
],
"nentries": 1,
"rtime": "1:34:51.767198",
"type": "add"
},
{
"conn": "8",
"etime": "0.000500500",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:34:55.204540",
"type": "bind"
},
{
"conn": "8",
"etime": "0.001086100",
"ids": [
"c0e4e2f6-5d16-4550-ae49-b30164545b89",
"169a8db1-d484-4519-ac6f-a19b70e6e3f8",
"4c155ed6-a358-4a1b-bd37-b092e134fbb3",
"34e53981-a403-4283-9d73-37cb30386e04",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"5defe265-8a47-4f3f-9cd8-344061f1b071",
"543a641e-8667-4460-9536-65d7e5e1132b",
"4a15deef-c31c-4a0d-ab3f-6ccd8d2f4e38",
"83c088bf-8c78-4344-8726-b0f0ab4c2c9b",
"37cc595b-fa48-4822-903f-e5c0a0bd79b5",
"ef1a6e8f-2e8a-49a7-8a95-88ee1feee02e",
"0c6bfac0-9214-44f7-86f6-90a3cba18a54",
"b88c12da-8c6a-497c-8ca9-832db87d10de"
],
"nentries": 13,
"rtime": "1:34:55.205408",
"type": "srch"
},
{
"conn": "9",
"etime": "0.000667100",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:35:31.405417",
"type": "bind"
},
{
"conn": "9",
"etime": "0.005077600",
"ids": [
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214"
],
"nentries": 1,
"rtime": "1:35:31.425350",
"type": "add"
},
{
"conn": "9",
"etime": "0.000433300",
"ids": [
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214"
],
"nentries": 1,
"rtime": "1:35:31.430615",
"type": "srch"
},
{
"conn": "10",
"etime": "0.000564700",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:35:35.351913",
"type": "bind"
},
{
"conn": "10",
"etime": "0.000893100",
"ids": [
"c0e4e2f6-5d16-4550-ae49-b30164545b89",
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214"
],
"nentries": 2,
"rtime": "1:35:35.371215",
"type": "srch"
},
{
"conn": "10",
"etime": "0.000465000",
"ids": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c"
],
"nentries": 1,
"rtime": "1:35:35.373590",
"type": "srch"
},
{
"conn": "10",
"etime": "0.000540200",
"ids": [
"dbde6c65-4404-4f1d-8627-9a0956c37959"
],
"nentries": 1,
"rtime": "1:35:35.374385",
"type": "srch"
},
{
"conn": "11",
"etime": "0.000577300",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:35:41.643026",
"type": "bind"
},
{
"conn": "11",
"etime": "0.001376500",
"ids": [
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214"
],
"nentries": 1,
"rtime": "1:35:49.466015",
"type": "srch"
},
{
"conn": "11",
"etime": "0.009469100",
"ids": [
"dbde6c65-4404-4f1d-8627-9a0956c37959"
],
"nentries": 1,
"rtime": "1:35:49.468983",
"type": "mod"
},
{
"conn": "12",
"etime": "0.000602800",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:35:56.951866",
"type": "bind"
},
{
"conn": "12",
"etime": "0.000812300",
"ids": [
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214"
],
"nentries": 2,
"rtime": "1:35:56.971102",
"type": "srch"
},
{
"conn": "12",
"etime": "0.000426400",
"ids": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c"
],
"nentries": 1,
"rtime": "1:35:56.973504",
"type": "srch"
},
{
"conn": "12",
"etime": "0.000810100",
"ids": [
"dbde6c65-4404-4f1d-8627-9a0956c37959"
],
"nentries": 1,
"rtime": "1:35:56.974321",
"type": "srch"
},
{
"conn": "13",
"etime": "0.000633700",
"ids": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"nentries": 1,
"rtime": "1:36:25.041289",
"type": "bind"
},
{
"conn": "13",
"etime": "0.005287900",
"ids": [
"dbde6c65-4404-4f1d-8627-9a0956c37959"
],
"nentries": 1,
"rtime": "1:36:27.795820",
"type": "del"
}
]

View file

@ -1,962 +0,0 @@
{
"all_entities": {
"81e63faf-538a-4660-b327-7bcdea83f0aa": {
"Group": {
"name": "group_1657205471178919426",
"uuid": "81e63faf-538a-4660-b327-7bcdea83f0aa",
"members": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"dbde6c65-4404-4f1d-8627-9a0956c37959",
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c"
]
}
},
"543a641e-8667-4460-9536-65d7e5e1132b": {
"Group": {
"name": "group_15437790154560783788",
"uuid": "543a641e-8667-4460-9536-65d7e5e1132b",
"members": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b",
"dbde6c65-4404-4f1d-8627-9a0956c37959",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c"
]
}
},
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c": {
"Group": {
"name": "group_15846755699640016914",
"uuid": "3fe997de-8e79-4a0a-b057-8ff39c9c2f7c",
"members": []
}
},
"5defe265-8a47-4f3f-9cd8-344061f1b071": {
"Group": {
"name": "group_16856201244000682350",
"uuid": "5defe265-8a47-4f3f-9cd8-344061f1b071",
"members": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c",
"f62b0bfe-3196-4836-bfce-4f2f8730138b",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"dbde6c65-4404-4f1d-8627-9a0956c37959"
]
}
},
"37cc595b-fa48-4822-903f-e5c0a0bd79b5": {
"Group": {
"name": "group_5096429401270452330",
"uuid": "37cc595b-fa48-4822-903f-e5c0a0bd79b5",
"members": []
}
},
"34e53981-a403-4283-9d73-37cb30386e04": {
"Group": {
"name": "group_1914585360573050336",
"uuid": "34e53981-a403-4283-9d73-37cb30386e04",
"members": [
"dbde6c65-4404-4f1d-8627-9a0956c37959",
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
]
}
},
"0c6bfac0-9214-44f7-86f6-90a3cba18a54": {
"Group": {
"name": "group_8157469396614494968",
"uuid": "0c6bfac0-9214-44f7-86f6-90a3cba18a54",
"members": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c",
"dbde6c65-4404-4f1d-8627-9a0956c37959"
]
}
},
"f62b0bfe-3196-4836-bfce-4f2f8730138b": {
"Account": {
"name": "account_12917285583703370811",
"display_name": "Account 12917285583703370811",
"password": "JtNW-C0jB-YKg2-8UC0",
"uuid": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
"59770fb1-b160-4c54-816a-87dcd85ca392": {
"Group": {
"name": "group_1122290820441410853",
"uuid": "59770fb1-b160-4c54-816a-87dcd85ca392",
"members": []
}
},
"b88c12da-8c6a-497c-8ca9-832db87d10de": {
"Group": {
"name": "group_8636995776512342157",
"uuid": "b88c12da-8c6a-497c-8ca9-832db87d10de",
"members": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c"
]
}
},
"83c088bf-8c78-4344-8726-b0f0ab4c2c9b": {
"Group": {
"name": "group_14899354156377375966",
"uuid": "83c088bf-8c78-4344-8726-b0f0ab4c2c9b",
"members": []
}
},
"169a8db1-d484-4519-ac6f-a19b70e6e3f8": {
"Group": {
"name": "group_16047693327707073263",
"uuid": "169a8db1-d484-4519-ac6f-a19b70e6e3f8",
"members": [
"dbde6c65-4404-4f1d-8627-9a0956c37959",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"f62b0bfe-3196-4836-bfce-4f2f8730138b",
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c"
]
}
},
"e56b8837-f4e7-4935-86d2-239f23b1f4ef": {
"Group": {
"name": "group_14259410542307289297",
"uuid": "e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"members": []
}
},
"4a15deef-c31c-4a0d-ab3f-6ccd8d2f4e38": {
"Group": {
"name": "group_7666706091976407941",
"uuid": "4a15deef-c31c-4a0d-ab3f-6ccd8d2f4e38",
"members": [
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c",
"f62b0bfe-3196-4836-bfce-4f2f8730138b",
"dbde6c65-4404-4f1d-8627-9a0956c37959"
]
}
},
"c0e4e2f6-5d16-4550-ae49-b30164545b89": {
"Group": {
"name": "group_4054017059755809479",
"uuid": "c0e4e2f6-5d16-4550-ae49-b30164545b89",
"members": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"dbde6c65-4404-4f1d-8627-9a0956c37959",
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c"
]
}
},
"dbde6c65-4404-4f1d-8627-9a0956c37959": {
"Group": {
"name": "group_12489660120465860992",
"uuid": "dbde6c65-4404-4f1d-8627-9a0956c37959",
"members": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c",
"dbde6c65-4404-4f1d-8627-9a0956c37959",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef"
]
}
},
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214": {
"Group": {
"name": "group_5259949902021731134",
"uuid": "c28a49f3-c0cd-4eaf-806b-3bbc045ff214",
"members": [
"e56b8837-f4e7-4935-86d2-239f23b1f4ef"
]
}
},
"4c155ed6-a358-4a1b-bd37-b092e134fbb3": {
"Group": {
"name": "group_12242762395690138256",
"uuid": "4c155ed6-a358-4a1b-bd37-b092e134fbb3",
"members": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c",
"f62b0bfe-3196-4836-bfce-4f2f8730138b",
"dbde6c65-4404-4f1d-8627-9a0956c37959"
]
}
},
"ef1a6e8f-2e8a-49a7-8a95-88ee1feee02e": {
"Group": {
"name": "group_319193270919044761",
"uuid": "ef1a6e8f-2e8a-49a7-8a95-88ee1feee02e",
"members": [
"dbde6c65-4404-4f1d-8627-9a0956c37959",
"f62b0bfe-3196-4836-bfce-4f2f8730138b",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef"
]
}
}
},
"access": {
"f62b0bfe-3196-4836-bfce-4f2f8730138b": [
{
"Group": "0c6bfac0-9214-44f7-86f6-90a3cba18a54"
},
{
"Group": "169a8db1-d484-4519-ac6f-a19b70e6e3f8"
},
{
"Group": "34e53981-a403-4283-9d73-37cb30386e04"
},
{
"Group": "37cc595b-fa48-4822-903f-e5c0a0bd79b5"
},
{
"Group": "4a15deef-c31c-4a0d-ab3f-6ccd8d2f4e38"
},
{
"Group": "4c155ed6-a358-4a1b-bd37-b092e134fbb3"
},
{
"Group": "543a641e-8667-4460-9536-65d7e5e1132b"
},
{
"Group": "59770fb1-b160-4c54-816a-87dcd85ca392"
},
{
"Group": "5defe265-8a47-4f3f-9cd8-344061f1b071"
},
{
"Group": "81e63faf-538a-4660-b327-7bcdea83f0aa"
},
{
"Group": "83c088bf-8c78-4344-8726-b0f0ab4c2c9b"
},
{
"Group": "b88c12da-8c6a-497c-8ca9-832db87d10de"
},
{
"Group": "c0e4e2f6-5d16-4550-ae49-b30164545b89"
},
{
"Group": "c28a49f3-c0cd-4eaf-806b-3bbc045ff214"
},
{
"Group": "dbde6c65-4404-4f1d-8627-9a0956c37959"
},
{
"Group": "ef1a6e8f-2e8a-49a7-8a95-88ee1feee02e"
}
]
},
"accounts": [
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"precreate": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c",
"dbde6c65-4404-4f1d-8627-9a0956c37959",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"f62b0bfe-3196-4836-bfce-4f2f8730138b"
],
"connections": [
{
"id": 1,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 528091
},
"rtime": {
"secs": 0,
"nanos": 0
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 585359
},
"rtime": {
"secs": 6,
"nanos": 139358000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
}
]
},
{
"id": 2,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 397756
},
"rtime": {
"secs": 0,
"nanos": 38973000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 431519
},
"rtime": {
"secs": 6,
"nanos": 148107000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
}
]
},
{
"id": 3,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 508000
},
"rtime": {
"secs": 5634,
"nanos": 277936000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
}
]
},
{
"id": 4,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 486000
},
"rtime": {
"secs": 5640,
"nanos": 790073000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 401700
},
"rtime": {
"secs": 5640,
"nanos": 790858000
},
"op_type": {
"Search": [
"81e63faf-538a-4660-b327-7bcdea83f0aa"
]
}
}
]
},
{
"id": 5,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 700300
},
"rtime": {
"secs": 5654,
"nanos": 651418000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
}
]
},
{
"id": 6,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 602600
},
"rtime": {
"secs": 5673,
"nanos": 585998000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
}
]
},
{
"id": 7,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 649300
},
"rtime": {
"secs": 5691,
"nanos": 648221000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 25628300
},
"rtime": {
"secs": 5691,
"nanos": 667005000
},
"op_type": {
"Add": [
"81e63faf-538a-4660-b327-7bcdea83f0aa"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 5475700
},
"rtime": {
"secs": 5691,
"nanos": 692743000
},
"op_type": {
"Mod": [
[
"81e63faf-538a-4660-b327-7bcdea83f0aa",
{
"Group": []
}
]
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 15649500
},
"rtime": {
"secs": 5691,
"nanos": 698652000
},
"op_type": {
"Add": [
"5defe265-8a47-4f3f-9cd8-344061f1b071"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 2385200
},
"rtime": {
"secs": 5691,
"nanos": 714690000
},
"op_type": {
"Add": [
"169a8db1-d484-4519-ac6f-a19b70e6e3f8"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 4335000
},
"rtime": {
"secs": 5691,
"nanos": 717484000
},
"op_type": {
"Add": [
"37cc595b-fa48-4822-903f-e5c0a0bd79b5"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 3308400
},
"rtime": {
"secs": 5691,
"nanos": 721901000
},
"op_type": {
"Add": [
"0c6bfac0-9214-44f7-86f6-90a3cba18a54"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 2083600
},
"rtime": {
"secs": 5691,
"nanos": 725496000
},
"op_type": {
"Add": [
"543a641e-8667-4460-9536-65d7e5e1132b"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 10227700
},
"rtime": {
"secs": 5691,
"nanos": 729333000
},
"op_type": {
"Add": [
"34e53981-a403-4283-9d73-37cb30386e04"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 3348700
},
"rtime": {
"secs": 5691,
"nanos": 740695000
},
"op_type": {
"Add": [
"c0e4e2f6-5d16-4550-ae49-b30164545b89"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 3131700
},
"rtime": {
"secs": 5691,
"nanos": 745069000
},
"op_type": {
"Add": [
"83c088bf-8c78-4344-8726-b0f0ab4c2c9b"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 3571300
},
"rtime": {
"secs": 5691,
"nanos": 749149000
},
"op_type": {
"Add": [
"4a15deef-c31c-4a0d-ab3f-6ccd8d2f4e38"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 3427600
},
"rtime": {
"secs": 5691,
"nanos": 753605000
},
"op_type": {
"Add": [
"b88c12da-8c6a-497c-8ca9-832db87d10de"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 3846700
},
"rtime": {
"secs": 5691,
"nanos": 757942000
},
"op_type": {
"Add": [
"4c155ed6-a358-4a1b-bd37-b092e134fbb3"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 3432100
},
"rtime": {
"secs": 5691,
"nanos": 762748000
},
"op_type": {
"Add": [
"ef1a6e8f-2e8a-49a7-8a95-88ee1feee02e"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 3518100
},
"rtime": {
"secs": 5691,
"nanos": 767198000
},
"op_type": {
"Add": [
"59770fb1-b160-4c54-816a-87dcd85ca392"
]
}
}
]
},
{
"id": 8,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 500499
},
"rtime": {
"secs": 5695,
"nanos": 204540000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 1086100
},
"rtime": {
"secs": 5695,
"nanos": 205408000
},
"op_type": {
"Search": [
"0c6bfac0-9214-44f7-86f6-90a3cba18a54",
"169a8db1-d484-4519-ac6f-a19b70e6e3f8",
"34e53981-a403-4283-9d73-37cb30386e04",
"37cc595b-fa48-4822-903f-e5c0a0bd79b5",
"4a15deef-c31c-4a0d-ab3f-6ccd8d2f4e38",
"4c155ed6-a358-4a1b-bd37-b092e134fbb3",
"543a641e-8667-4460-9536-65d7e5e1132b",
"5defe265-8a47-4f3f-9cd8-344061f1b071",
"83c088bf-8c78-4344-8726-b0f0ab4c2c9b",
"b88c12da-8c6a-497c-8ca9-832db87d10de",
"c0e4e2f6-5d16-4550-ae49-b30164545b89",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef",
"ef1a6e8f-2e8a-49a7-8a95-88ee1feee02e"
]
}
}
]
},
{
"id": 9,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 667100
},
"rtime": {
"secs": 5731,
"nanos": 405417000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 5077600
},
"rtime": {
"secs": 5731,
"nanos": 425350000
},
"op_type": {
"Add": [
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 433300
},
"rtime": {
"secs": 5731,
"nanos": 430615000
},
"op_type": {
"Search": [
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214"
]
}
}
]
},
{
"id": 10,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 564700
},
"rtime": {
"secs": 5735,
"nanos": 351913000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 893100
},
"rtime": {
"secs": 5735,
"nanos": 371215000
},
"op_type": {
"Search": [
"c0e4e2f6-5d16-4550-ae49-b30164545b89",
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 465000
},
"rtime": {
"secs": 5735,
"nanos": 373590000
},
"op_type": {
"Search": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 540200
},
"rtime": {
"secs": 5735,
"nanos": 374385000
},
"op_type": {
"Search": [
"dbde6c65-4404-4f1d-8627-9a0956c37959"
]
}
}
]
},
{
"id": 11,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 577300
},
"rtime": {
"secs": 5741,
"nanos": 643026000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 1376500
},
"rtime": {
"secs": 5749,
"nanos": 466015000
},
"op_type": {
"Search": [
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 9469100
},
"rtime": {
"secs": 5749,
"nanos": 468983000
},
"op_type": {
"Mod": [
[
"dbde6c65-4404-4f1d-8627-9a0956c37959",
{
"Group": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c",
"4c155ed6-a358-4a1b-bd37-b092e134fbb3"
]
}
]
]
}
}
]
},
{
"id": 12,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 602800
},
"rtime": {
"secs": 5756,
"nanos": 951866000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 812300
},
"rtime": {
"secs": 5756,
"nanos": 971102000
},
"op_type": {
"Search": [
"c28a49f3-c0cd-4eaf-806b-3bbc045ff214",
"e56b8837-f4e7-4935-86d2-239f23b1f4ef"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 426400
},
"rtime": {
"secs": 5756,
"nanos": 973504000
},
"op_type": {
"Search": [
"3fe997de-8e79-4a0a-b057-8ff39c9c2f7c"
]
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 810100
},
"rtime": {
"secs": 5756,
"nanos": 974321000
},
"op_type": {
"Search": [
"dbde6c65-4404-4f1d-8627-9a0956c37959"
]
}
}
]
},
{
"id": 13,
"ops": [
{
"orig_etime": {
"secs": 0,
"nanos": 633700
},
"rtime": {
"secs": 5785,
"nanos": 41289000
},
"op_type": {
"Bind": "f62b0bfe-3196-4836-bfce-4f2f8730138b"
}
},
{
"orig_etime": {
"secs": 0,
"nanos": 5287900
},
"rtime": {
"secs": 5787,
"nanos": 795820000
},
"op_type": {
"Delete": [
"dbde6c65-4404-4f1d-8627-9a0956c37959"
]
}
}
]
}
]
}

View file

@ -1,21 +0,0 @@
name = "Orca Small Example"
data = "data.json"
results = "/tmp/small_results"
[ds_config]
uri = "ldaps://172.24.20.4:49153"
base_dn = "dc=example,dc=com"
dm_pw = "ds9n539EaYtD2CsGOUATsOUeyFy4OZVPAN6jEEm.WP52NVz7j.VLhAVG5twbcaSoa"
[kani_http_config]
uri = "https://172.24.20.4:8443"
admin_pw = "YWySv7W65D1Zq001jgT1zxg5TEsz6ex80MQ9EKDG7t0RrQU0"
[kani_ldap_config]
uri = "https://172.24.20.4:8443"
ldap_uri = "ldaps://172.24.20.4:3636"
admin_pw = "YWySv7W65D1Zq001jgT1zxg5TEsz6ex80MQ9EKDG7t0RrQU0"
base_dn = "dc=example,dc=com"
# [search_basic_config]

View file

@ -1,21 +0,0 @@
name = "Orca Small Example"
data = "data.json"
results = "/tmp/small_results"
[ds_config]
uri = "ldaps://localhost:636"
base_dn = "dc=example,dc=com"
dm_pw = "password"
[kani_http_config]
uri = "https://localhost:8443"
admin_pw = "laNgei6doa9iengoh4to"
[kani_ldap_config]
uri = "https://localhost:8443"
ldap_uri = "ldaps://localhost:3636"
admin_pw = "laNgei6doa9iengoh4to"
base_dn = "dc=example,dc=com"
# [search_basic_config]

View file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2019 Solvenium
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View file

@ -0,0 +1,5 @@
# names-dataset
Retrieved 2024-02-24
[names-dataset](https://github.com/solvenium/names-dataset/tree/master)

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -1,70 +0,0 @@
#!/bin/bash
# Reset the local kanidm dev environment, then configure an orca profile,
# generate a test data set, and run an orca connection test against it.

# Abort immediately on any command failure.
set -e

# "$0" resolves to an existing file only when the script is invoked from its
# own directory — this enforces running from tools/orca.
if [ ! -f "$0" ]; then
    echo "This script must be run from the tools/orca directory."
    exit 1
fi
MYDIR="$(pwd)"

echo "Running this will run the setup_dev_environment script"
echo "which resets the local dev environment to a default state."
echo ""
echo "Also, you'll need to start the server in another tab."
echo ""
echo "Hit ctrl-c to quit now if that's not what you intend!"
read -rp "Press Enter to continue"

cd ../../server/daemon/ || exit 1

# Temp dir used to capture the setup script's output so the generated admin
# passwords can be scraped back out of it below.
KANI_TEMP="$(mktemp -d)"
echo "Running the script..."
../../scripts/setup_dev_environment.sh | tee "${KANI_TEMP}/kanifile"
echo "#########################"
echo "Back to orca now..."
echo "#########################"

# Allow the caller to point at an alternate server config via KANIDM_CONFIG.
if [ -z "${KANIDM_CONFIG}" ]; then
    KANIDM_CONFIG="../../examples/insecure_server.toml"
fi

# Scrape the admin / idm_admin passwords printed by the setup script.
ADMIN_PW=$(grep -E "^admin password" "${KANI_TEMP}/kanifile" | awk '{print $NF}')
IDM_ADMIN_PW=$(grep -E "^idm_admin password" "${KANI_TEMP}/kanifile" | awk '{print $NF}')
rm "${KANI_TEMP}/kanifile"

if [ -n "${DEBUG}" ]; then
    echo "Admin pw: ${ADMIN_PW}"
    echo "IDM Admin pw: ${IDM_ADMIN_PW}"
fi

cd "$MYDIR" || exit 1

# Build an LDAP base DN from the server's configured domain,
# e.g. example.com -> DN=example,DN=com.
# NOTE(review): the "DN=" component prefix is unusual — LDAP base DNs
# conventionally use "dc=" — confirm against what orca expects.
LDAP_DN="DN=$(grep domain "${KANIDM_CONFIG}" | awk '{print $NF}' | tr -d '"' | sed -E 's/\./,DN=/g')"
PROFILE_PATH="/tmp/kanidm/orca.toml"

# Write an orca profile pointed at the freshly reset server.
cargo run --bin orca -- configure \
    --profile "${PROFILE_PATH}" \
    --admin-password "${ADMIN_PW}" \
    --kanidm-uri "$(grep origin "${KANIDM_CONFIG}" | awk '{print $NF}' | tr -d '"')" \
    --ldap-uri "ldaps://$(grep domain "${KANIDM_CONFIG}" | awk '{print $NF}' | tr -d '"'):636" \
    --ldap-base-dn "${LDAP_DN}"

echo "Generating data..."
cargo run --bin orca -- generate --output /tmp/kanidm/orcatest

echo "Running connection test..."
cargo run --bin orca -- conntest --profile "${PROFILE_PATH}" kanidm

echo "Now you can run things!"
echo "To set up the environment, run:"
echo "cargo run --bin orca --release -- setup --profile /tmp/kanidm/orca.toml kanidm"
echo "cargo run --bin orca --release -- run --profile /tmp/kanidm/orca.toml kanidm search-basic"

View file

@ -1,193 +0,0 @@
use hashbrown::{HashMap, HashSet};
use std::time::Duration;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
/// Generate a human-readable password of the form `XXXX-XXXX-XXXX-XXXX`,
/// where each group is four random alphanumeric characters.
///
/// Uses the thread-local RNG; each call yields an independent password.
pub fn readable_password_from_random() -> String {
    let mut trng = thread_rng();
    // Build the four alphanumeric groups with one sampling loop instead of
    // repeating the identical iterator chain four times.
    let groups: Vec<String> = (0..4)
        .map(|_| {
            (&mut trng)
                .sample_iter(&Alphanumeric)
                .take(4)
                .map(|v| v as char)
                .collect()
        })
        .collect();
    groups.join("-")
}
/// A test account entry that orca creates in the target directory and later
/// binds as during load testing.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Account {
    /// Login / rdn name, e.g. `account_<random u64>` (see [`Account::generate`]).
    pub name: String,
    /// Human-readable display name, e.g. `Account <random u64>`.
    pub display_name: String,
    /// Plaintext credential used when binding as this account.
    pub password: String,
    /// Stable identifier used to cross-reference this entry in recorded data.
    pub uuid: Uuid,
}
impl Account {
    /// DN for this account under a 389-ds style `ou=people` tree.
    pub fn get_ds_ldap_dn(&self, basedn: &str) -> String {
        let uid = self.name.as_str();
        format!("uid={},ou=people,{}", uid, basedn)
    }

    /// DN for this account under a FreeIPA style `cn=users,cn=accounts` tree.
    pub fn get_ipa_ldap_dn(&self, basedn: &str) -> String {
        let uid = self.name.as_str();
        format!("uid={},cn=users,cn=accounts,{}", uid, basedn)
    }

    /// Build a randomised account that carries the provided uuid.
    pub fn generate(uuid: Uuid) -> Self {
        // One random discriminator drives both the name and display name.
        let discriminator: u64 = rand::thread_rng().gen();
        Account {
            name: format!("account_{}", discriminator),
            display_name: format!("Account {}", discriminator),
            password: readable_password_from_random(),
            uuid,
        }
    }
}
/// A test group entry with its membership tracked by member uuid.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct Group {
    /// Group rdn name, e.g. `group_<random u64>` (see [`Group::generate`]).
    pub name: String,
    /// Stable identifier used to cross-reference this entry in recorded data.
    pub uuid: Uuid,
    /// Uuids of the entities that are members of this group.
    pub members: Vec<Uuid>,
}
impl Group {
    /// DN for this group under a 389-ds style `ou=groups` tree.
    pub fn get_ds_ldap_dn(&self, basedn: &str) -> String {
        let cn = self.name.as_str();
        format!("cn={},ou=groups,{}", cn, basedn)
    }

    /// DN for this group under a FreeIPA style `cn=groups,cn=accounts` tree.
    pub fn get_ipa_ldap_dn(&self, basedn: &str) -> String {
        let cn = self.name.as_str();
        format!("cn={},cn=groups,cn=accounts,{}", cn, basedn)
    }

    /// Build a randomised group carrying the provided uuid and member set.
    pub fn generate(uuid: Uuid, members: Vec<Uuid>) -> Self {
        let discriminator: u64 = rand::thread_rng().gen();
        Group {
            name: format!("group_{}", discriminator),
            uuid,
            members,
        }
    }
}
/// Either kind of directory entity that the generated data set contains.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum Entity {
    Account(Account),
    Group(Group),
}
impl Entity {
    /// Uuid shared by both entity kinds.
    pub fn get_uuid(&self) -> Uuid {
        match self {
            Entity::Account(account) => account.uuid,
            Entity::Group(group) => group.uuid,
        }
    }

    /// Borrow the entity's name (uid for accounts, cn for groups).
    pub fn get_name(&self) -> &str {
        match self {
            Entity::Account(account) => account.name.as_str(),
            Entity::Group(group) => group.name.as_str(),
        }
    }

    /// DN under a 389-ds style tree, delegated to the inner type.
    pub fn get_ds_ldap_dn(&self, basedn: &str) -> String {
        match self {
            Entity::Account(account) => account.get_ds_ldap_dn(basedn),
            Entity::Group(group) => group.get_ds_ldap_dn(basedn),
        }
    }

    /// DN under a FreeIPA style tree, delegated to the inner type.
    pub fn get_ipa_ldap_dn(&self, basedn: &str) -> String {
        match self {
            Entity::Account(account) => account.get_ipa_ldap_dn(basedn),
            Entity::Group(group) => group.get_ipa_ldap_dn(basedn),
        }
    }

    /// Lightweight (kind, uuid) reference to this entity.
    pub fn get_entity_type(&self) -> EntityType {
        match self {
            Entity::Account(account) => EntityType::Account(account.uuid),
            Entity::Group(group) => EntityType::Group(group.uuid),
        }
    }
}
/// A reference to an entity by kind and uuid, without carrying its data.
/// Ordered/comparable so it can live in sorted collections.
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum EntityType {
    Account(Uuid),
    Group(Uuid),
}
/// A modification to apply to an entity during a Mod operation.
#[derive(Debug, Serialize, Deserialize)]
pub enum Change {
    Account,
    // What it should be set to - the full member uuid list to replace with.
    Group(Vec<Uuid>),
}
/// The kind of directory operation a connection replays, with the
/// uuids of the entities it targets.
#[derive(Debug, Serialize, Deserialize)]
pub enum OpType {
    // Authenticate as the given entity.
    Bind(Uuid),
    // Create these entities.
    Add(Vec<Uuid>),
    // Apply a Change to each listed entity.
    Mod(Vec<(Uuid, Change)>),
    // Remove these entities.
    Delete(Vec<Uuid>),
    // Search for these entities.
    Search(Vec<Uuid>),
}
/// A single replayable operation with its timing metadata.
#[derive(Debug, Serialize, Deserialize)]
pub struct Op {
    // Original elapsed/execution time of the op - presumably from the
    // captured trace; generated data fudges this. TODO confirm semantics.
    pub orig_etime: Duration,
    // Relative time at which the op should be replayed - assumption, verify.
    pub rtime: Duration,
    pub op_type: OpType,
}
impl Op {
    /// Uuids of the entities that must be reset to a known state before
    /// this op can be replayed.
    ///
    /// `Add` and `Mod` mutate their target entries so they report the
    /// affected uuids; all other op types return `None`.
    ///
    /// Uses the anonymous lifetime `'_` instead of an explicit `<'a>`
    /// parameter (clippy::needless_lifetimes) - the elided form is
    /// identical for callers.
    pub fn require_reset(&self) -> Option<Box<dyn Iterator<Item = Uuid> + '_>> {
        match &self.op_type {
            OpType::Add(ids) => Some(Box::new(ids.iter().copied())),
            OpType::Mod(changes) => Some(Box::new(changes.iter().map(|v| v.0))),
            _ => None,
        }
    }
}
/// A simulated client connection: an id plus the ordered ops it replays.
#[derive(Debug, Serialize, Deserialize)]
pub struct Conn {
    pub id: i32,
    pub ops: Vec<Op>,
}
/// The complete generated data set serialised to/from disk for a test run.
#[derive(Debug, Serialize, Deserialize)]
pub struct TestData {
    // Every entity, keyed by uuid.
    pub all_entities: HashMap<Uuid, Entity>,
    // Per-entity list of what it is allowed to manipulate; drives
    // access-control setup on the target server.
    pub access: HashMap<Uuid, Vec<EntityType>>,
    // Subset of all_entities uuids that are accounts.
    pub accounts: HashSet<Uuid>,
    // Entities that must exist before the test starts.
    pub precreate: HashSet<Uuid>,
    // The simulated connections and their operations.
    pub connections: Vec<Conn>,
}

View file

@ -1,450 +0,0 @@
use hashbrown::{HashMap, HashSet};
use kanidm_proto::constants::{
ATTR_UID, LDAP_ATTR_CN, LDAP_ATTR_DISPLAY_NAME, LDAP_ATTR_GROUPS, LDAP_ATTR_OBJECTCLASS,
LDAP_ATTR_OU,
};
use std::time::{Duration, Instant};
use ldap3_proto::proto::*;
use uuid::Uuid;
use crate::data::*;
use crate::ldap::{LdapClient, LdapSchema};
use crate::profile::DsConfig;
use crate::{TargetServer, TargetServerBuilder};
/// Target-server driver for a 389 Directory Server instance: an LDAP
/// client plus the Directory Manager password for admin binds.
#[derive(Debug)]
pub struct DirectoryServer {
    ldap: LdapClient,
    // cn=Directory Manager password, used by open_admin_connection.
    dm_pw: String,
}
impl DirectoryServer {
    /// Wire up the LDAP client with the rfc2307bis schema flavour used
    /// by 389-ds.
    fn construct(uri: String, dm_pw: String, basedn: String) -> Result<Self, ()> {
        let ldap = LdapClient::new(uri, basedn, LdapSchema::Rfc2307bis)?;
        Ok(DirectoryServer { ldap, dm_pw })
    }

    /// Build a TargetServer::DirSrv from raw connection parameters.
    pub fn build(uri: String, dm_pw: String, basedn: String) -> Result<TargetServer, ()> {
        Self::construct(uri, dm_pw, basedn).map(TargetServer::DirSrv)
    }

    /// Build a TargetServer::DirSrv from a parsed profile section.
    #[allow(clippy::new_ret_no_self)]
    pub fn new(lconfig: &DsConfig) -> Result<TargetServer, ()> {
        Self::construct(
            lconfig.uri.clone(),
            lconfig.dm_pw.clone(),
            lconfig.base_dn.clone(),
        )
        .map(TargetServer::DirSrv)
    }

    /// Human readable description of the connection target.
    pub fn info(&self) -> String {
        format!("Directory Server Connection: {}", self.ldap.uri)
    }

    /// Capture enough state to rebuild an equivalent connection later.
    pub fn builder(&self) -> TargetServerBuilder {
        TargetServerBuilder::DirSrv(
            self.ldap.uri.clone(),
            self.dm_pw.clone(),
            self.ldap.basedn.clone(),
        )
    }

    /// Bind as the directory manager for administrative setup work.
    pub async fn open_admin_connection(&self) -> Result<(), ()> {
        self.ldap.open_dm_connection(&self.dm_pw).await
    }

    /// Delete every entry whose cn equals one of the target uuids.
    pub async fn setup_admin_delete_uuids(&self, targets: &[Uuid]) -> Result<(), ()> {
        // We might hit admin limits depending on the dataset size, so we probably
        // need to do this iteratively eventually. Or just change the limits ...
        let filter = LdapFilter::Or(
            targets
                .iter()
                .map(|u| LdapFilter::Equality(LDAP_ATTR_CN.to_string(), u.to_string()))
                .collect(),
        );

        // NOTE(review): leftover debug output - this echoes the filter to
        // stdout on every call; should probably be removed or demoted to
        // debug!().
        print!("(|");
        for u in targets.iter() {
            print!("(cn={})", u);
        }
        println!(")");

        let res = self.ldap.search(filter).await?;

        for ent in res.iter() {
            debug!("Deleting ... {}", ent.dn);
            self.ldap.delete(ent.dn.clone()).await?;
        }

        Ok(())
    }

    /// Ensure ou=people / ou=groups exist, then idempotently create each
    /// target account or group (skipping any that already exist), and
    /// finally replace each group's member list.
    pub async fn setup_admin_precreate_entities(
        &self,
        targets: &HashSet<Uuid>,
        all_entities: &HashMap<Uuid, Entity>,
    ) -> Result<(), ()> {
        // Check if ou=people and ou=group exist
        let res = self
            .ldap
            .search(LdapFilter::Equality(
                LDAP_ATTR_OU.to_string(),
                "people".to_string(),
            ))
            .await?;

        if res.is_empty() {
            // Doesn't exist
            info!("Creating ou=people");
            let ou_people = LdapAddRequest {
                dn: format!("ou=people,{}", self.ldap.basedn),
                attributes: vec![
                    LdapAttribute {
                        atype: LDAP_ATTR_OBJECTCLASS.to_string(),
                        vals: vec![
                            "top".as_bytes().into(),
                            "organizationalUnit".as_bytes().into(),
                        ],
                    },
                    LdapAttribute {
                        atype: LDAP_ATTR_OU.to_string(),
                        vals: vec!["people".as_bytes().into()],
                    },
                ],
            };
            self.ldap.add(ou_people).await?;
        }

        let res = self
            .ldap
            .search(LdapFilter::Equality(
                LDAP_ATTR_OU.to_string(),
                LDAP_ATTR_GROUPS.to_string(),
            ))
            .await?;

        if res.is_empty() {
            // Doesn't exist
            info!("Creating ou={}", LDAP_ATTR_GROUPS);
            let ou_groups: LdapAddRequest = LdapAddRequest {
                dn: format!("ou={},{}", LDAP_ATTR_GROUPS, self.ldap.basedn),
                attributes: vec![
                    LdapAttribute {
                        atype: LDAP_ATTR_OBJECTCLASS.to_string(),
                        vals: vec![
                            "top".as_bytes().into(),
                            "organizationalUnit".as_bytes().into(),
                        ],
                    },
                    LdapAttribute {
                        atype: LDAP_ATTR_OU.to_string(),
                        vals: vec![LDAP_ATTR_GROUPS.as_bytes().into()],
                    },
                ],
            };
            self.ldap.add(ou_groups).await?;
        }

        // Now go and create the rest.
        // We stick ACI's on the rootdse, so we can clear them and reset them easier.
        for u in targets {
            // does it already exist?
            let res = self
                .ldap
                .search(LdapFilter::Equality(
                    LDAP_ATTR_CN.to_string(),
                    u.to_string(),
                ))
                .await?;

            if !res.is_empty() {
                continue;
            }

            let e = all_entities.get(u).unwrap();
            let dn = e.get_ds_ldap_dn(&self.ldap.basedn);
            match e {
                Entity::Account(a) => {
                    let account = LdapAddRequest {
                        dn,
                        attributes: vec![
                            LdapAttribute {
                                atype: LDAP_ATTR_OBJECTCLASS.to_string(),
                                vals: vec![
                                    "top".as_bytes().into(),
                                    "nsPerson".as_bytes().into(),
                                    "nsAccount".as_bytes().into(),
                                    "nsOrgPerson".as_bytes().into(),
                                    "posixAccount".as_bytes().into(),
                                ],
                            },
                            LdapAttribute {
                                // cn holds the uuid so we can find/delete by it later.
                                atype: LDAP_ATTR_CN.to_string(),
                                vals: vec![a.uuid.as_bytes().to_vec()],
                            },
                            LdapAttribute {
                                atype: ATTR_UID.to_string(),
                                vals: vec![a.name.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: LDAP_ATTR_DISPLAY_NAME.to_string(),
                                vals: vec![a.display_name.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "userPassword".to_string(),
                                vals: vec![a.password.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "homeDirectory".to_string(),
                                vals: vec![format!("/home/{}", a.uuid).as_bytes().into()],
                            },
                            // Fixed posix ids: all test accounts share 1000/1000.
                            LdapAttribute {
                                atype: "uidNumber".to_string(),
                                vals: vec!["1000".as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "gidNumber".to_string(),
                                vals: vec!["1000".as_bytes().into()],
                            },
                        ],
                    };
                    self.ldap.add(account).await?;
                }
                Entity::Group(g) => {
                    let group = LdapAddRequest {
                        dn,
                        attributes: vec![
                            LdapAttribute {
                                atype: LDAP_ATTR_OBJECTCLASS.to_string(),
                                vals: vec![
                                    "top".as_bytes().into(),
                                    "groupOfNames".as_bytes().into(),
                                ],
                            },
                            LdapAttribute {
                                atype: LDAP_ATTR_CN.to_string(),
                                vals: vec![g.uuid.as_bytes().to_vec(), g.name.as_bytes().into()],
                            },
                        ],
                    };
                    self.ldap.add(group).await?;
                }
            }
        }

        // Add all the members.
        for g in targets.iter().filter_map(|u| {
            let e = all_entities.get(u).unwrap();
            match e {
                Entity::Group(g) => Some(g),
                _ => None,
            }
        }) {
            // List of dns
            let vals: Vec<Vec<u8>> = g
                .members
                .iter()
                .map(|id| {
                    all_entities
                        .get(id)
                        .unwrap()
                        .get_ds_ldap_dn(&self.ldap.basedn)
                        .as_bytes()
                        .into()
                })
                .collect();

            // Replace (not add) so repeated runs converge on the same state.
            let req = LdapModifyRequest {
                dn: g.get_ds_ldap_dn(&self.ldap.basedn),
                changes: vec![LdapModify {
                    operation: LdapModifyType::Replace,
                    modification: LdapPartialAttribute {
                        atype: "member".to_string(),
                        vals,
                    },
                }],
            };

            self.ldap.modify(req).await?;
        }

        Ok(())
    }

    /// Create the priv_account_manage / priv_group_manage groups, install
    /// the ACI set on the basedn, then adjust group memberships from the
    /// access map.
    pub async fn setup_access_controls(
        &self,
        access: &HashMap<Uuid, Vec<EntityType>>,
        all_entities: &HashMap<Uuid, Entity>,
    ) -> Result<(), ()> {
        // Create top level priv groups
        let res = self
            .ldap
            .search(LdapFilter::Equality(
                LDAP_ATTR_CN.to_string(),
                "priv_account_manage".to_string(),
            ))
            .await?;

        if res.is_empty() {
            // Doesn't exist
            info!("Creating cn=priv_account_manage");
            let group = LdapAddRequest {
                dn: format!("cn=priv_account_manage,{}", self.ldap.basedn),
                attributes: vec![
                    LdapAttribute {
                        atype: LDAP_ATTR_OBJECTCLASS.to_string(),
                        vals: vec!["top".as_bytes().into(), "groupOfNames".as_bytes().into()],
                    },
                    LdapAttribute {
                        atype: LDAP_ATTR_CN.to_string(),
                        vals: vec!["priv_account_manage".as_bytes().into()],
                    },
                ],
            };
            self.ldap.add(group).await?;
        }

        let res = self
            .ldap
            .search(LdapFilter::Equality(
                LDAP_ATTR_CN.to_string(),
                "priv_group_manage".to_string(),
            ))
            .await?;

        if res.is_empty() {
            // Doesn't exist
            info!("Creating cn=priv_group_manage");
            let group = LdapAddRequest {
                dn: format!("cn=priv_group_manage,{}", self.ldap.basedn),
                attributes: vec![
                    LdapAttribute {
                        atype: LDAP_ATTR_OBJECTCLASS.to_string(),
                        vals: vec!["top".as_bytes().into(), "groupOfNames".as_bytes().into()],
                    },
                    LdapAttribute {
                        atype: LDAP_ATTR_CN.to_string(),
                        vals: vec!["priv_group_manage".as_bytes().into()],
                    },
                ],
            };
            self.ldap.add(group).await?;
        }

        // Add the acis with mod replace.
        let acimod = LdapModifyRequest {
            dn: self.ldap.basedn.clone(),
            changes: vec![
                LdapModify {
                    operation: LdapModifyType::Replace,
                    modification: LdapPartialAttribute {
                        atype: "aci".to_string(),
                        vals: vec![
                            r#"(targetattr="dc || description || objectClass")(targetfilter="(objectClass=domain)")(version 3.0; acl "Enable anyone domain read"; allow (read, search, compare)(userdn="ldap:///anyone");)"#.as_bytes().into(),
                            r#"(targetattr="ou || objectClass")(targetfilter="(objectClass=organizationalUnit)")(version 3.0; acl "Enable anyone ou read"; allow (read, search, compare)(userdn="ldap:///anyone");)"#.as_bytes().into(),
                            r#"(targetattr="cn || member || gidNumber || nsUniqueId || description || objectClass")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable anyone group read"; allow (read, search, compare)(userdn="ldap:///anyone");)"#.as_bytes().into(),
                            format!(r#"(targetattr="cn || member || gidNumber || description || objectClass")(targetfilter="(objectClass=groupOfNames)")(version 3.0; acl "Enable group_admin to manage groups"; allow (write,add, delete)(groupdn="ldap:///cn=priv_group_manage,{}");)"#, self.ldap.basedn).as_bytes().into(),
                            r#"(targetattr="objectClass || description || nsUniqueId || uid || displayName || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || memberOf || mail || nsSshPublicKey || nsAccountLock || userCertificate")(targetfilter="(objectClass=posixaccount)")(version 3.0; acl "Enable anyone user read"; allow (read, search, compare)(userdn="ldap:///anyone");)"#.as_bytes().into(),
                            r#"(targetattr="displayName || legalName || userPassword || nsSshPublicKey")(version 3.0; acl "Enable self partial modify"; allow (write)(userdn="ldap:///self");)"#.as_bytes().into(),
                            format!(r#"(targetattr="uid || description || displayName || loginShell || uidNumber || gidNumber || gecos || homeDirectory || cn || memberOf || mail || legalName || telephoneNumber || mobile")(targetfilter="(&(objectClass=nsPerson)(objectClass=nsAccount))")(version 3.0; acl "Enable user admin create"; allow (write, add, delete, read)(groupdn="ldap:///cn=priv_account_manage,{}");)"#, self.ldap.basedn).as_bytes().into(),
                        ]
                    }
                }
            ]
        };

        self.ldap.modify(acimod).await?;

        // Add members as needed.
        let mut priv_account = Vec::new();
        let mut priv_group = Vec::new();
        for (id, list) in access.iter() {
            // get the users name.
            let account = all_entities.get(id).unwrap();

            // NOTE(review): `== 0` means "no Account/Group entries in the
            // list" - the naming suggests the condition may be inverted;
            // confirm the intended semantics before relying on this.
            let need_account = list
                .iter()
                .filter(|v| matches!(v, EntityType::Account(_)))
                .count()
                == 0;
            let need_group = list
                .iter()
                .filter(|v| matches!(v, EntityType::Group(_)))
                .count()
                == 0;

            if need_account {
                priv_account.push(
                    account
                        .get_ds_ldap_dn(&self.ldap.basedn)
                        .as_bytes()
                        .to_vec(),
                )
            }
            if need_group {
                priv_group.push(
                    account
                        .get_ds_ldap_dn(&self.ldap.basedn)
                        .as_bytes()
                        .to_vec(),
                )
            }
        }

        // Sort and dedup
        priv_account.sort_unstable();
        priv_group.sort_unstable();
        priv_account.dedup();
        priv_group.dedup();

        // Do the mod in one pass.
        // NOTE(review): these use LdapModifyType::Delete and ignore the
        // result - looks like a best-effort membership clear; confirm.
        info!("Setting up cn=priv_group_manage");
        let req = LdapModifyRequest {
            dn: format!("cn=priv_group_manage,{}", self.ldap.basedn),
            changes: vec![LdapModify {
                operation: LdapModifyType::Delete,
                modification: LdapPartialAttribute {
                    atype: "member".to_string(),
                    vals: priv_group,
                },
            }],
        };
        let _ = self.ldap.modify(req).await;

        info!("Setting up cn=priv_account_manage");
        let req = LdapModifyRequest {
            dn: format!("cn=priv_account_manage,{}", self.ldap.basedn),
            changes: vec![LdapModify {
                operation: LdapModifyType::Delete,
                modification: LdapPartialAttribute {
                    atype: "member".to_string(),
                    vals: priv_account,
                },
            }],
        };
        let _ = self.ldap.modify(req).await;

        Ok(())
    }

    /// Bind as a normal user; returns the timing pair from the LDAP layer.
    pub async fn open_user_connection(
        &self,
        test_start: Instant,
        name: &str,
        pw: &str,
    ) -> Result<(Duration, Duration), ()> {
        self.ldap.open_user_connection(test_start, name, pw).await
    }

    /// Tear down the current LDAP connection.
    pub async fn close_connection(&self) {
        self.ldap.close_connection().await;
    }

    /// Search by name for each id; returns timings plus result count.
    pub async fn search(
        &self,
        test_start: Instant,
        ids: &[String],
    ) -> Result<(Duration, Duration, usize), ()> {
        self.ldap.search_name(test_start, ids).await
    }
}

11
tools/orca/src/error.rs Normal file
View file

@ -0,0 +1,11 @@
/// Coarse error classification for orca. Variants name the subsystem
/// that failed; detail is logged at the failure site.
///
/// Derives `Debug` so the error can be formatted by `unwrap`/`expect`,
/// logging macros, and `Result` plumbing - an error type without
/// `Debug` is awkward to propagate.
#[derive(Debug)]
pub enum Error {
    Io,
    SerdeToml,
    SerdeJson,
    KanidmClient,
    ProfileBuilder,
    Tokio,
    // NOTE(review): typo of "Interrupt"; kept as-is because callers
    // construct/match this variant by name.
    Interupt,
    Crossbeam,
    InvalidState,
}

View file

@ -1,164 +1,132 @@
use std::fs::File;
use std::path::Path;
use std::time::Duration;
use uuid::Uuid;
use crate::error::Error;
use crate::kani::KanidmOrcaClient;
use crate::profile::Profile;
use crate::state::{Credential, Flag, Model, Person, PreflightState, State};
use rand::distributions::{Alphanumeric, DistString};
use rand::seq::SliceRandom;
use rand::SeedableRng;
use rand_chacha::ChaCha8Rng;
use hashbrown::{HashMap, HashSet};
use std::collections::BTreeSet;
use crate::data::*;
const PEOPLE_PREFIX: &str = "person";
const N_USERS: usize = 3000;
const N_GROUPS: usize = 1500;
const N_MEMBERSHIPS: usize = 10;
const N_NEST: usize = 4;
#[derive(Debug)]
pub struct PartialGroup {
pub name: String,
pub members: BTreeSet<String>,
}
pub(crate) fn doit(output: &Path) {
info!(
"Performing data generation into {}",
output.to_str().unwrap(),
fn random_name(prefix: &str, rng: &mut ChaCha8Rng) -> String {
let suffix = Alphanumeric.sample_string(rng, 8).to_lowercase();
format!("{}_{}", prefix, suffix)
}
fn random_password(rng: &mut ChaCha8Rng) -> String {
Alphanumeric.sample_string(rng, 24)
}
pub async fn populate(_client: &KanidmOrcaClient, profile: Profile) -> Result<State, Error> {
// IMPORTANT: We have to perform these steps in order so that the RNG is deterministic between
// multiple invocations.
let mut seeded_rng = ChaCha8Rng::seed_from_u64(profile.seed());
let female_given_names = std::include_str!("../names-dataset/dataset/Female_given_names.txt");
let male_given_names = std::include_str!("../names-dataset/dataset/Male_given_names.txt");
let given_names = female_given_names
.split('\n')
.chain(male_given_names.split('\n'))
.collect::<Vec<_>>();
let surnames = std::include_str!("../names-dataset/dataset/Surnames.txt");
let surnames = surnames.split('\n').collect::<Vec<_>>();
debug!(
"name pool: given: {} - family: {}",
given_names.len(),
surnames.len()
);
let mut rng = rand::thread_rng();
// PHASE 0 - For now, set require MFA off.
let preflight_flags = vec![Flag::DisableAllPersonsMFAPolicy];
if N_MEMBERSHIPS >= N_GROUPS {
error!("Too many memberships per group. Memberships must be less that n-groups");
return;
// PHASE 1 - generate a pool of persons that are not-yet created for future import.
// todo! may need a random username vec for later stuff
// PHASE 2 - generate persons
// - assign them credentials of various types.
let mut persons = Vec::with_capacity(profile.person_count() as usize);
let mut person_usernames = BTreeSet::new();
for _ in 0..profile.person_count() {
let given_name = given_names
.choose(&mut seeded_rng)
.expect("name set corrupted");
let surname = surnames
.choose(&mut seeded_rng)
.expect("name set corrupted");
let display_name = format!("{} {}", given_name, surname);
let username = display_name
.chars()
.filter(|c| c.is_ascii_alphanumeric())
.collect::<String>()
.to_lowercase();
let mut username = if username.is_empty() {
random_name(PEOPLE_PREFIX, &mut seeded_rng)
} else {
username
};
while person_usernames.contains(&username) {
username = random_name(PEOPLE_PREFIX, &mut seeded_rng);
}
let password = random_password(&mut seeded_rng);
// TODO: Add more and different "models" to each person for their actions.
let model = Model::Basic;
// =======
// Data is ready, make changes to the server. These should be idempotent if possible.
let p = Person {
preflight_state: PreflightState::Present,
username: username.clone(),
display_name,
member_of: BTreeSet::default(),
credential: Credential::Password { plain: password },
model,
};
debug!(?p);
person_usernames.insert(username.clone());
persons.push(p);
}
// Open before we start so we have it ready to go.
let out_file = match File::create(output) {
Ok(f) => f,
Err(e) => {
error!("Failed to open {} - {:?}", output.to_str().unwrap(), e);
return;
}
// PHASE 3 - generate groups for integration access, assign persons.
// PHASE 4 - generate groups for user modification rights
// PHASE 5 - generate excess groups with nesting. Randomly assign persons.
// PHASE 6 - generate integrations -
// PHASE 7 - given the intergariotns and groupings,
// Return the state.
let state = State {
profile,
// ---------------
preflight_flags,
persons,
};
// Number of users
let accounts: Vec<_> = (0..N_USERS)
.map(|i| Account {
name: format!("testuser{}", i),
display_name: format!("Test User {}", i),
password: readable_password_from_random(),
uuid: Uuid::new_v4(),
})
.collect();
// Number of groups.
let mut groups: Vec<_> = (0..N_GROUPS)
.map(|i| Group {
name: format!("testgroup{}", i),
uuid: Uuid::new_v4(),
members: Vec::new(),
})
.collect();
// Should groups be randomly nested?
// The way this is done is we split the array based on nest level. If it's 1, we split
// in 2, 2 we split in 3 and so on.
if N_NEST > 0 {
debug!("Nesting Groups");
let chunk_size = N_GROUPS / (N_NEST + 1);
if chunk_size == 0 {
error!("Unable to chunk groups, need (N_GROUPS / (N_NEST + 1)) > 0");
return;
}
let mut chunk_iter = groups.chunks_mut(chunk_size);
// Can't fail due to above checks.
let mut p_chunk = chunk_iter.next().unwrap();
// while let Some(w_chunk) = chunk_iter.next() {
for w_chunk in chunk_iter {
// add items from work chunk to parent chunk
p_chunk
.iter_mut()
.zip(w_chunk.iter())
.for_each(|(p, w): (&mut _, &_)| p.members.push(w.uuid));
// swap w_chunk to p_chunk
p_chunk = w_chunk;
}
}
// Number of memberships per user.
// We use rand for this to sample random numbers of
for acc in accounts.iter() {
// Sample randomly.
for idx in rand::seq::index::sample(&mut rng, N_GROUPS, N_MEMBERSHIPS).iter() {
groups[idx].members.push(acc.uuid);
}
}
// Build from the generated data above.
let all_entities: HashMap<Uuid, Entity> = accounts
.into_iter()
.map(|acc| (acc.uuid, Entity::Account(acc)))
.chain(groups.into_iter().map(|grp| (grp.uuid, Entity::Group(grp))))
.collect();
// Define the entries that should exist "at the start of the test". For now, we just
// create everything. Maybe when we start to add mod tests we need to retain a pool
// of things to retain here for those ops.
let precreate: HashSet<_> = all_entities.keys().copied().collect();
// The set of accounts in all_entities.
let accounts: HashSet<Uuid> = all_entities
.iter()
.filter_map(|(uuid, ent)| match ent {
Entity::Account(_) => Some(*uuid),
_ => None,
})
.collect();
// This defines a map of "entity" to "what can it manipulate". This
// is used to create access controls in some cases for mod tests.
//
// For example, if we have user with uuid X and it changes Group with
// uuid Y, then we need to ensure that X has group-mod permissions over
// Y in some capacity.
let access: HashMap<Uuid, Vec<EntityType>> = HashMap::new();
// The set of operations to simulate. We pre-calc these so tests can randomly
// sample and perform the searches as needed.
// We don't have original times, so we can fudge these.
let orig_etime = Duration::from_secs(1);
let rtime = Duration::from_secs(1);
// Needed for random sampling.
let all_ids: Vec<_> = all_entities.keys().copied().collect();
let all_ids_len = all_ids.len();
let connections: Vec<_> = (0..all_ids_len)
.map(|id| {
// Could be rand?
let n_search = 1;
let mut search_ids = Vec::new();
for idx in rand::seq::index::sample(&mut rng, all_ids_len, n_search).iter() {
search_ids.push(all_ids[idx]);
}
//
Conn {
id: id as i32,
ops: vec![Op {
orig_etime,
rtime,
op_type: OpType::Search(search_ids),
}],
}
})
.collect();
let td = TestData {
all_entities,
access,
accounts,
precreate,
connections,
};
if let Err(e) = serde_json::to_writer_pretty(out_file, &td) {
error!("Writing to file -> {:?}", e);
};
Ok(state)
}

View file

@ -1,298 +0,0 @@
use hashbrown::{HashMap, HashSet};
use kanidm_proto::constants::{
ATTR_UID, LDAP_ATTR_CN, LDAP_ATTR_DISPLAY_NAME, LDAP_ATTR_OBJECTCLASS, LDAP_CLASS_GROUPOFNAMES,
};
use ldap3_proto::proto::*;
use std::time::{Duration, Instant};
use uuid::Uuid;
use crate::data::*;
use crate::ldap::{LdapClient, LdapSchema};
use crate::profile::IpaConfig;
use crate::{TargetServer, TargetServerBuilder};
/// Target-server driver for a FreeIPA instance: an LDAP client plus the
/// realm and admin password used for administrative binds.
#[derive(Debug)]
pub struct IpaServer {
    ldap: LdapClient,
    // Kerberos realm, e.g. "dev.kanidm.com"; also derives the basedn.
    realm: String,
    admin_pw: String,
}
impl IpaServer {
    /// Derive the basedn from the realm and wire up the LDAP client.
    fn construct(uri: String, realm: String, admin_pw: String) -> Result<Self, ()> {
        // explode the realm to basedn.
        // dev.kanidm.com
        // dc=dev,dc=kanidm,dc=com
        let basedn = format!("dc={}", realm.replace('.', ",dc="));

        let ldap = LdapClient::new(uri, basedn, LdapSchema::Rfc2307bis)?;

        Ok(IpaServer {
            ldap,
            realm,
            admin_pw,
        })
    }

    /// Build a TargetServer::Ipa from raw connection parameters.
    pub fn build(uri: String, realm: String, admin_pw: String) -> Result<TargetServer, ()> {
        Self::construct(uri, realm, admin_pw).map(TargetServer::Ipa)
    }

    /// Build a TargetServer::Ipa from a parsed profile section.
    #[allow(clippy::new_ret_no_self)]
    pub fn new(lconfig: &IpaConfig) -> Result<TargetServer, ()> {
        Self::construct(
            lconfig.uri.clone(),
            lconfig.realm.clone(),
            lconfig.admin_pw.clone(),
        )
        .map(TargetServer::Ipa)
    }

    /// Human readable description of the connection target.
    pub fn info(&self) -> String {
        format!("Ipa Server Connection: {} @ {}", self.realm, self.ldap.uri)
    }

    /// Capture enough state to rebuild an equivalent connection later.
    pub fn builder(&self) -> TargetServerBuilder {
        TargetServerBuilder::Ipa(
            self.ldap.uri.clone(),
            self.realm.clone(),
            self.admin_pw.clone(),
        )
    }

    /// Bind as the IPA admin for administrative setup work.
    pub async fn open_admin_connection(&self) -> Result<(), ()> {
        self.ldap.open_ipa_admin_connection(&self.admin_pw).await
    }

    /// Not implemented for IPA yet - succeeds without deleting anything.
    pub async fn setup_admin_delete_uuids(&self, _targets: &[Uuid]) -> Result<(), ()> {
        // todo!();
        Ok(())
    }

    /// Idempotently create each target account or group (skipping ones
    /// that already exist by cn), then replace group member lists.
    pub async fn setup_admin_precreate_entities(
        &self,
        targets: &HashSet<Uuid>,
        all_entities: &HashMap<Uuid, Entity>,
    ) -> Result<(), ()> {
        for u in targets {
            let e = all_entities.get(u).unwrap();
            // does it already exist?
            let res = self
                .ldap
                .search(LdapFilter::Equality(
                    "cn".to_string(),
                    e.get_name().to_string(),
                ))
                .await?;

            if !res.is_empty() {
                continue;
            }

            let dn = e.get_ipa_ldap_dn(&self.ldap.basedn);
            match e {
                Entity::Account(a) => {
                    // Mirrors the attribute set FreeIPA applies to a user entry.
                    let account = LdapAddRequest {
                        dn,
                        attributes: vec![
                            LdapAttribute {
                                atype: LDAP_ATTR_OBJECTCLASS.to_string(),
                                vals: vec![
                                    "ipaobject".as_bytes().into(),
                                    "person".as_bytes().into(),
                                    "top".as_bytes().into(),
                                    "ipasshuser".as_bytes().into(),
                                    "inetorgperson".as_bytes().into(),
                                    "organizationalperson".as_bytes().into(),
                                    "krbticketpolicyaux".as_bytes().into(),
                                    "krbprincipalaux".as_bytes().into(),
                                    "inetuser".as_bytes().into(),
                                    "posixaccount".as_bytes().into(),
                                    "meporiginentry".as_bytes().into(),
                                ],
                            },
                            LdapAttribute {
                                atype: "ipauniqueid".to_string(),
                                vals: vec!["autogenerate".as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: ATTR_UID.to_string(),
                                vals: vec![a.name.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: LDAP_ATTR_CN.to_string(),
                                vals: vec![a.name.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "givenName".to_string(),
                                vals: vec![a.name.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "sn".to_string(),
                                vals: vec![a.name.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: LDAP_ATTR_DISPLAY_NAME.to_string(),
                                vals: vec![a.display_name.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "gecos".to_string(),
                                vals: vec![a.display_name.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "userPassword".to_string(),
                                vals: vec![a.password.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "initials".to_string(),
                                vals: vec!["tu".as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "homeDirectory".to_string(),
                                vals: vec![format!("/home/{}", a.name).as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "mail".to_string(),
                                vals: vec![format!("{}@{}", a.name, self.realm).as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "loginshell".to_string(),
                                vals: vec!["/bin/zsh".as_bytes().into()],
                            },
                            // "-1" - presumably lets IPA assign real posix
                            // ids; TODO confirm against IPA docs.
                            LdapAttribute {
                                atype: "uidNumber".to_string(),
                                vals: vec!["-1".as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "gidNumber".to_string(),
                                vals: vec!["-1".as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "krbextradata".to_string(),
                                vals: vec!["placeholder".as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "krblastpwdchange".to_string(),
                                vals: vec!["20230119053224Z".as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "krbPasswordExpiration".to_string(),
                                vals: vec!["20380119053224Z".as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "krbPrincipalName".to_string(),
                                vals: vec![format!("{}@{}", a.name, self.realm.to_uppercase())
                                    .as_bytes()
                                    .into()],
                            },
                            LdapAttribute {
                                atype: "krbCanonicalName".to_string(),
                                vals: vec![format!("{}@{}", a.name, self.realm.to_uppercase())
                                    .as_bytes()
                                    .into()],
                            },
                        ],
                    };
                    self.ldap.add(account).await?;
                }
                Entity::Group(g) => {
                    let group = LdapAddRequest {
                        dn,
                        attributes: vec![
                            LdapAttribute {
                                atype: "objectClass".to_string(),
                                vals: vec![
                                    "top".as_bytes().into(),
                                    LDAP_CLASS_GROUPOFNAMES.as_bytes().into(),
                                    "nestedgroup".as_bytes().into(),
                                    "ipausergroup".as_bytes().into(),
                                    "ipaobject".as_bytes().into(),
                                    "posixgroup".as_bytes().into(),
                                ],
                            },
                            LdapAttribute {
                                atype: "cn".to_string(),
                                vals: vec![g.name.as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "ipauniqueid".to_string(),
                                vals: vec!["autogenerate".as_bytes().into()],
                            },
                            LdapAttribute {
                                atype: "gidNumber".to_string(),
                                vals: vec!["-1".as_bytes().into()],
                            },
                        ],
                    };
                    self.ldap.add(group).await?;
                }
            }
        }

        // Add all the members.
        for g in targets.iter().filter_map(|u| {
            let e = all_entities.get(u).unwrap();
            match e {
                Entity::Group(g) => Some(g),
                _ => None,
            }
        }) {
            // List of dns
            let vals: Vec<Vec<u8>> = g
                .members
                .iter()
                .map(|id| {
                    all_entities
                        .get(id)
                        .unwrap()
                        .get_ipa_ldap_dn(&self.ldap.basedn)
                        .as_bytes()
                        .into()
                })
                .collect();

            // Replace (not add) so repeated runs converge on the same state.
            let req = LdapModifyRequest {
                dn: g.get_ipa_ldap_dn(&self.ldap.basedn),
                changes: vec![LdapModify {
                    operation: LdapModifyType::Replace,
                    modification: LdapPartialAttribute {
                        atype: "member".to_string(),
                        vals,
                    },
                }],
            };

            self.ldap.modify(req).await?;
        }

        Ok(())
    }

    /// Not implemented for IPA yet - succeeds without changing anything.
    pub async fn setup_access_controls(
        &self,
        _access: &HashMap<Uuid, Vec<EntityType>>,
        _all_entities: &HashMap<Uuid, Entity>,
    ) -> Result<(), ()> {
        // todo!();
        Ok(())
    }

    /// Bind as a normal user; returns the timing pair from the LDAP layer.
    pub async fn open_user_connection(
        &self,
        test_start: Instant,
        name: &str,
        pw: &str,
    ) -> Result<(Duration, Duration), ()> {
        self.ldap.open_user_connection(test_start, name, pw).await
    }

    /// Tear down the current LDAP connection.
    pub async fn close_connection(&self) {
        self.ldap.close_connection().await;
    }

    /// Search by name for each id; returns timings plus result count.
    pub async fn search(
        &self,
        test_start: Instant,
        ids: &[String],
    ) -> Result<(Duration, Duration, usize), ()> {
        self.ldap.search_name(test_start, ids).await
    }
}

View file

@ -1,434 +1,100 @@
use hashbrown::{HashMap, HashSet};
use std::time::{Duration, Instant};
use kanidm_client::{KanidmClient, KanidmClientBuilder};
use kanidm_client::{ClientError, KanidmClient, KanidmClientBuilder, StatusCode};
use kanidm_proto::internal::*;
use uuid::Uuid;
use crate::error::Error;
use crate::profile::Profile;
use crate::data::*;
use crate::ldap::{LdapClient, LdapSchema};
use crate::profile::{KaniHttpConfig, KaniLdapConfig};
use crate::{TargetServer, TargetServerBuilder};
#[derive(Debug)]
pub struct KaniHttpServer {
uri: String,
admin_pw: String,
client: KanidmClient,
// This client contains our admin and idm_admin connections that are
// pre-authenticated for use against the kanidm server. In addition,
// new clients can be requested for our test actors.
pub struct KanidmOrcaClient {
#[allow(dead_code)]
admin_client: KanidmClient,
idm_admin_client: KanidmClient,
// In future we probably need a way to connect to all the nodes?
// Or we just need all their uris.
}
#[derive(Debug)]
pub struct KaniLdapServer {
http: KaniHttpServer,
ldap: LdapClient,
}
impl KaniHttpServer {
fn construct(uri: String, admin_pw: String) -> Result<Self, ()> {
let client = KanidmClientBuilder::new()
.address(uri.clone())
impl KanidmOrcaClient {
pub async fn new(profile: &Profile) -> Result<Self, Error> {
let admin_client = KanidmClientBuilder::new()
.address(profile.control_uri().to_string())
.danger_accept_invalid_hostnames(true)
.danger_accept_invalid_certs(true)
.build()
.map_err(|e| {
error!("Unable to create kanidm client {:?}", e);
.map_err(|err| {
error!(?err, "Unable to create kanidm client");
Error::KanidmClient
})?;
Ok(KaniHttpServer {
uri,
admin_pw,
client,
admin_client
.auth_simple_password("admin", profile.admin_password())
.await
.map_err(|err| {
error!(?err, "Unable to authenticate as admin");
Error::KanidmClient
})?;
let idm_admin_client = admin_client.new_session().map_err(|err| {
error!(?err, "Unable to create new session");
Error::KanidmClient
})?;
idm_admin_client
.auth_simple_password("idm_admin", profile.idm_admin_password())
.await
.map_err(|err| {
error!(?err, "Unable to authenticate as idm_admin");
Error::KanidmClient
})?;
Ok(KanidmOrcaClient {
admin_client,
idm_admin_client,
})
}
pub fn build(uri: String, admin_pw: String) -> Result<TargetServer, ()> {
Self::construct(uri, admin_pw).map(TargetServer::Kanidm)
}
#[allow(clippy::new_ret_no_self)]
pub fn new(khconfig: &KaniHttpConfig) -> Result<TargetServer, ()> {
Self::construct(khconfig.uri.clone(), khconfig.admin_pw.clone()).map(TargetServer::Kanidm)
}
pub fn info(&self) -> String {
format!("Kanidm HTTP Connection: {}", self.uri)
}
pub fn builder(&self) -> TargetServerBuilder {
TargetServerBuilder::Kanidm(self.uri.clone(), self.admin_pw.clone())
}
// open the admin internal connection
pub async fn open_admin_connection(&self) -> Result<(), ()> {
self.client
.auth_simple_password("admin", &self.admin_pw)
pub async fn disable_mfa_requirement(&self) -> Result<(), Error> {
self.idm_admin_client
.group_account_policy_credential_type_minimum_set("idm_all_persons", "any")
.await
.map_err(|e| {
error!("Unable to authenticate -> {:?}", e);
})?;
// For admin to work, we need idm permissions.
// NOT RECOMMENDED IN PRODUCTION.
self.client
.idm_group_add_members("idm_admins", &["admin"])
.await
.map(|_| ())
.map_err(|e| {
error!("Unable to extend admin permissions (idm) -> {:?}", e);
.map_err(|err| {
error!(?err, "Unable to modify idm_all_persons policy");
Error::KanidmClient
})
}
pub async fn setup_admin_delete_uuids(&self, targets: &[Uuid]) -> Result<(), ()> {
// Build the filter.
let inner: Vec<Filter> = targets
.iter()
.map(|u| Filter::Eq("name".to_string(), format!("{}", u)))
.collect();
let filter = Filter::Or(inner);
// Submit it.
self.client.delete(filter).await.map(|_| ()).or_else(|e| {
error!("Error during delete -> {:?}", e);
Ok(())
})
}
// Pre-create every target account and group on the server, then populate
// group memberships. Creation is idempotent: an AttrUnique plugin error
// (entry already exists) is ignored so re-runs are safe.
pub async fn setup_admin_precreate_entities(
&self,
targets: &HashSet<Uuid>,
all_entities: &HashMap<Uuid, Entity>,
) -> Result<(), ()> {
// Create all the accounts and groups
let num_uuids = targets.len();
let mut current_slice = 1;
info!("Have to do {} uuids", num_uuids);
for (index, uuid) in targets.iter().enumerate() {
// Coarse progress reporting in 10% steps.
// NOTE(review): each milestone is reported as soon as its slice is
// entered, so "10% complete" prints on the first iteration — confirm
// whether this early reporting is intended.
if num_uuids / 10 * current_slice > index {
info!("{}% complete", current_slice * 10);
current_slice += 1;
}
// Presumably every target uuid has an entry in all_entities — TODO confirm,
// otherwise this unwrap panics.
let entity = all_entities.get(uuid).unwrap();
match entity {
Entity::Account(a) => {
self.client
.idm_person_account_create(&a.name, &a.display_name)
.await
.map(|_| ())
.or_else(|e| {
match e {
ClientError::Http(
StatusCode::INTERNAL_SERVER_ERROR,
Some(OperationError::Plugin(PluginError::AttrUnique(_))),
_,
) => {
// Ignore.
debug!("Account already exists ...");
Ok(())
}
_ => {
error!("Error creating account -> {:?}", e);
Err(())
}
}
})?;
// Now set the account password
self.client
.idm_person_account_primary_credential_set_password(&a.name, &a.password)
.await
.map(|_| ())
.map_err(|e| {
error!("Unable to set password for {}: {:?}", a.name, e);
})?;
// For ldap tests, we need to make these posix accounts.
self.client
.idm_person_account_unix_extend(&a.name, None, None)
.await
.map(|_| ())
.map_err(|e| {
error!("Unable to set unix attributes for {}: {:?}", a.name, e);
})?;
self.client
.idm_person_account_unix_cred_put(&a.name, &a.password)
.await
.map(|_| ())
.map_err(|e| {
error!("Unable to set unix password for {}: {:?}", a.name, e);
})?;
}
Entity::Group(g) => {
self.client
.idm_group_create(&g.name, None)
.await
.map(|_| ())
.or_else(|e| {
match e {
ClientError::Http(
StatusCode::INTERNAL_SERVER_ERROR,
Some(OperationError::Plugin(PluginError::AttrUnique(_))),
_,
) => {
// Ignore.
debug!("Group already exists ...");
Ok(())
}
_ => {
error!("Error creating group -> {:?}", e);
Err(())
}
}
})?;
}
}
}
// Then add the members to the groups.
// Memberships are set in a second pass so every member entry exists first.
for g in targets.iter().filter_map(|u| {
let e = all_entities.get(u).unwrap();
match e {
Entity::Group(g) => Some(g),
_ => None,
}
}) {
let m: Vec<_> = g
.members
.iter()
.map(|id| all_entities.get(id).unwrap().get_name())
.collect();
self.client
.idm_group_set_members(&g.name, m.as_slice())
.await
.map(|_| ())
.or_else(|e| {
// Membership failures are logged but non-fatal.
error!("Error setting group members -> {:?}", e);
Ok(())
})?;
}
Ok(())
}
// Grant each managing entity membership of the privilege groups matching
// the kinds of access listed against it. Failures are logged, not fatal.
pub async fn setup_access_controls(
&self,
access: &HashMap<Uuid, Vec<EntityType>>,
all_entities: &HashMap<Uuid, Entity>,
) -> Result<(), ()> {
// To make this somewhat efficient, we fold each access req to "need group" or "need user"
// access.
debug!("setup_access_controls");
for (id, list) in access.iter() {
// get the users name.
let account = all_entities.get(id).unwrap();
// NOTE(review): these flags are true when the list contains *zero*
// entries of the matching kind, which looks inverted relative to the
// intent described above (privileges granted only when no such
// entities are listed). Confirm whether `== 0` should be `> 0`.
let need_account = list
.iter()
.filter(|v| matches!(v, EntityType::Account(_)))
.count()
== 0;
let need_group = list
.iter()
.filter(|v| matches!(v, EntityType::Group(_)))
.count()
== 0;
if need_account {
self.client
.idm_group_add_members("idm_account_manage_priv", &[account.get_name()])
.await
.map(|_| ())
.or_else(|e| {
error!("Error setting group members -> {:?}", e);
Ok(())
})?;
self.client
.idm_group_add_members("idm_hp_account_manage_priv", &[account.get_name()])
.await
.map(|_| ())
.or_else(|e| {
error!("Error setting group members -> {:?}", e);
Ok(())
})?;
}
if need_group {
self.client
.idm_group_add_members("idm_group_manage_priv", &[account.get_name()])
.await
.map(|_| ())
.or_else(|e| {
error!("Error setting group members -> {:?}", e);
Ok(())
})?;
self.client
.idm_group_add_members("idm_hp_group_manage_priv", &[account.get_name()])
.await
.map(|_| ())
.or_else(|e| {
error!("Error setting group members -> {:?}", e);
Ok(())
})?;
}
}
Ok(())
}
pub async fn open_user_connection(
&self,
test_start: Instant,
name: &str,
pw: &str,
) -> Result<(Duration, Duration), ()> {
let start = Instant::now();
self.client
.auth_simple_password(name, pw)
pub async fn person_exists(&self, username: &str) -> Result<bool, Error> {
self.idm_admin_client
.idm_person_account_get(username)
.await
.map_err(|e| {
error!("Unable to authenticate -> {:?}", e);
})
.map(|_| {
let end = Instant::now();
let diff = end.duration_since(start);
let rel_diff = start.duration_since(test_start);
(rel_diff, diff)
.map(|e| e.is_some())
.map_err(|err| {
error!(?err, ?username, "Unable to check person");
Error::KanidmClient
})
}
pub async fn close_connection(&self) {
assert!(self
.client
.logout()
pub async fn person_create(&self, username: &str, display_name: &str) -> Result<(), Error> {
self.idm_admin_client
.idm_person_account_create(username, display_name)
.await
.map_err(|e| error!("close_connection {:?}", e))
.is_ok());
.map_err(|err| {
error!(?err, ?username, "Unable to create person");
Error::KanidmClient
})
}
pub async fn search(
pub async fn person_set_pirmary_password_only(
&self,
test_start: Instant,
ids: &[String],
) -> Result<(Duration, Duration, usize), ()> {
// Create the filter
let inner: Vec<_> = ids
.iter()
.map(|n| Filter::Eq("name".to_string(), n.to_string()))
.collect();
let filter = Filter::Or(inner);
let start = Instant::now();
let l = self
.client
.search(filter)
username: &str,
password: &str,
) -> Result<(), Error> {
self.idm_admin_client
.idm_person_account_primary_credential_set_password(username, password)
.await
.map(|r| r.len())
.map_err(|e| {
error!("{:?}", e);
})?;
let end = Instant::now();
let diff = end.duration_since(start);
let rel_diff = start.duration_since(test_start);
Ok((rel_diff, diff, l))
}
}
impl KaniLdapServer {
// Assemble the combined HTTP + LDAP handle. The HTTP side is built first;
// a failure in either half aborts construction.
fn construct(
    uri: String,
    admin_pw: String,
    ldap_uri: String,
    basedn: String,
) -> Result<Box<Self>, ()> {
    KaniHttpServer::construct(uri, admin_pw).and_then(|http| {
        LdapClient::new(ldap_uri, basedn, LdapSchema::Kanidm)
            .map(|ldap| Box::new(KaniLdapServer { http, ldap }))
    })
}
// Build a TargetServer variant directly from raw connection parameters.
pub fn build(
    uri: String,
    admin_pw: String,
    ldap_uri: String,
    basedn: String,
) -> Result<TargetServer, ()> {
    let server = Self::construct(uri, admin_pw, ldap_uri, basedn)?;
    Ok(TargetServer::KanidmLdap(server))
}
// Build a TargetServer from a profile config section. The config values are
// cloned because `construct` consumes owned strings.
#[allow(clippy::new_ret_no_self)]
pub fn new(klconfig: &KaniLdapConfig) -> Result<TargetServer, ()> {
    let server = Self::construct(
        klconfig.uri.clone(),
        klconfig.admin_pw.clone(),
        klconfig.ldap_uri.clone(),
        klconfig.base_dn.clone(),
    )?;
    Ok(TargetServer::KanidmLdap(server))
}
// Human-readable description of this target: LDAP endpoint plus search base.
pub fn info(&self) -> String {
format!(
"Kanidm LDAP Connection: {} {}",
self.ldap.uri, self.ldap.basedn
)
}
// Produce a builder that can recreate an equivalent HTTP+LDAP connection pair.
pub fn builder(&self) -> TargetServerBuilder {
TargetServerBuilder::KanidmLdap(
self.http.uri.clone(),
self.http.admin_pw.clone(),
self.ldap.uri.clone(),
self.ldap.basedn.clone(),
)
}
// Admin setup is performed over HTTP, not LDAP.
pub async fn open_admin_connection(&self) -> Result<(), ()> {
self.http.open_admin_connection().await
}
// Delegate entry deletion to the HTTP client.
pub async fn setup_admin_delete_uuids(&self, targets: &[Uuid]) -> Result<(), ()> {
self.http.setup_admin_delete_uuids(targets).await
}
// Delegate entity pre-creation to the HTTP client.
pub async fn setup_admin_precreate_entities(
&self,
targets: &HashSet<Uuid>,
all_entities: &HashMap<Uuid, Entity>,
) -> Result<(), ()> {
self.http
.setup_admin_precreate_entities(targets, all_entities)
.await
}
// Delegate access-control setup to the HTTP client.
pub async fn setup_access_controls(
&self,
access: &HashMap<Uuid, Vec<EntityType>>,
all_entities: &HashMap<Uuid, Entity>,
) -> Result<(), ()> {
self.http.setup_access_controls(access, all_entities).await
}
// User binds are performed over LDAP. Returns (offset from test start,
// bind latency) on success.
pub async fn open_user_connection(
&self,
test_start: Instant,
name: &str,
pw: &str,
) -> Result<(Duration, Duration), ()> {
self.ldap.open_user_connection(test_start, name, pw).await
}
// Close the LDAP side; the HTTP client needs no explicit teardown here.
pub async fn close_connection(&self) {
self.ldap.close_connection().await;
}
pub async fn search(
&self,
test_start: Instant,
ids: &[String],
) -> Result<(Duration, Duration, usize), ()> {
self.ldap.search_name(test_start, ids).await
.map_err(|err| {
error!(?err, ?username, "Unable to set person password");
Error::KanidmClient
})
}
}

View file

@ -1,366 +0,0 @@
use core::pin::Pin;
use std::net::{SocketAddr, ToSocketAddrs};
use std::time::{Duration, Instant};
use futures_util::sink::SinkExt;
use futures_util::stream::StreamExt;
use ldap3_proto::proto::*;
use ldap3_proto::LdapCodec;
use openssl::ssl::{Ssl, SslConnector, SslMethod, SslVerifyMode};
// use std::sync::atomic::{AtomicUsize, Ordering};
use tokio::net::TcpStream;
use tokio::sync::Mutex;
use tokio_openssl::SslStream;
use tokio_util::codec::Framed;
// A bound LDAP connection: the framed, TLS-wrapped stream plus the last
// message id issued on it.
struct LdapInner {
pub framed: Framed<SslStream<TcpStream>, LdapCodec>,
pub msgid: i32,
}
// Which directory schema the target speaks; controls how bind DNs and
// search name-attributes are formed.
pub enum LdapSchema {
Kanidm,
Rfc2307bis,
}
// LDAP client handle. The socket address is resolved once at construction;
// `conn` is None until `bind` succeeds, then holds the live connection.
pub struct LdapClient {
pub uri: String,
pub addr: SocketAddr,
pub basedn: String,
pub schema: LdapSchema,
conn: Mutex<Option<LdapInner>>,
}
// Manual Debug impl: only uri and addr are shown; the live connection
// state is omitted.
impl std::fmt::Debug for LdapClient {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("LdapClient")
.field("uri", &self.uri)
.field("addr", &self.addr)
.finish()
}
}
impl LdapClient {
// Resolve the host portion of the URI to a single socket address and
// construct an unconnected client. Only the first resolved address is used.
pub fn new(uri: String, basedn: String, schema: LdapSchema) -> Result<Self, ()> {
// Turn this into an address.
debug!("ldap_uri {}", uri);
// First remove the ldaps from the start.
let trimmed = uri.trim_start_matches("ldaps://");
// Then provide the rest to the to_socket_addrs.
let addr = match trimmed.to_socket_addrs() {
Ok(mut addrs_iter) => match addrs_iter.next() {
Some(addr) => addr,
None => {
error!("No ldap uri addresses found");
return Err(());
}
},
Err(e) => {
error!("Unable to parse LDAP uri address - {:?}", e);
return Err(());
}
};
debug!("addr -> {:?}", addr);
// now we store this for the tcp stream later.
// https://docs.rs/tokio/1.5.0/tokio/net/struct.TcpStream.html
Ok(LdapClient {
uri,
addr,
basedn,
schema,
conn: Mutex::new(None),
})
}
// Open TCP + TLS, send a simple bind for `dn`, and store the framed stream
// on success. Certificate verification is disabled (SslVerifyMode::NONE),
// which is acceptable only because this is a load-testing tool.
async fn bind(&self, dn: String, pw: String) -> Result<(), ()> {
// The first message on a fresh connection always uses msgid 1.
let msg = LdapMsg {
msgid: 1,
op: LdapOp::BindRequest(LdapBindRequest {
dn,
cred: LdapBindCred::Simple(pw),
}),
ctrl: vec![],
};
let tcpstream = TcpStream::connect(self.addr)
.await
.map_err(|e| error!("Failed to connect to {} -> {:?}", self.uri, e))?;
// Now add TLS
let mut tls_parms = SslConnector::builder(SslMethod::tls_client()).map_err(|e| {
error!("openssl -> {:?}", e);
})?;
tls_parms.set_verify(SslVerifyMode::NONE);
let tls_parms = tls_parms.build();
let mut tlsstream = Ssl::new(tls_parms.context())
.and_then(|tls_obj| SslStream::new(tls_obj, tcpstream))
.map_err(|e| {
error!("Failed to initialise TLS -> {:?}", e);
})?;
SslStream::connect(Pin::new(&mut tlsstream))
.await
.map_err(|e| {
error!("Failed to initialise TLS -> {:?}", e);
})?;
let mut framed = Framed::new(tlsstream, LdapCodec::default());
framed.send(msg).await.map_err(|e| {
error!("Unable to bind -> {:?}", e);
})?;
// Only a successful BindResponse stores the connection; any other
// outcome falls through to the error below.
if let Some(Ok(msg)) = framed.next().await {
if let LdapOp::BindResponse(res) = msg.op {
if res.res.code == LdapResultCode::Success {
let mut mguard = self.conn.lock().await;
*mguard = Some(LdapInner { framed, msgid: 1 });
return Ok(());
}
}
}
error!("Failed to bind");
Err(())
}
// Bind using the literal "cn=Directory Manager" DN.
pub async fn open_dm_connection(&self, pw: &str) -> Result<(), ()> {
self.bind("cn=Directory Manager".to_string(), pw.to_string())
.await
}
// Bind as the IPA admin account under the configured base DN.
pub async fn open_ipa_admin_connection(&self, pw: &str) -> Result<(), ()> {
let admin_dn = format!("uid=admin,cn=users,cn=accounts,{}", self.basedn);
self.bind(admin_dn, pw.to_string()).await
}
// Bind as a user, forming the DN per the target schema. Returns
// (offset of the bind from test_start, bind latency).
pub async fn open_user_connection(
&self,
test_start: Instant,
name: &str,
pw: &str,
) -> Result<(Duration, Duration), ()> {
let dn = match self.schema {
LdapSchema::Kanidm => name.to_string(),
LdapSchema::Rfc2307bis => format!("uid={},ou=people,{}", name, self.basedn),
};
let start = Instant::now();
self.bind(dn, pw.to_string()).await?;
let end = Instant::now();
let diff = end.duration_since(start);
let rel_diff = start.duration_since(test_start);
Ok((rel_diff, diff))
}
// Dropping the inner connection closes the underlying stream.
pub async fn close_connection(&self) {
let mut mguard = self.conn.lock().await;
*mguard = None;
}
// Search for entries whose name attribute (schema dependent) matches any
// of `ids`. Returns (offset from test_start, latency, result count).
pub async fn search_name(
&self,
test_start: Instant,
ids: &[String],
) -> Result<(Duration, Duration, usize), ()> {
let name_attr = match self.schema {
LdapSchema::Kanidm => "name",
LdapSchema::Rfc2307bis => "cn",
};
let filter = LdapFilter::Or(
ids.iter()
.map(|n| LdapFilter::Equality(name_attr.to_string(), n.to_string()))
.collect(),
);
let start = Instant::now();
let res = self.search(filter).await?;
let end = Instant::now();
let diff = end.duration_since(start);
let rel_diff = start.duration_since(test_start);
Ok((rel_diff, diff, res.len()))
}
// Run a subtree search from the base DN, collecting entries until a
// SearchResultDone arrives. The connection lock is held for the whole
// request/response exchange, serialising concurrent operations.
pub async fn search(&self, filter: LdapFilter) -> Result<Vec<LdapSearchResultEntry>, ()> {
// Create the search filter
let req = LdapSearchRequest {
base: self.basedn.clone(),
scope: LdapSearchScope::Subtree,
aliases: LdapDerefAliases::Never,
sizelimit: 0,
timelimit: 0,
typesonly: false,
filter,
attrs: vec![],
};
// Prep the proto msg
let mut mguard = self.conn.lock().await;
let inner = match (*mguard).as_mut() {
Some(i) => i,
None => {
error!("No connection available");
return Err(());
}
};
// Each request gets a fresh, monotonically increasing message id.
inner.msgid += 1;
let msgid = inner.msgid;
let msg = LdapMsg {
msgid,
ctrl: vec![],
op: LdapOp::SearchRequest(req),
};
// Send it
inner.framed.send(msg).await.map_err(|e| {
error!("Unable to search -> {:?}", e);
})?;
let mut results = Vec::new();
// It takes a lot more work to process a response from ldap :(
while let Some(Ok(msg)) = inner.framed.next().await {
match msg.op {
LdapOp::SearchResultEntry(ent) => results.push(ent),
LdapOp::SearchResultDone(res) => {
if res.code == LdapResultCode::Success {
break;
} else {
error!("Search Failed -> {:?}", res);
return Err(());
}
}
_ => {
error!("Invalid ldap response state");
return Err(());
}
}
}
Ok(results)
}
// Delete a single entry by DN: one request, one expected DelResponse.
pub async fn delete(&self, dn: String) -> Result<(), ()> {
let mut mguard = self.conn.lock().await;
let inner = match (*mguard).as_mut() {
Some(i) => i,
None => {
error!("No connection available");
return Err(());
}
};
inner.msgid += 1;
let msgid = inner.msgid;
let msg = LdapMsg {
msgid,
ctrl: vec![],
op: LdapOp::DelRequest(dn),
};
// Send it
inner.framed.send(msg).await.map_err(|e| {
error!("Unable to delete -> {:?}", e);
})?;
if let Some(Ok(msg)) = inner.framed.next().await {
if let LdapOp::DelResponse(res) = msg.op {
if res.code == LdapResultCode::Success {
return Ok(());
} else {
error!("Delete Failed -> {:?}", res);
return Err(());
}
}
}
error!("Invalid ldap response state");
Err(())
}
// Add an entry: one request, one expected AddResponse.
pub async fn add(&self, req: LdapAddRequest) -> Result<(), ()> {
let mut mguard = self.conn.lock().await;
let inner = match (*mguard).as_mut() {
Some(i) => i,
None => {
error!("No connection available");
return Err(());
}
};
inner.msgid += 1;
let msgid = inner.msgid;
let msg = LdapMsg {
msgid,
ctrl: vec![],
op: LdapOp::AddRequest(req),
};
// Send it
inner.framed.send(msg).await.map_err(|e| {
error!("Unable to add -> {:?}", e);
})?;
if let Some(Ok(msg)) = inner.framed.next().await {
if let LdapOp::AddResponse(res) = msg.op {
if res.code == LdapResultCode::Success {
return Ok(());
} else {
error!("Add Failed -> {:?}", res);
return Err(());
}
}
}
error!("Invalid ldap response state");
Err(())
}
// Modify an entry: one request, one expected ModifyResponse.
pub async fn modify(&self, req: LdapModifyRequest) -> Result<(), ()> {
let mut mguard = self.conn.lock().await;
let inner = match (*mguard).as_mut() {
Some(i) => i,
None => {
error!("No connection available");
return Err(());
}
};
inner.msgid += 1;
let msgid = inner.msgid;
let msg = LdapMsg {
msgid,
ctrl: vec![],
op: LdapOp::ModifyRequest(req),
};
// Send it
inner.framed.send(msg).await.map_err(|e| {
error!("Unable to modify -> {:?}", e);
})?;
if let Some(Ok(msg)) = inner.framed.next().await {
if let LdapOp::ModifyResponse(res) = msg.op {
if res.code == LdapResultCode::Success {
return Ok(());
} else {
error!("Modify Failed -> {:?}", res);
return Err(());
}
}
}
error!("Invalid ldap response state");
Err(())
}
}

View file

@ -1,4 +1,4 @@
#![deny(warnings)]
// #![deny(warnings)]
#![warn(unused_extern_crates)]
#![allow(clippy::panic)]
#![deny(clippy::unreachable)]
@ -13,212 +13,43 @@ static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[macro_use]
extern crate tracing;
use hashbrown::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant};
use std::path::PathBuf;
use std::process::ExitCode;
use clap::{Parser, Subcommand};
use uuid::Uuid;
use clap::Parser;
use crate::ds::DirectoryServer;
use crate::ipa::IpaServer;
use crate::kani::{KaniHttpServer, KaniLdapServer};
use crate::profile::Profile;
use crate::setup::config;
use crate::profile::{Profile, ProfileBuilder};
mod data;
mod ds;
use tokio::sync::broadcast;
mod error;
mod generate;
mod ipa;
mod kani;
mod ldap;
mod preprocess;
mod model;
mod model_basic;
mod populate;
mod profile;
mod runner;
mod setup;
mod run;
mod state;
mod stats;
include!("./opt.rs");
impl OrcaOpt {
pub fn debug(&self) -> bool {
fn debug(&self) -> bool {
match self {
OrcaOpt::TestConnection(opt) => opt.copt.debug,
OrcaOpt::Generate(opt) => opt.copt.debug,
OrcaOpt::PreProc(opt) => opt.copt.debug,
OrcaOpt::Setup(opt) => opt.copt.debug,
OrcaOpt::Run(opt) => opt.copt.debug,
OrcaOpt::Version(opt) => opt.debug,
OrcaOpt::Configure(opt) => opt.copt.debug,
OrcaOpt::Version { common }
| OrcaOpt::SetupWizard { common, .. }
| OrcaOpt::TestConnection { common, .. }
| OrcaOpt::GenerateData { common, .. }
| OrcaOpt::PopulateData { common, .. }
| OrcaOpt::Run { common, .. } => common.debug,
}
}
}
// Recipe for (re)creating a connection to a target server. Each variant
// carries the constructor arguments for the matching server's `build`.
pub enum TargetServerBuilder {
// uri, admin password
Kanidm(String, String),
// uri, admin password, ldap uri, base dn
KanidmLdap(String, String, String, String),
DirSrv(String, String, String),
Ipa(String, String, String),
}
impl TargetServerBuilder {
// Construct the concrete server from the stored arguments.
#[allow(clippy::result_unit_err)]
pub fn build(self) -> Result<TargetServer, ()> {
match self {
TargetServerBuilder::Kanidm(a, b) => KaniHttpServer::build(a, b),
TargetServerBuilder::KanidmLdap(a, b, c, d) => KaniLdapServer::build(a, b, c, d),
TargetServerBuilder::DirSrv(a, b, c) => DirectoryServer::build(a, b, c),
TargetServerBuilder::Ipa(a, b, c) => IpaServer::build(a, b, c),
}
}
}
// A concrete target under test. KanidmLdap is boxed because it holds both
// an HTTP and an LDAP client.
#[allow(clippy::large_enum_variant)]
pub enum TargetServer {
Kanidm(KaniHttpServer),
KanidmLdap(Box<KaniLdapServer>),
DirSrv(DirectoryServer),
Ipa(IpaServer),
}
// Dispatch layer: every operation forwards to the concrete backend variant.
impl TargetServer {
// Human-readable connection description.
fn info(&self) -> String {
match self {
TargetServer::Kanidm(k) => k.info(),
TargetServer::KanidmLdap(k) => k.info(),
TargetServer::DirSrv(k) => k.info(),
TargetServer::Ipa(k) => k.info(),
}
}
// Short machine-friendly name, used e.g. to label results.
fn rname(&self) -> &str {
match self {
TargetServer::Kanidm(_) => "kanidm_http",
TargetServer::KanidmLdap(_) => "kanidm_ldap",
TargetServer::DirSrv(_) => "directory_server",
TargetServer::Ipa(_) => "ipa",
}
}
// Builder that can recreate an equivalent connection (for worker tasks).
fn builder(&self) -> TargetServerBuilder {
match self {
TargetServer::Kanidm(k) => k.builder(),
TargetServer::KanidmLdap(k) => k.builder(),
TargetServer::DirSrv(k) => k.builder(),
TargetServer::Ipa(k) => k.builder(),
}
}
async fn open_admin_connection(&self) -> Result<(), ()> {
match self {
TargetServer::Kanidm(k) => k.open_admin_connection().await,
TargetServer::KanidmLdap(k) => k.open_admin_connection().await,
TargetServer::DirSrv(k) => k.open_admin_connection().await,
TargetServer::Ipa(k) => k.open_admin_connection().await,
}
}
async fn setup_admin_delete_uuids(&self, targets: &[Uuid]) -> Result<(), ()> {
match self {
TargetServer::Kanidm(k) => k.setup_admin_delete_uuids(targets).await,
TargetServer::KanidmLdap(k) => k.setup_admin_delete_uuids(targets).await,
TargetServer::DirSrv(k) => k.setup_admin_delete_uuids(targets).await,
TargetServer::Ipa(k) => k.setup_admin_delete_uuids(targets).await,
}
}
async fn setup_admin_precreate_entities(
&self,
targets: &HashSet<Uuid>,
all_entities: &HashMap<Uuid, data::Entity>,
) -> Result<(), ()> {
match self {
TargetServer::Kanidm(k) => {
k.setup_admin_precreate_entities(targets, all_entities)
.await
}
TargetServer::KanidmLdap(k) => {
k.setup_admin_precreate_entities(targets, all_entities)
.await
}
TargetServer::DirSrv(k) => {
k.setup_admin_precreate_entities(targets, all_entities)
.await
}
TargetServer::Ipa(k) => {
k.setup_admin_precreate_entities(targets, all_entities)
.await
}
}
}
async fn setup_access_controls(
&self,
access: &HashMap<Uuid, Vec<data::EntityType>>,
all_entities: &HashMap<Uuid, data::Entity>,
) -> Result<(), ()> {
match self {
TargetServer::Kanidm(k) => k.setup_access_controls(access, all_entities).await,
TargetServer::KanidmLdap(k) => k.setup_access_controls(access, all_entities).await,
TargetServer::DirSrv(k) => k.setup_access_controls(access, all_entities).await,
TargetServer::Ipa(k) => k.setup_access_controls(access, all_entities).await,
}
}
// Returns (offset from test start, auth latency) on success.
async fn open_user_connection(
&self,
test_start: Instant,
name: &str,
pw: &str,
) -> Result<(Duration, Duration), ()> {
match self {
TargetServer::Kanidm(k) => k.open_user_connection(test_start, name, pw).await,
TargetServer::KanidmLdap(k) => k.open_user_connection(test_start, name, pw).await,
TargetServer::DirSrv(k) => k.open_user_connection(test_start, name, pw).await,
TargetServer::Ipa(k) => k.open_user_connection(test_start, name, pw).await,
}
}
async fn close_connection(&self) {
match self {
TargetServer::Kanidm(k) => k.close_connection().await,
TargetServer::KanidmLdap(k) => k.close_connection().await,
TargetServer::DirSrv(k) => k.close_connection().await,
TargetServer::Ipa(k) => k.close_connection().await,
}
}
// Returns (offset from test start, search latency, result count).
async fn search(
&self,
test_start: Instant,
ids: &[String],
) -> Result<(Duration, Duration, usize), ()> {
match self {
TargetServer::Kanidm(k) => k.search(test_start, ids).await,
TargetServer::KanidmLdap(k) => k.search(test_start, ids).await,
TargetServer::DirSrv(k) => k.search(test_start, ids).await,
TargetServer::Ipa(k) => k.search(test_start, ids).await,
}
}
}
async fn conntest(target: &TargetOpt, profile_path: &Path) -> Result<(), ()> {
info!(
"Performing conntest of {:?} from {}",
target,
profile_path.to_str().unwrap(),
);
let (_data, _profile, server) = config(target, profile_path)?;
server
.open_admin_connection()
.await
.map(|_| info!("success"))
.map_err(|_| error!("connection test failed"))
}
#[tokio::main]
async fn main() {
#[tokio::main(flavor = "multi_thread")]
async fn main() -> ExitCode {
let opt = OrcaOpt::parse();
if opt.debug() {
@ -227,99 +58,221 @@ async fn main() {
"orca=debug,kanidm=debug,kanidm_client=debug,webauthn=debug",
);
}
tracing_subscriber::fmt::init();
info!("Orca - the Kanidm Load Testing Utility.");
debug!("cli -> {:?}", opt);
match opt {
OrcaOpt::Version(_opt) => {
OrcaOpt::Version { .. } => {
println!("orca {}", env!("KANIDM_PKG_VERSION"));
std::process::exit(0);
return ExitCode::SUCCESS;
}
OrcaOpt::TestConnection(opt) => {
let _ = conntest(&opt.target, &opt.profile_path).await;
// Build the profile and the test dimensions.
OrcaOpt::SetupWizard {
common: _,
admin_password,
idm_admin_password,
control_uri,
seed,
profile_path,
} => {
// For now I hardcoded some dimensions, but we should prompt
// the user for these later.
let seed = seed.map(|seed| {
if seed < 0 {
seed.wrapping_mul(-1) as u64
} else {
seed as u64
}
});
let builder =
ProfileBuilder::new(control_uri, admin_password, idm_admin_password).seed(seed);
let profile = match builder.build() {
Ok(p) => p,
Err(_err) => {
return ExitCode::FAILURE;
}
};
match profile.write_to_path(&profile_path) {
Ok(_) => {
return ExitCode::SUCCESS;
}
Err(_err) => {
return ExitCode::FAILURE;
}
}
}
OrcaOpt::Generate(opt) => generate::doit(&opt.output_path),
OrcaOpt::PreProc(opt) => preprocess::doit(&opt.input_path, &opt.output_path),
OrcaOpt::Setup(opt) => {
let _ = setup::doit(&opt.target, &opt.profile_path).await;
// Test the connection
OrcaOpt::TestConnection {
common: _,
profile_path,
} => {
let profile = match Profile::try_from(profile_path.as_path()) {
Ok(p) => p,
Err(_err) => {
return ExitCode::FAILURE;
}
};
info!("Performing conntest of {}", profile.control_uri());
match kani::KanidmOrcaClient::new(&profile).await {
Ok(_) => {
info!("success");
return ExitCode::SUCCESS;
}
Err(_err) => {
return ExitCode::FAILURE;
}
}
}
OrcaOpt::Run(opt) => {
let _ = runner::doit(&opt.test_type, &opt.target, &opt.profile_path).await;
// read the profile that we are going to be using/testing
// load the related data (if any) or generate it
// run the test!
// From the profile and test dimensions, generate the data into a state file.
OrcaOpt::GenerateData {
common: _,
profile_path,
state_path,
} => {
let profile = match Profile::try_from(profile_path.as_path()) {
Ok(p) => p,
Err(_err) => {
return ExitCode::FAILURE;
}
};
let client = match kani::KanidmOrcaClient::new(&profile).await {
Ok(client) => client,
Err(_err) => {
return ExitCode::FAILURE;
}
};
// do-it.
let state = match generate::populate(&client, profile).await {
Ok(s) => s,
Err(_err) => {
return ExitCode::FAILURE;
}
};
match state.write_to_path(&state_path) {
Ok(_) => {
return ExitCode::SUCCESS;
}
Err(_err) => {
return ExitCode::FAILURE;
}
}
}
//
OrcaOpt::PopulateData {
common: _,
state_path,
} => {
let state = match state::State::try_from(state_path.as_path()) {
Ok(p) => p,
Err(_err) => {
return ExitCode::FAILURE;
}
};
match populate::preflight(state).await {
Ok(_) => {
return ExitCode::SUCCESS;
}
Err(_err) => {
return ExitCode::FAILURE;
}
};
}
// Run the test based on the state file.
OrcaOpt::Run {
common: _,
state_path,
} => {
let state = match state::State::try_from(state_path.as_path()) {
Ok(p) => p,
Err(_err) => {
return ExitCode::FAILURE;
}
};
// We have a broadcast channel setup for controlling the state of
// various actors and parts.
//
// We want a small amount of backlog because there are a few possible
// commands that could be sent.
let (control_tx, control_rx) = broadcast::channel(8);
let mut run_execute = tokio::task::spawn(run::execute(state, control_rx));
loop {
tokio::select! {
// Note that we pass a &mut handle here because we want the future to join
// but not be consumed each loop iteration.
result = &mut run_execute => {
match result {
Ok(_) => {
return ExitCode::SUCCESS;
}
Err(_err) => {
return ExitCode::FAILURE;
}
};
}
// Signal handling.
Ok(()) = tokio::signal::ctrl_c() => {
info!("Stopping Task ...");
let _ = control_tx.send(run::Signal::Stop);
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::terminate();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Kill it with fire I guess.
return ExitCode::FAILURE;
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::alarm();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::hangup();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::user_defined1();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
Some(()) = async move {
let sigterm = tokio::signal::unix::SignalKind::user_defined2();
#[allow(clippy::unwrap_used)]
tokio::signal::unix::signal(sigterm).unwrap().recv().await
} => {
// Ignore
}
}
}
}
OrcaOpt::Configure(opt) => update_config_file(opt),
};
debug!("Exit");
}
// Apply CLI-supplied overrides to an existing profile file (or a default
// profile if the path does not exist), then write it back and echo the result.
//
// NOTE(review): the `.unwrap()` calls on file reading / TOML parsing, and on
// the `kani_http_config` / `kani_ldap_config` Options, will panic if the
// profile is malformed or those sections are absent — confirm this is
// acceptable for an operator-facing tool.
fn update_config_file(opt: ConfigOpt) {
let mut profile = match opt.profile.exists() {
true => {
let file_contents = std::fs::read_to_string(&opt.profile).unwrap();
toml::from_str(&file_contents).unwrap()
}
false => Profile::default(),
};
println!("Current profile:\n{}", toml::to_string(&profile).unwrap());
if let Some(name) = opt.name {
println!("Updating config name.");
profile.name = name;
};
// Password and URI updates apply to both the HTTP and LDAP sections.
if let Some(new_password) = opt.admin_password {
println!("Updating admin password.");
profile.kani_http_config.as_mut().unwrap().admin_pw = new_password.clone();
profile.kani_ldap_config.as_mut().unwrap().admin_pw = new_password;
};
if let Some(kani_uri) = opt.kanidm_uri {
println!("Updating kanidm uri.");
profile.kani_http_config.as_mut().unwrap().uri = kani_uri.clone();
profile.kani_ldap_config.as_mut().unwrap().uri = kani_uri;
};
if let Some(ldap_uri) = opt.ldap_uri {
println!("Updating ldap uri.");
profile.kani_ldap_config.as_mut().unwrap().ldap_uri = ldap_uri;
};
if let Some(base_dn) = opt.ldap_base_dn {
println!("Updating base DN.");
profile.kani_ldap_config.as_mut().unwrap().base_dn = base_dn;
};
if let Some(data_file) = opt.data_file {
println!("Updating data_file path.");
profile.data = data_file;
};
if let Some(results) = opt.results {
println!("Updating results path.");
profile.results = results.to_str().unwrap().to_string();
};
// Serialisation failure aborts without touching the file on disk.
let file_contents = match toml::to_string(&profile) {
Err(err) => {
error!("Failed to serialize the config file: {:?}", err);
return;
}
Ok(val) => val,
};
match std::fs::write(&opt.profile, &file_contents) {
Err(err) => {
eprintln!("Failed to write the config file: {:?}", err);
return;
}
Ok(_) => {
println!("Wrote out the new config file");
}
};
println!("New config:\n{}", file_contents);
}

118
tools/orca/src/model.rs Normal file
View file

@ -0,0 +1,118 @@
use crate::error::Error;
use crate::run::{EventDetail, EventRecord};
use crate::state::*;
use std::time::{Duration, Instant};
use kanidm_client::KanidmClient;
use async_trait::async_trait;
// The kinds of action an actor can perform against the server.
pub enum TransitionAction {
Login,
Logout,
}
// Is this the right way? Should transitions/delay be part of the actor model? Should
// they be responsible.
// A planned next step: an optional pre-action pause plus the action itself.
pub struct Transition {
pub delay: Option<Duration>,
pub action: TransitionAction,
}
impl Transition {
// Accessor for the optional pre-action delay.
#[allow(dead_code)]
pub fn delay(&self) -> Option<Duration> {
self.delay
}
}
// Outcome of executing a transition, as seen by the actor state machine.
pub enum TransitionResult {
// Success
Ok,
// We need to re-authenticate, the session expired.
// AuthenticationNeeded,
// An error occurred.
Error,
}
// A behaviour model for a simulated person. Each call performs one step of
// the model (possibly sleeping first) and reports the resulting event.
#[async_trait]
pub trait ActorModel {
async fn transition(
&mut self,
client: &KanidmClient,
person: &Person,
) -> Result<EventRecord, Error>;
}
pub async fn login(
client: &KanidmClient,
person: &Person,
) -> Result<(TransitionResult, EventRecord), Error> {
// Should we measure the time of each call rather than the time with multiple calls?
let start = Instant::now();
let result = match &person.credential {
Credential::Password { plain } => {
client
.auth_simple_password(person.username.as_str(), plain.as_str())
.await
}
};
let end = Instant::now();
let duration = end.duration_since(start);
match result {
Ok(_) => Ok((
TransitionResult::Ok,
EventRecord {
start,
duration,
details: EventDetail::Authentication,
},
)),
Err(client_err) => {
debug!(?client_err);
Ok((
TransitionResult::Error,
EventRecord {
start,
duration,
details: EventDetail::Error,
},
))
}
}
}
pub async fn logout(
client: &KanidmClient,
_person: &Person,
) -> Result<(TransitionResult, EventRecord), Error> {
let start = Instant::now();
let result = client.logout().await;
let end = Instant::now();
let duration = end.duration_since(start);
match result {
Ok(_) => Ok((
TransitionResult::Ok,
EventRecord {
start,
duration,
details: EventDetail::Logout,
},
)),
Err(client_err) => {
debug!(?client_err);
Ok((
TransitionResult::Error,
EventRecord {
start,
duration,
details: EventDetail::Error,
},
))
}
}
}

View file

@ -0,0 +1,87 @@
use crate::model::{self, ActorModel, Transition, TransitionAction, TransitionResult};
use crate::error::Error;
use crate::run::EventRecord;
use crate::state::*;
use kanidm_client::KanidmClient;
use async_trait::async_trait;
use std::time::Duration;
// Authentication state of the basic actor's two-state machine.
enum State {
Unauthenticated,
Authenticated,
}
// The simplest actor: alternates between logging in and logging out.
pub struct ActorBasic {
state: State,
}
impl ActorBasic {
    // Every actor begins in the logged-out state.
    pub fn new() -> Self {
        Self {
            state: State::Unauthenticated,
        }
    }
}
#[async_trait]
impl ActorModel for ActorBasic {
async fn transition(
&mut self,
client: &KanidmClient,
person: &Person,
) -> Result<EventRecord, Error> {
let transition = self.next_transition();
if let Some(delay) = transition.delay {
tokio::time::sleep(delay).await;
}
// Once we get to here, we want the transition to go ahead.
let (result, event) = match transition.action {
TransitionAction::Login => model::login(client, person).await,
TransitionAction::Logout => model::logout(client, person).await,
}?;
// Given the result, make a choice about what text.
self.next_state(result);
Ok(event)
}
}
impl ActorBasic {
fn next_transition(&mut self) -> Transition {
match self.state {
State::Unauthenticated => Transition {
delay: None,
action: TransitionAction::Login,
},
State::Authenticated => Transition {
delay: Some(Duration::from_millis(100)),
action: TransitionAction::Logout,
},
}
}
fn next_state(&mut self, result: TransitionResult) {
// Is this a design flaw? We probably need to know what the state was that we
// requested to move to?
match (&self.state, result) {
(State::Unauthenticated, TransitionResult::Ok) => {
self.state = State::Authenticated;
}
(State::Unauthenticated, TransitionResult::Error) => {
self.state = State::Unauthenticated;
}
(State::Authenticated, TransitionResult::Ok) => {
self.state = State::Unauthenticated;
}
(State::Authenticated, TransitionResult::Error) => {
self.state = State::Unauthenticated;
}
}
}
}

View file

@ -1,5 +1,3 @@
use std::str::FromStr;
#[derive(Debug, Parser)]
struct CommonOpt {
#[clap(short, long)]
@ -8,168 +6,9 @@ struct CommonOpt {
}
// CLI options for the data pre-processing step (input json -> processed output).
#[derive(Debug, Parser)]
struct PreProcOpt {
#[clap(flatten)]
pub copt: CommonOpt,
#[clap(value_parser, short, long = "input")]
/// Path to unprocessed data in json format.
pub input_path: PathBuf,
#[clap(value_parser, short, long = "output")]
/// Path to write the processed output.
pub output_path: PathBuf,
}
// CLI options for generating a data set from scratch (no input log).
#[derive(Debug, Parser)]
struct GenerateOpt {
    #[clap(flatten)]
    pub copt: CommonOpt,
    #[clap(value_parser, short, long = "output")]
    /// Path to write the generated output.
    pub output_path: PathBuf,
}
// CLI options for the setup step: which target to load, and the profile
// describing the data set to load into it.
#[derive(Debug, Parser)]
struct SetupOpt {
    #[clap(flatten)]
    pub copt: CommonOpt,
    #[clap(name = "target")]
    pub target: TargetOpt,
    #[clap(value_parser, short, long = "profile")]
    /// Path to the test profile.
    pub profile_path: PathBuf,
}
// CLI options for the legacy run command: target system, which test to
// run, and the profile to drive it with.
#[derive(Debug, Parser)]
struct RunOpt {
    #[clap(flatten)]
    pub copt: CommonOpt,
    #[clap(name = "target")]
    pub target: TargetOpt,
    #[clap(
        name = "test-type",
        help = "Which type of test to run against this system: currently supports 'search-basic'"
    )]
    /// Which type of test to run against this system
    pub test_type: TestTypeOpt,
    #[clap(value_parser, short, long = "profile")]
    /// Path to the test profile.
    pub profile_path: PathBuf,
}
// CLI options for the legacy configure command. Every optional field, when
// supplied, updates the matching value in the profile file at `profile`.
#[derive(Debug, Parser)]
/// Configuration options
struct ConfigOpt {
    #[clap(flatten)]
    pub copt: CommonOpt,
    #[clap(value_parser, short, long)]
    /// Update the admin password
    pub admin_password: Option<String>,
    #[clap(value_parser, short, long)]
    /// Update the Kanidm URI
    pub kanidm_uri: Option<String>,
    #[clap(value_parser, short, long)]
    /// Update the LDAP URI
    pub ldap_uri: Option<String>,
    #[clap(value_parser, long)]
    /// Update the LDAP base DN
    pub ldap_base_dn: Option<String>,
    #[clap(value_parser, short = 'D', long)]
    /// Set the configuration name
    pub name: Option<String>,
    #[clap(value_parser, long)]
    /// The data file path to update (or create)
    pub data_file: Option<String>,
    #[clap(value_parser, short, long)]
    /// The place we'll drop the results
    pub results: Option<PathBuf>,
    #[clap(value_parser, short, long)]
    /// The configuration file path to update (or create)
    pub profile: PathBuf,
}
// Selects which server profile a command operates on. A hand-written
// FromStr impl elsewhere in this file mirrors the clap names for plain
// string parsing.
#[derive(Debug, Subcommand, Clone)]
/// The target to run against
pub(crate) enum TargetOpt {
    #[clap(name = "ds")]
    /// Run against the ldap/ds profile
    Ds,
    #[clap(name = "ipa")]
    /// Run against the ipa profile
    Ipa,
    #[clap(name = "kanidm")]
    /// Run against the kanidm http profile
    Kanidm,
    #[clap(name = "kanidm-ldap")]
    /// Run against the kanidm ldap profile
    KanidmLdap,
}
impl FromStr for TargetOpt {
    type Err = &'static str;

    /// Map a CLI token onto a target profile. Accepted values are
    /// "ds", "ipa", "kanidm" and "kanidm-ldap".
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let target = match s {
            "ds" => TargetOpt::Ds,
            "ipa" => TargetOpt::Ipa,
            "kanidm" => TargetOpt::Kanidm,
            "kanidm-ldap" => TargetOpt::KanidmLdap,
            _ => return Err("Invalid target type. Must be ds, ipa, kanidm, or kanidm-ldap"),
        };
        Ok(target)
    }
}
// Which load test to execute; parsed by clap (and via FromStr below).
#[derive(Debug, Subcommand, Clone)]
pub(crate) enum TestTypeOpt {
    #[clap(name = "search-basic")]
    /// Perform a basic search-only test
    SearchBasic,
}
impl FromStr for TestTypeOpt {
    type Err = &'static str;

    /// Parse the CLI test-type token; "search-basic" is the only value.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "search-basic" {
            Ok(TestTypeOpt::SearchBasic)
        } else {
            Err("Invalid test type.")
        }
    }
}
impl std::fmt::Display for TestTypeOpt {
    /// Render the test type exactly as the CLI expects it to be typed.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = match self {
            TestTypeOpt::SearchBasic => "search-basic",
        };
        write!(f, "{}", name)
    }
}
#[derive(Debug, Parser)]
#[clap(
name = "orca",
about = "Orca Load Testing Utility
Orca works in a few steps.
1. Create an orca config which defines the targets you want to be able to setup and load test. See example_profiles/small/orca.toml
2. (Optional) preprocess an anonymised 389-ds access log (created from an external tool) into an orca data set.
3. 'orca setup' the kanidm/389-ds instance from the orca data set. You can see an example of this in example_profiles/small/data.json. This will reset the database, and add tons of entries etc. For example:
orca setup kanidm -p ./example_profiles/small/orca.toml
4. 'orca run' one of the metrics, based on that data set. For example:
orca run -p example_profiles/small/orca.toml kanidm search-basic
"
)]
#[clap(name = "orca", about = "Orca Load Testing Utility")]
enum OrcaOpt {
/*
#[clap(name = "conntest")]
/// Perform a connection test against the specified target
TestConnection(SetupOpt),
@ -186,10 +25,85 @@ enum OrcaOpt {
#[clap(name = "run")]
/// Run the load test as defined by the test profile
Run(RunOpt),
#[clap(name = "version")]
/// Print version info and exit
Version(CommonOpt),
#[clap(name = "configure")]
/// Update a config file
Configure(ConfigOpt),
*/
SetupWizard {
#[clap(flatten)]
common: CommonOpt,
#[clap(long)]
/// Update the admin password
admin_password: String,
#[clap(long)]
/// Update the idm_admin password
idm_admin_password: String,
#[clap(long)]
/// Update the Kanidm URI
control_uri: String,
#[clap(long)]
/// Optional RNG seed. Takes a signed 64bit integer and turns it into an unsigned one for use.
/// This allows deterministic regeneration of a test state file.
seed: Option<i64>,
// Todo - support the extra uris field for replicated tests.
#[clap(long = "profile")]
/// The configuration file path to update (or create)
profile_path: PathBuf,
},
#[clap(name = "conntest")]
/// Perform a connection test
TestConnection {
#[clap(flatten)]
common: CommonOpt,
#[clap(long = "profile")]
/// Path to the test profile.
profile_path: PathBuf,
},
#[clap(name = "generate")]
/// Create a new state file that is populated with a complete dataset, ready
/// to be loaded into a kanidm instance.
GenerateData {
#[clap(flatten)]
common: CommonOpt,
#[clap(long = "profile")]
/// Path to the test profile.
profile_path: PathBuf,
#[clap(long = "state")]
/// Path to the state file.
state_path: PathBuf,
},
#[clap(name = "populate")]
/// Populate the data for the test into the Kanidm instance.
PopulateData {
#[clap(flatten)]
common: CommonOpt,
#[clap(long = "state")]
/// Path to the state file.
state_path: PathBuf,
},
#[clap(name = "run")]
/// Run the simulation.
Run {
#[clap(flatten)]
common: CommonOpt,
#[clap(long = "state")]
/// Path to the state file.
state_path: PathBuf,
},
#[clap(name = "version")]
/// Print version info and exit
Version {
#[clap(flatten)]
common: CommonOpt,
},
}

View file

@ -0,0 +1,70 @@
use crate::error::Error;
use crate::kani;
use crate::state::*;
use std::sync::Arc;
/// Apply every preflight flag to the target server before the data load
/// begins. Stops at the first error.
async fn apply_flags(client: Arc<kani::KanidmOrcaClient>, flags: &[Flag]) -> Result<(), Error> {
    for flag in flags {
        match flag {
            Flag::DisableAllPersonsMFAPolicy => {
                client.disable_mfa_requirement().await?;
            }
        }
    }
    Ok(())
}
/// Ensure one person exists on the server with the expected credential.
///
/// The account is created when missing, then its password is always
/// (re)set so the credential matches the state file even for accounts
/// left over from a previous run.
///
/// # Errors
/// Propagates any client error from the existence check, create, or
/// password set calls.
async fn preflight_person(
    client: Arc<kani::KanidmOrcaClient>,
    person: Person,
) -> Result<(), Error> {
    debug!(?person);

    // Idiom fix: create-if-missing instead of an empty "exists" branch.
    if !client.person_exists(&person.username).await? {
        client
            .person_create(&person.username, &person.display_name)
            .await?;
    }

    match &person.credential {
        Credential::Password { plain } => {
            // NOTE(review): "pirmary" is a typo in the kani client API name;
            // it has to be renamed at its definition site, not here.
            client
                .person_set_pirmary_password_only(&person.username, plain)
                .await?;
        }
    }

    Ok(())
}
// Prepare the target Kanidm instance to match the generated state file:
// apply policy flags, then ensure every person exists with a known
// credential. Person setup is spawned concurrently, one task per person.
pub async fn preflight(state: State) -> Result<(), Error> {
    // Get the admin client.
    let client = Arc::new(kani::KanidmOrcaClient::new(&state.profile).await?);

    // Apply any flags if they exist.
    apply_flags(client.clone(), state.preflight_flags.as_slice()).await?;

    // Create persons.
    let mut tasks = Vec::with_capacity(state.persons.len());
    for person in state.persons.into_iter() {
        let c = client.clone();
        tasks.push(tokio::spawn(preflight_person(c, person)))
    }

    for task in tasks {
        task.await.map_err(|tokio_err| {
            error!(?tokio_err, "Failed to join task");
            Error::Tokio
        })??;
        // The double ? isn't a mistake, it's because this is Result<Result<T, E>, E>
        // and flatten is nightly.
    }

    // Create groups.

    // Create integrations.

    info!("Ready to 🛫");

    Ok(())
}

View file

@ -1,389 +0,0 @@
use hashbrown::{HashMap, HashSet};
use std::cmp::Ordering;
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::fs::File;
use std::io::BufReader;
use std::path::Path;
use std::str::FromStr;
use std::time::Duration;
use rand::seq::SliceRandom;
use rand::Rng;
use serde::Deserialize;
use uuid::Uuid;
use crate::data::*;
// One access-log line exactly as it appears in the anonymised 389-ds json
// export, before validation or type conversion (see TryFrom below).
#[derive(Debug, Deserialize)]
struct RawRecord {
    // Connection identifier; numeric but serialised as a string.
    conn: String,
    // Elapsed/execution time; fractional seconds as a string.
    etime: String,
    ids: Vec<Uuid>,
    nentries: u32,
    // Relative time offset, "H:MM:SS" optionally with a fraction.
    rtime: String,
    #[serde(rename = "type")]
    op_type: String,
}
/// The kind of operation a raw 389-ds log line describes.
#[derive(Debug, PartialEq)]
enum RawOpType {
    Precreate,
    Add,
    Search,
    Mod,
    Delete,
    Bind,
}

impl FromStr for RawOpType {
    type Err = ();

    /// Map the short log token onto an operation type; unknown tokens
    /// yield `Err(())`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let op = match s {
            "precreate" => RawOpType::Precreate,
            "add" => RawOpType::Add,
            "srch" => RawOpType::Search,
            "mod" => RawOpType::Mod,
            "del" => RawOpType::Delete,
            "bind" => RawOpType::Bind,
            _ => return Err(()),
        };
        Ok(op)
    }
}
// A validated access-log record with properly typed fields.
#[derive(Debug)]
struct Record {
    conn: i32,
    // How long the original operation took to execute.
    etime: Duration,
    // Entry ids touched by the op; sorted + de-duplicated on conversion.
    ids: Vec<Uuid>,
    _nentries: u32,
    // Offset of the operation relative to the start of the log.
    rtime: Duration,
    op_type: RawOpType,
}
/// Parse a 389-ds relative timestamp of the form "H:MM:SS" or
/// "H:MM:SS.frac" (e.g. "0:00:00", "1:34:51.714690") into a `Duration`.
/// Returns `Err(())` when the shape or any component fails to parse.
fn parse_rtime(s: &str) -> Result<Duration, ()> {
    let mut fields = s.split(':');
    // Exactly three ':'-separated fields are required.
    let (Some(hh), Some(mm), Some(ss)) = (fields.next(), fields.next(), fields.next()) else {
        return Err(());
    };
    if fields.next().is_some() {
        return Err(());
    }

    let hh: u32 = hh.parse().map_err(|_| ())?;
    let mm: u32 = mm.parse().map_err(|_| ())?;
    let ss: f64 = ss.parse().map_err(|_| ())?;

    let whole_secs = ((mm * 60) + (hh * 3600)) as f64;
    Ok(Duration::from_secs_f64(whole_secs + ss))
}
impl Record {
    #[allow(clippy::wrong_self_convention)]
    // Convert a parsed log record into a replayable `Op`, updating `exists`
    // (a sorted vec of the currently-live entry ids) as entries are added
    // or deleted. Panics on inconsistency: adding an id that is already
    // live, deleting one that is not, or referencing an unknown entity.
    fn into_op(&self, all_entities: &HashMap<Uuid, Entity>, exists: &mut Vec<Uuid>) -> Op {
        let op_type = match self.op_type {
            RawOpType::Add => {
                // Insert each id in sorted position; a duplicate is a bug.
                self.ids.iter().for_each(|id| {
                    if let Err(idx) = exists.binary_search(id) {
                        exists.insert(idx, *id);
                    } else {
                        panic!();
                    }
                });
                // Map them all
                let new = self
                    .ids
                    .iter()
                    .map(|id| all_entities.get(id).unwrap().get_uuid())
                    .collect();
                OpType::Add(new)
            }
            RawOpType::Search => OpType::Search(self.ids.clone()),
            RawOpType::Mod => {
                let mut rng = &mut rand::thread_rng();
                // Cap random group membership at a third of live ids (min 1).
                let max_m = (exists.len() / 3) + 1;
                let mods = self
                    .ids
                    .iter()
                    .map(|id| {
                        match all_entities.get(id) {
                            Some(Entity::Account(_a)) => (*id, Change::Account),
                            Some(Entity::Group(_g)) => {
                                // This could be better! It's quite an evil method at the moment...
                                let m = rng.gen_range(0..max_m);
                                let ngrp = exists.choose_multiple(&mut rng, m).cloned().collect();
                                (*id, Change::Group(ngrp))
                            }
                            None => {
                                panic!();
                            }
                        }
                    })
                    .collect();
                OpType::Mod(mods)
            }
            RawOpType::Delete => {
                // Remove them.
                self.ids.iter().for_each(|id| {
                    if let Ok(idx) = exists.binary_search(id) {
                        exists.remove(idx);
                    } else {
                        panic!();
                    }
                });
                // Could consider checking that everything DOES exist before we start ...
                OpType::Delete(self.ids.clone())
            }
            RawOpType::Bind => OpType::Bind(self.ids[0]),
            // Precreate records are partitioned out before this is called.
            _ => panic!(),
        };
        Op {
            orig_etime: self.etime,
            rtime: self.rtime,
            op_type,
        }
    }
}
impl TryFrom<RawRecord> for Record {
    type Error = ();

    /// Validate and convert a raw json record: parse the numeric fields,
    /// normalise `ids` (sorted, de-duplicated), and map the op token onto
    /// its enum. Any parse failure yields `Err(())`.
    fn try_from(value: RawRecord) -> Result<Self, Self::Error> {
        let RawRecord {
            conn,
            etime,
            mut ids,
            nentries,
            rtime,
            op_type,
        } = value;

        ids.sort_unstable();
        ids.dedup();

        Ok(Record {
            conn: conn.parse::<i32>().map_err(|_| ())?,
            etime: f64::from_str(&etime)
                .map(Duration::from_secs_f64)
                .map_err(|_| ())?,
            ids,
            _nentries: nentries,
            rtime: parse_rtime(&rtime).map_err(|_| ())?,
            op_type: RawOpType::from_str(&op_type).map_err(|_| ())?,
        })
    }
}
// Preprocess an anonymised 389-ds access log (json) into an orca TestData
// set: parse and validate the records, synthesise an entity for every id
// seen, group operations by connection, and derive which bound id needs
// access to which entities. Writes pretty json to `output`; all failures
// are logged and cause an early return (no Result is surfaced).
pub fn doit(input: &Path, output: &Path) {
    info!(
        "Preprocessing data from {} to {} ...",
        input.to_str().unwrap(),
        output.to_str().unwrap()
    );

    let file = match File::open(input) {
        Ok(f) => f,
        Err(e) => {
            error!("Failed to open {} - {:?}", input.to_str().unwrap(), e);
            return;
        }
    };

    let out_file = match File::create(output) {
        Ok(f) => f,
        Err(e) => {
            error!("Failed to open {} - {:?}", output.to_str().unwrap(), e);
            return;
        }
    };

    let reader = BufReader::new(file);
    let u: Vec<RawRecord> = match serde_json::from_reader(reader) {
        Ok(data) => data,
        Err(e) => {
            error!("Failed to parse {} - {:?}", input.to_str().unwrap(), e);
            return;
        }
    };

    // Convert/validate every raw record; aborts on the first failure.
    let data: Result<Vec<_>, _> = u.into_iter().map(Record::try_from).collect();

    let data = match data {
        Ok(d) => d,
        Err(_) => {
            error!("Failed to transform record");
            return;
        }
    };

    // Now we can start to preprocess everything.
    let mut rng = &mut rand::thread_rng();

    // We need to know all id's of entries that will ever exist
    let all_ids: HashSet<Uuid> = data
        .iter()
        .flat_map(|rec| rec.ids.iter())
        .copied()
        .collect();

    // Remove anything that is a pre-create event.
    let (precreate, mut other): (Vec<_>, Vec<_>) = data
        .into_iter()
        .partition(|rec| rec.op_type == RawOpType::Precreate);

    // Before we can precreate, we need an idea to what each
    // item is. Lets get all ids and see which ones ever did a bind.
    // This means they are probably an account.
    let accounts: HashSet<Uuid> = other
        .iter()
        .filter(|rec| rec.op_type == RawOpType::Bind)
        .flat_map(|rec| rec.ids.iter())
        .copied()
        .collect();

    let mut precreate: Vec<Uuid> = precreate
        .iter()
        .flat_map(|rec| rec.ids.iter())
        .copied()
        .collect();
    precreate.sort_unstable();
    precreate.dedup();

    // Cap on random group membership size: a third of all ids, minimum 1.
    let max_m = (all_ids.len() / 3) + 1;

    // Now generate what our db entities all look like in one pass. This is a combo
    // of the precreate ids, and the ids that are ever accessed.
    let all_entities: HashMap<Uuid, Entity> = all_ids
        .iter()
        .map(|id| {
            let ent = if accounts.contains(id) {
                Entity::Account(Account::generate(*id))
            } else {
                // Choose the number of members:
                let m = rng.gen_range(0..max_m);
                let members = (precreate).choose_multiple(&mut rng, m).cloned().collect();
                Entity::Group(Group::generate(*id, members))
            };
            (*id, ent)
        })
        .collect();

    // Order everything, this will make it easier to get everything into connection groups
    // with their sub-operations in a correct order.
    other.sort_by(|a, b| match a.conn.cmp(&b.conn) {
        Ordering::Equal => a.rtime.cmp(&b.rtime),
        r => r,
    });

    let mut connections: BTreeMap<i32, Conn> = BTreeMap::new();

    // Tracks which ids are live as the log replays, seeded from precreate.
    let mut exists = precreate.clone();

    // Consume all the remaining records into connection structures.
    other.iter().for_each(|rec| {
        debug!("{:?}", rec);
        if let Some(c) = connections.get_mut(&rec.conn) {
            c.ops.push(rec.into_op(&all_entities, &mut exists));
        } else {
            connections.insert(
                rec.conn,
                Conn {
                    id: rec.conn,
                    ops: vec![rec.into_op(&all_entities, &mut exists)],
                },
            );
        }
    });

    // now collect these into the set of connections containing their operations.
    let connections: Vec<_> = connections.into_values().collect();

    // Now from the set of connections, we need to know what access may or may not
    // be required.
    let mut access: HashMap<Uuid, Vec<EntityType>> = HashMap::new();

    connections.iter().for_each(|conn| {
        let mut curbind = None;
        // start by assuming there is no auth

        conn.ops.iter().for_each(|op| {
            // if it's a bind, update our current access.
            match &op.op_type {
                OpType::Bind(id) => curbind = Some(id),
                OpType::Add(list) | OpType::Delete(list) => {
                    if let Some(id) = curbind.as_ref() {
                        let mut nlist: Vec<EntityType> = list
                            .iter()
                            .map(|uuid| all_entities.get(uuid).unwrap().get_entity_type())
                            .collect();

                        if let Some(ac) = access.get_mut(*id) {
                            ac.append(&mut nlist);
                        } else {
                            access.insert(**id, nlist);
                        }
                    } else {
                        // Else, no current bind, wtf?
                        panic!();
                    }
                }
                OpType::Mod(list) => {
                    if let Some(id) = curbind.as_ref() {
                        let mut nlist: Vec<EntityType> = list
                            .iter()
                            .map(|v| all_entities.get(&v.0).unwrap().get_entity_type())
                            .collect();

                        if let Some(ac) = access.get_mut(*id) {
                            ac.append(&mut nlist);
                        } else {
                            access.insert(**id, nlist);
                        }
                    } else {
                        // Else, no current bind, wtf?
                        panic!();
                    }
                }
                OpType::Search(_) => {}
            }
            // if it's a mod, declare we need that.
        });
    });

    // For each access
    // sort/dedup them.
    access.values_mut().for_each(|v| {
        v.sort_unstable();
        v.dedup();
    });

    let precreate: HashSet<_> = precreate.into_iter().collect();

    // Create the struct
    let td = TestData {
        all_entities,
        access,
        accounts,
        precreate,
        connections,
    };

    // Finally, write it out;
    if let Err(e) = serde_json::to_writer_pretty(out_file, &td) {
        error!("Writing to file -> {:?}", e);
    };
}

View file

@ -1,86 +1,219 @@
use kanidm_proto::constants::{DEFAULT_LDAP_LOCALHOST, DEFAULT_SERVER_LOCALHOST};
use crate::error::Error;
use rand::{thread_rng, Rng};
use serde::{Deserialize, Serialize};
use std::path::Path;
use std::time::Duration;
// Connection settings for the ldap/ds target profile.
#[derive(Debug, Serialize, Deserialize)]
pub struct DsConfig {
    pub uri: String,
    // "dm" password — presumably cn=Directory Manager; confirm at use site.
    pub dm_pw: String,
    pub base_dn: String,
}
// Sorry nerds, capping this at 40 bits.
// Upper bound (2^40) on generated item counts; enforced by validate_u64_bound.
const ITEM_UPPER_BOUND: u64 = 1 << 40;
// Connection settings for the ipa target profile.
#[derive(Debug, Serialize, Deserialize)]
pub struct IpaConfig {
    pub uri: String,
    pub realm: String,
    pub admin_pw: String,
}
// Default data-set dimensions used when the builder leaves them unset.
const DEFAULT_GROUP_COUNT: u64 = 10;
const DEFAULT_PERSON_COUNT: u64 = 10;
// Connection settings for the kanidm http target profile.
#[derive(Debug, Serialize, Deserialize)]
pub struct KaniHttpConfig {
    pub uri: String,
    pub admin_pw: String,
}
// Connection settings for the kanidm ldap target profile (needs both the
// http uri and the ldap listener uri).
#[derive(Debug, Serialize, Deserialize)]
pub struct KaniLdapConfig {
    pub uri: String,
    pub ldap_uri: String,
    pub admin_pw: String,
    pub base_dn: String,
}
// Tunables for the search-basic test.
#[derive(Debug, Serialize, Deserialize)]
pub struct SearchBasicConfig {
    // Could consider fn for this #[serde(default = "Priority::lowest")]
    // Seconds of load to discard before measurement begins.
    pub warmup_seconds: u32,
    // Number of concurrent worker accounts/tasks.
    pub workers: u32,
}
impl Default for SearchBasicConfig {
fn default() -> Self {
SearchBasicConfig {
warmup_seconds: 5,
workers: 16,
}
}
}
// Default warmup (seconds) and test duration (seconds; None = run until
// explicitly signalled to stop).
const DEFAULT_WARMUP_TIME: u64 = 10;
const DEFAULT_TEST_TIME: Option<u64> = Some(180);
#[derive(Debug, Serialize, Deserialize)]
pub struct Profile {
pub name: String,
pub data: String,
pub results: String,
pub ds_config: Option<DsConfig>,
pub ipa_config: Option<IpaConfig>,
pub kani_http_config: Option<KaniHttpConfig>,
pub kani_ldap_config: Option<KaniLdapConfig>,
#[serde(default)]
pub search_basic_config: SearchBasicConfig,
control_uri: String,
admin_password: String,
idm_admin_password: String,
seed: i64,
extra_uris: Vec<String>,
// Dimensions of the test to setup.
warmup_time: u64,
test_time: Option<u64>,
group_count: u64,
person_count: u64,
}
impl Default for Profile {
fn default() -> Self {
let kani_http_config = KaniHttpConfig {
uri: format!("https://{}", DEFAULT_SERVER_LOCALHOST),
admin_pw: "".to_string(),
};
impl Profile {
pub fn control_uri(&self) -> &str {
self.control_uri.as_str()
}
let kani_ldap_config = KaniLdapConfig {
uri: format!("https://{}", DEFAULT_SERVER_LOCALHOST),
ldap_uri: format!("ldaps://{}", DEFAULT_LDAP_LOCALHOST),
admin_pw: "".to_string(),
base_dn: "dn=localhost".to_string(),
};
pub fn extra_uris(&self) -> &[String] {
self.extra_uris.as_slice()
}
Self {
name: "orca default profile".to_string(),
data: "/tmp/kanidm/orcatest".to_string(),
results: "/tmp/kanidm/orca-results/".to_string(),
ds_config: None,
ipa_config: None,
kani_http_config: Some(kani_http_config),
kani_ldap_config: Some(kani_ldap_config),
search_basic_config: SearchBasicConfig::default(),
pub fn admin_password(&self) -> &str {
self.admin_password.as_str()
}
pub fn idm_admin_password(&self) -> &str {
self.idm_admin_password.as_str()
}
#[allow(dead_code)]
pub fn group_count(&self) -> u64 {
self.group_count
}
pub fn person_count(&self) -> u64 {
self.person_count
}
pub fn seed(&self) -> u64 {
if self.seed < 0 {
self.seed.wrapping_mul(-1) as u64
} else {
self.seed as u64
}
}
pub fn warmup_time(&self) -> Duration {
Duration::from_secs(self.warmup_time)
}
pub fn test_time(&self) -> Option<Duration> {
self.test_time.map(Duration::from_secs)
}
}
// Builder for Profile. `None` fields fall back to defaults (or, for the
// seed, a random value) when build() runs.
pub struct ProfileBuilder {
    pub control_uri: String,
    pub admin_password: String,
    pub idm_admin_password: String,
    pub seed: Option<u64>,
    pub extra_uris: Vec<String>,
    // Dimensions of the test to setup.
    pub warmup_time: Option<u64>,
    pub test_time: Option<Option<u64>>,
    pub group_count: Option<u64>,
    pub person_count: Option<u64>,
}
fn validate_u64_bound(value: Option<u64>, default: u64) -> Result<u64, Error> {
if let Some(v) = value {
if v > ITEM_UPPER_BOUND {
error!("group count exceeds upper bound ({})", ITEM_UPPER_BOUND);
Err(Error::ProfileBuilder)
} else {
Ok(v)
}
} else {
Ok(default)
}
}
impl ProfileBuilder {
    /// Start a builder with the mandatory connection/credential values;
    /// all test dimensions remain unset and receive defaults in `build`.
    pub fn new(control_uri: String, admin_password: String, idm_admin_password: String) -> Self {
        ProfileBuilder {
            control_uri,
            admin_password,
            idm_admin_password,
            seed: None,
            extra_uris: Vec::new(),
            warmup_time: None,
            test_time: None,
            group_count: None,
            person_count: None,
        }
    }

    /// Set (or clear) the RNG seed for deterministic data generation.
    pub fn seed(mut self, seed: Option<u64>) -> Self {
        self.seed = seed;
        self
    }

    #[allow(dead_code)]
    /// Set the warmup period, in seconds.
    pub fn warmup_time(mut self, time: Option<u64>) -> Self {
        self.warmup_time = time;
        self
    }

    #[allow(dead_code)]
    /// Set the test duration in seconds (`Some(None)` = run until stopped).
    pub fn test_time(mut self, time: Option<Option<u64>>) -> Self {
        self.test_time = time;
        self
    }

    #[allow(dead_code)]
    /// Set the number of groups to generate.
    pub fn group_count(mut self, group_count: Option<u64>) -> Self {
        self.group_count = group_count;
        self
    }

    #[allow(dead_code)]
    /// Set the number of persons to generate.
    pub fn person_count(mut self, person_count: Option<u64>) -> Self {
        self.person_count = person_count;
        self
    }

    /// Validate dimensions, fill defaults, and produce the final Profile.
    /// A missing seed is generated randomly.
    ///
    /// # Errors
    /// Returns `Error::ProfileBuilder` when a dimension exceeds the bound.
    pub fn build(self) -> Result<Profile, Error> {
        let ProfileBuilder {
            control_uri,
            admin_password,
            idm_admin_password,
            seed,
            extra_uris: _,
            warmup_time,
            test_time,
            group_count,
            person_count,
        } = self;

        let seed: u64 = seed.unwrap_or_else(|| {
            let mut rng = thread_rng();
            rng.gen()
        });

        let extra_uris = Vec::new();

        let group_count = validate_u64_bound(group_count, DEFAULT_GROUP_COUNT)?;
        let person_count = validate_u64_bound(person_count, DEFAULT_PERSON_COUNT)?;

        let warmup_time = warmup_time.unwrap_or(DEFAULT_WARMUP_TIME);
        let test_time = test_time.unwrap_or(DEFAULT_TEST_TIME);

        // Fold the u64 seed into an i64 for storage (presumably because toml
        // integers are signed), mapping values above i64::MAX into the
        // negative range. Bug fix: the previous `-(offset as i64)` overflowed
        // (panic in debug builds) when the offset was exactly 2^63, i.e.
        // seed == u64::MAX; the wrapping negation yields the intended
        // i64::MIN for that case and is bit-identical for every other value.
        let seed: i64 = if seed > i64::MAX as u64 {
            (seed - i64::MAX as u64).wrapping_neg() as i64
        } else {
            seed as i64
        };

        Ok(Profile {
            control_uri,
            admin_password,
            idm_admin_password,
            seed,
            extra_uris,
            warmup_time,
            test_time,
            group_count,
            person_count,
        })
    }
}
impl Profile {
    /// Serialise this profile as TOML and write it to `path`. Failures are
    /// logged and mapped onto the crate error type.
    pub fn write_to_path(&self, path: &Path) -> Result<(), Error> {
        let file_contents = toml::to_string(self).map_err(|toml_err| {
            error!(?toml_err);
            Error::SerdeToml
        })?;

        std::fs::write(path, file_contents).map_err(|io_err| {
            error!(?io_err);
            Error::Io
        })
    }
}
impl TryFrom<&Path> for Profile {
    type Error = Error;

    // Load a profile from a TOML file on disk; the inverse of
    // write_to_path. I/O and parse failures are logged and mapped.
    fn try_from(path: &Path) -> Result<Self, Self::Error> {
        let file_contents = std::fs::read_to_string(path).map_err(|io_err| {
            error!(?io_err);
            Error::Io
        })?;

        toml::from_str(&file_contents).map_err(|toml_err| {
            error!(?toml_err);
            Error::SerdeToml
        })
    }
}

229
tools/orca/src/run.rs Normal file
View file

@ -0,0 +1,229 @@
use crate::error::Error;
use crate::state::*;
use crate::stats::{BasicStatistics, TestPhase};
use std::sync::Arc;
use rand::seq::SliceRandom;
use rand::SeedableRng;
use rand_chacha::ChaCha8Rng;
use crossbeam::queue::{ArrayQueue, SegQueue};
use kanidm_client::{KanidmClient, KanidmClientBuilder};
use tokio::sync::broadcast;
use std::time::{Duration, Instant};
// Drive a single simulated person: repeatedly run their actor model
// against the client, pushing each event record onto the stats queue,
// until any message (or channel closure/lag) arrives on `actor_rx`.
async fn actor_person(
    client: KanidmClient,
    person: Person,
    stats_queue: Arc<SegQueue<EventRecord>>,
    mut actor_rx: broadcast::Receiver<Signal>,
) -> Result<(), Error> {
    let mut model = person.model.as_dyn_object();

    while let Err(broadcast::error::TryRecvError::Empty) = actor_rx.try_recv() {
        let event = model.transition(&client, &person).await?;
        stats_queue.push(event);
    }

    debug!("Stopped person {}", person.username);

    Ok(())
}
// A single timed observation emitted by an actor for the statistics
// collector.
pub struct EventRecord {
    pub start: Instant,
    pub duration: Duration,
    pub details: EventDetail,
}
// What kind of operation an EventRecord measured.
pub enum EventDetail {
    Authentication,
    Logout,
    Error,
}
// Control message broadcast to running tasks.
#[derive(Clone, Debug)]
pub enum Signal {
    Stop,
}
async fn execute_inner(
warmup: Duration,
test_time: Option<Duration>,
mut control_rx: broadcast::Receiver<Signal>,
stat_ctrl: Arc<ArrayQueue<TestPhase>>,
) -> Result<(), Error> {
// Delay for warmup time.
tokio::select! {
_ = tokio::time::sleep(warmup) => {
// continue.
}
_ = control_rx.recv() => {
// Untill we add other signal types, any event is
// either Ok(Signal::Stop) or Err(_), both of which indicate
// we need to stop immediately.
return Err(Error::Interupt);
}
}
let start = Instant::now();
if let Err(crossbeam_err) = stat_ctrl.push(TestPhase::Start(start)) {
error!(
?crossbeam_err,
"Unable to signal statistics collector to start"
);
return Err(Error::Crossbeam);
}
if let Some(test_time) = test_time {
let sleep = tokio::time::sleep(test_time);
tokio::pin!(sleep);
let recv = (control_rx).recv();
tokio::pin!(recv);
// Wait for some condition (signal, or time).
tokio::select! {
_ = sleep => {
// continue.
}
_ = recv => {
// Untill we add other signal types, any event is
// either Ok(Signal::Stop) or Err(_), both of which indicate
// we need to stop immediately.
return Err(Error::Interupt);
}
}
} else {
let _ = control_rx.recv().await;
}
let end = Instant::now();
if let Err(crossbeam_err) = stat_ctrl.push(TestPhase::End(end)) {
error!(
?crossbeam_err,
"Unable to signal statistics collector to start"
);
return Err(Error::Crossbeam);
}
Ok(())
}
// Top level of a simulation run: start the statistics collector, build a
// deterministically-chosen client per person, spawn one actor task per
// person, run the timed skeleton (execute_inner), then shut down in order
// (stats, actors, joins) and surface the inner test result.
pub async fn execute(state: State, control_rx: broadcast::Receiver<Signal>) -> Result<(), Error> {
    // Create a statistics queue.
    let stats_queue = Arc::new(SegQueue::new());
    let stats_ctrl = Arc::new(ArrayQueue::new(4));

    // Spawn the stats aggregator
    let c_stats_queue = stats_queue.clone();
    let c_stats_ctrl = stats_ctrl.clone();

    let mut dyn_data_collector = BasicStatistics::new();

    // spawn_blocking: the collector loop is synchronous.
    let stats_task =
        tokio::task::spawn_blocking(move || dyn_data_collector.run(c_stats_queue, c_stats_ctrl));

    // Create clients. Note, we actually seed these deterministically too, so that
    // our persons are spread over the clients that exist, in a way that is also
    // deterministic.
    let mut seeded_rng = ChaCha8Rng::seed_from_u64(state.profile.seed());

    let clients = std::iter::once(state.profile.control_uri().to_string())
        .chain(state.profile.extra_uris().iter().cloned())
        .map(|uri| {
            KanidmClientBuilder::new()
                .address(uri)
                .danger_accept_invalid_hostnames(true)
                .danger_accept_invalid_certs(true)
                .build()
                .map_err(|err| {
                    error!(?err, "Unable to create kanidm client");
                    Error::KanidmClient
                })
        })
        .collect::<Result<Vec<_>, _>>()?;

    let (actor_tx, _actor_rx) = broadcast::channel(1);

    // Start the actors
    let mut tasks = Vec::with_capacity(state.persons.len());
    for person in state.persons.into_iter() {
        let client = clients
            .choose(&mut seeded_rng)
            .expect("Invalid client set")
            .new_session()
            .map_err(|err| {
                error!(?err, "Unable to create kanidm client");
                Error::KanidmClient
            })?;
        let c_stats_queue = stats_queue.clone();

        let c_actor_rx = actor_tx.subscribe();

        tasks.push(tokio::spawn(actor_person(
            client,
            person,
            c_stats_queue,
            c_actor_rx,
        )))
    }

    let warmup = state.profile.warmup_time();
    let testtime = state.profile.test_time();

    // We run a separate test inner so we don't have to worry about
    // task spawn/join within our logic.
    let c_stats_ctrl = stats_ctrl.clone();
    // Don't ? this, we want to stash the result so we cleanly stop all the workers
    // before returning the inner test result.
    let test_result = execute_inner(warmup, testtime, control_rx, c_stats_ctrl).await;

    info!("stopping stats");

    // The statistics collector has been working in the BG, and was likely told
    // to end by now, but if not (due to an error) send a signal to stop immediately.
    if let Err(crossbeam_err) = stats_ctrl.push(TestPhase::StopNow) {
        error!(
            ?crossbeam_err,
            "Unable to signal statistics collector to stop"
        );
        return Err(Error::Crossbeam);
    }

    info!("stopping workers");

    // Test workers to stop
    actor_tx.send(Signal::Stop).map_err(|broadcast_err| {
        error!(?broadcast_err, "Unable to signal workers to stop");
        Error::Tokio
    })?;

    info!("joining workers");

    // Join all the tasks.
    for task in tasks {
        task.await.map_err(|tokio_err| {
            error!(?tokio_err, "Failed to join task");
            Error::Tokio
        })??;
        // The double ? isn't a mistake, it's because this is Result<Result<T, E>, E>
        // and flatten is nightly.
    }

    // By this point the stats task should have been told to halt and rejoin.
    stats_task.await.map_err(|tokio_err| {
        error!(?tokio_err, "Failed to join statistics task");
        Error::Tokio
    })??;
    // Not an error, two ? to handle the inner data collector error.

    // Complete!
    test_result
}

View file

@ -1,67 +0,0 @@
use std::fs::create_dir_all;
use std::path::{Path, PathBuf};
use dialoguer::Confirm;
use crate::setup::config;
use crate::{TargetOpt, TestTypeOpt};
mod search;
/// Entry point for the legacy runner: validate (optionally creating, after
/// an interactive confirmation) the results directory from the profile,
/// then dispatch to the requested test implementation.
///
/// # Errors
/// Returns `Err(())` on config failure, a declined/failed directory
/// creation, a non-directory results path, or a failing test.
pub(crate) async fn doit(
    testtype: &TestTypeOpt,
    target: &TargetOpt,
    profile_path: &Path,
) -> Result<(), ()> {
    info!(
        "Performing test {} against {:?} from {}",
        testtype,
        target,
        profile_path.to_str().unwrap(),
    );

    let (data, profile, server) = config(target, profile_path)?;

    debug!("Profile -> {:?}", profile);

    let result_path = PathBuf::from(&profile.results);
    if !result_path.exists() {
        debug!(
            "Couldn't find results directory from profile: {:#?}",
            result_path
        );
        // Bug fix: Confirm::interact() yields Ok(true/false); the previous
        // `Ok(_)` arm created the directory even when the user answered no.
        match Confirm::new()
            .with_prompt(
                format!("I couldn't find the directory you told me to send results to ({:?}). Would you like to create it?",
                result_path,)
            )
            .interact()
        {
            Ok(true) => match create_dir_all(result_path.as_path()) {
                Ok(_) => info!("Successfully created {:#?}", result_path.canonicalize()),
                Err(error) => {
                    error!("{:#?}", error);
                    return Err(());
                }
            },
            _ => {
                println!("Ok, going to quit!");
                return Err(());
            }
        }
    }
    if !result_path.is_dir() {
        error!("Profile: results must be a directory");
        return Err(());
    };
    debug!("Result Path -> {}", result_path.to_str().unwrap());

    // Match on what kind of test we are doing. It takes over from here.
    match testtype {
        TestTypeOpt::SearchBasic => search::basic(data, profile, server, result_path).await?,
    };

    info!("Test {} complete.", testtype);
    Ok(())
}

View file

@ -1,339 +0,0 @@
use std::fs::File;
use std::io::BufWriter;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::{Duration, Instant};
use crossbeam::channel::{unbounded, RecvTimeoutError};
use mathru::statistics::distrib::{Continuous, Normal};
use rand::seq::{IteratorRandom, SliceRandom};
use serde::{Deserialize, Serialize};
use tokio::sync::broadcast;
use tokio::task;
use crate::data::{Entity, OpType, TestData};
use crate::profile::Profile;
use crate::{TargetServer, TargetServerBuilder};
// Lifecycle notifications between the old search test's driver, workers
// and arbiter.
#[derive(Debug, Clone)]
enum TestPhase {
    WarmUp,
    // Running,
    Shutdown,
}
// One row of the exported results csv: when the sample started, how long
// it took, and its associated count (presumably entries returned —
// confirm at the write site).
#[derive(Serialize, Deserialize)]
struct CsvRow {
    start: f64,
    duration: f64,
    count: usize,
}
// Collector side of a test run: discard worker results during the warmup
// window, then gather datapoints for the measurement window and return
// them. Any protocol/channel error yields an empty vec.
async fn basic_arbiter(
    mut broadcast_rx: tokio::sync::broadcast::Receiver<TestPhase>,
    raw_results_rx: &crossbeam::channel::Receiver<(Duration, Duration, usize)>,
    warmup_seconds: u32,
) -> Vec<(Duration, Duration, usize)> {
    info!("Starting test arbiter ...");

    // Wait on the message that the workers have started the warm up.
    let bcast_msg = broadcast_rx.recv().await.unwrap();
    if !matches!(bcast_msg, TestPhase::WarmUp) {
        error!("Invalid broadcast state to arbiter");
        return Vec::new();
    }

    // Wait for warmup seconds.
    // end of warmup
    let end_of_warmup = Instant::now() + Duration::from_secs(warmup_seconds as u64);

    // Count discarded warmup results to pre-size the collection buffer.
    let mut count = 0;
    loop {
        match raw_results_rx.recv_deadline(end_of_warmup) {
            // We are currently discarding results.
            Ok(_) => {
                count += 1;
            }
            Err(RecvTimeoutError::Timeout) => {
                break;
            }
            Err(_) => {
                error!("Worker channel error");
                return Vec::new();
            }
        }
    }

    info!("Warmup has passed, collecting data");

    let mut results = Vec::with_capacity(count * 4);

    // Now we are running, so collect our data.
    // NOTE(review): the measurement window is hard-coded to 10 seconds here.
    let end_of_test = Instant::now() + Duration::from_secs(10);
    loop {
        match raw_results_rx.recv_deadline(end_of_test) {
            Ok(datum) => results.push(datum),
            Err(RecvTimeoutError::Timeout) => {
                break;
            }
            Err(_) => {
                error!("Worker channel error");
                return Vec::new();
            }
        }
    }

    info!(
        "Stopping test arbiter. Gathered {} datapoints",
        results.len()
    );

    results
}
// One load-generation loop: build a client, bind as `name`, then
// repeatedly (re)authenticate and run a randomly chosen search from
// `searches`, reporting each sample tuple until a Shutdown broadcast
// arrives. Fatal setup/channel errors just end the worker.
async fn basic_worker(
    test_start: Instant,
    builder: TargetServerBuilder,
    name: String,
    pw: String,
    searches: Arc<Vec<Vec<String>>>,
    mut broadcast_rx: tokio::sync::broadcast::Receiver<TestPhase>,
    raw_results_tx: crossbeam::channel::Sender<(Duration, Duration, usize)>,
) {
    debug!("Starting worker ...");

    let server = match builder.build() {
        Ok(s) => s,
        Err(_) => {
            error!("Failed to build client");
            return;
        }
    };

    if server
        .open_user_connection(test_start, &name, &pw)
        .await
        .is_err()
    {
        error!("Failed to authenticate connection");
        return;
    }

    loop {
        // While nothing in broadcast.
        match broadcast_rx.try_recv() {
            Ok(TestPhase::Shutdown) => {
                // Complete.
                break;
            }
            Err(tokio::sync::broadcast::error::TryRecvError::Empty) | Ok(_) => {
                // Ignore
            }
            Err(_) => {
                error!("broadcast error");
                return;
            }
        }

        let s = {
            let mut rng = rand::thread_rng();
            searches.as_slice().choose(&mut rng).unwrap()
        };

        // Ensure we are logged out.
        server.close_connection().await;

        // Search something!
        let cr = match server.open_user_connection(test_start, &name, &pw).await {
            Ok(r) => r,
            Err(_) => {
                error!("Failed to authenticate connection");
                continue;
            }
        };

        let sr = match server.search(test_start, s.as_slice()).await {
            Ok(r) => r,
            Err(_) => {
                error!("Search Error");
                continue;
            }
        };

        // Append results. Combines the connection sample (cr) and search
        // sample (sr) — presumably (start, duration, count); confirm
        // against the TargetServer return types.
        let r = (cr.0, cr.1 + sr.1, sr.2);
        let _ = raw_results_tx.send(r);
    }

    // Done
    debug!("Stopping worker ...");
}
pub(crate) async fn basic(
data: TestData,
profile: Profile,
server: TargetServer,
result_path: PathBuf,
) -> Result<(), ()> {
// From all the data, process and find all the search events.
// Create these into an Arc<vec> so they can be sampled from by workers.
let searches: Vec<Vec<String>> = data
.connections
.iter()
.flat_map(|conn| conn.ops.iter())
.filter_map(|op| {
if let OpType::Search(list) = &op.op_type {
// Now get each name.
let names: Vec<String> = list
.iter()
.map(|u| data.all_entities.get(u).unwrap().get_name().to_string())
.collect();
Some(names)
} else {
None
}
})
.collect();
let searches = Arc::new(searches);
// We need a channel for all the results.
let (raw_results_tx, raw_results_rx) = unbounded();
// Setup a broadcast for the notifications.
let (broadcast_tx, broadcast_rx) = broadcast::channel(2);
// Start an arbiter that will control the test.
// This should use spawn blocking.
let warmup_seconds = profile.search_basic_config.warmup_seconds;
let arbiter_join_handle =
task::spawn(
async move { basic_arbiter(broadcast_rx, &raw_results_rx, warmup_seconds).await },
);
// Get out our conn details
let mut rng = rand::thread_rng();
// But only if they exist from the start.
let accs = data
.accounts
.intersection(&data.precreate)
.choose_multiple(&mut rng, profile.search_basic_config.workers as usize);
let mut accs: Vec<_> = accs
.into_iter()
.filter_map(|u| {
let e = data.all_entities.get(u).unwrap();
if let Entity::Account(aref) = e {
Some((aref.name.clone(), aref.password.clone()))
} else {
None
}
})
.collect();
if accs.is_empty() {
error!("No accounts found in data set, unable to proceed");
return Err(());
}
while accs.len() < (profile.search_basic_config.workers as usize) {
let mut dup = accs.clone();
accs.append(&mut dup);
}
let test_start = Instant::now();
// Start up as many async as workers requested.
for i in 0..profile.search_basic_config.workers {
// give each worker
// * server connection
let builder = server.builder();
// Which is authenticated ...
let name = accs[i as usize].0.clone();
let pw = accs[i as usize].1.clone();
// * arc searches
let searches_c = searches.clone();
// * the broadcast receiver.
let broadcast_rx_c = broadcast_tx.subscribe();
// * the result queue
let raw_results_tx_c = raw_results_tx.clone();
task::spawn(async move {
basic_worker(
test_start,
builder,
name,
pw,
searches_c,
broadcast_rx_c,
raw_results_tx_c,
)
.await
});
}
info!("Starting the warmup...");
// Tell the arbiter to start the warm up counter now.
broadcast_tx
.send(TestPhase::WarmUp)
.map_err(|_| error!("Unable to broadcast warmup state change"))?;
// Wait on the arbiter, it will return our results when it's ready.
let raw_results = arbiter_join_handle.await.map_err(|_| {
error!("Test arbiter was unable to rejoin.");
})?;
// Now signal the workers to stop. We don't care if this fails.
let _ = broadcast_tx
.send(TestPhase::Shutdown)
.map_err(|_| error!("Unable to broadcast stop state change, but that's OK."));
// Now we can finalise our data, based on what analysis we can actually do here.
process_raw_results(&raw_results);
// Write the raw results out.
let result_name = format!("basic_{}.csv", server.rname());
let result_path = result_path.join(result_name);
let result_file = match File::create(&result_path) {
Ok(f) => f,
Err(e) => {
error!("Failed to open {} - {:?}", result_path.to_str().unwrap(), e);
return Err(());
}
};
let mut wtr = csv::Writer::from_writer(BufWriter::new(result_file));
raw_results
.into_iter()
.try_for_each(|(s, d, c)| {
wtr.serialize(CsvRow {
start: s.as_secs_f64(),
duration: d.as_secs_f64(),
count: c,
})
})
.map_err(|e| error!("csv error {:?}", e))?;
wtr.flush().map_err(|e| error!("csv error {:?}", e))?;
Ok(())
}
fn process_raw_results(raw_results: &[(Duration, Duration, usize)]) {
// Do nerd shit.
// Get the times
let optimes: Vec<_> = raw_results
.iter()
.map(|(_, d, _)| d.as_secs_f64())
.collect();
let distrib: Normal<f64> = Normal::from_data(&optimes);
let sd = distrib.variance().sqrt();
info!("mean: {} seconds", distrib.mean());
info!("variance: {}", distrib.variance());
info!("SD: {} seconds", sd);
info!("95%: {}", distrib.mean() + (2.0 * sd));
}

View file

@ -1,155 +0,0 @@
use std::fs::File;
use std::io::{BufReader, Read};
use std::path::{Path, PathBuf};
use uuid::Uuid;
use crate::data::TestData;
use crate::ds::DirectoryServer;
use crate::ipa::IpaServer;
use crate::kani::{KaniHttpServer, KaniLdapServer};
use crate::profile::Profile;
use crate::{TargetOpt, TargetServer};
pub(crate) fn config(
target: &TargetOpt,
profile_path: &Path,
) -> Result<(TestData, Profile, TargetServer), ()> {
// read the profile that we are going to be using/testing
let mut f = File::open(profile_path).map_err(|e| {
error!("Unable to open profile file [{:?}] 🥺", e);
})?;
let mut contents = String::new();
f.read_to_string(&mut contents)
.map_err(|e| error!("unable to read profile contents {:?}", e))?;
let profile: Profile = toml::from_str(contents.as_str())
.map_err(|e| eprintln!("unable to parse config {:?}", e))?;
debug!("Profile -> {:?}", profile);
// Where is our datafile?
let data_path = if Path::new(&profile.data).is_absolute() {
PathBuf::from(&profile.data)
} else if let Some(p) = profile_path.parent() {
p.join(&profile.data)
} else {
error!(
"Unable to find parent directory of {}",
profile_path.to_str().unwrap()
);
return Err(());
};
debug!("Data Path -> {}", data_path.to_str().unwrap());
// Does our target section exist?
let server: TargetServer = match target {
TargetOpt::Ds => {
if let Some(dsconfig) = profile.ds_config.as_ref() {
DirectoryServer::new(dsconfig)?
} else {
error!("To use ds, you must have the ds_config section in your profile");
return Err(());
}
}
TargetOpt::Ipa => {
if let Some(ipaconfig) = profile.ipa_config.as_ref() {
IpaServer::new(ipaconfig)?
} else {
error!("To use ipa, you must have the ipa_config section in your profile");
return Err(());
}
}
TargetOpt::KanidmLdap => {
if let Some(klconfig) = profile.kani_ldap_config.as_ref() {
KaniLdapServer::new(klconfig)?
} else {
error!("To use kanidm_ldap, you must have the kani_ldap_config section in your profile");
return Err(());
}
}
TargetOpt::Kanidm => {
if let Some(khconfig) = profile.kani_http_config.as_ref() {
KaniHttpServer::new(khconfig)?
} else {
error!("To use kanidm, you must have the kani_http_config section in your profile");
return Err(());
}
}
};
debug!("Target server info -> {}", server.info());
// load the related data (if any) or generate it if that is what we have.
let data_file = File::open(&data_path).map_err(|e| {
error!("Unable to open data file [{:?}] 🥺", e);
})?;
let data_reader = BufReader::new(data_file);
let data: TestData = serde_json::from_reader(data_reader).map_err(|e| {
error!(
"Unable to process data file {}. You may need to preprocess it again: {:?}",
data_path.display(),
e
);
})?;
Ok((data, profile, server))
}
/// Prepare the target server for a test run: remove every entry the test
/// will add or modify, then precreate the required entities and access
/// controls.
pub(crate) async fn doit(target: &TargetOpt, profile_path: &Path) -> Result<(), ()> {
    info!(
        "Performing setup of {:?} from {}",
        target,
        profile_path.to_str().unwrap(),
    );

    let (data, _profile, server) = config(target, profile_path)?;

    // Collect the set of uuids that need to be reset on the server -
    // anything that the test operations will add or modify.
    // NOTE: groups that were modified are covered by require_reset, so
    // precreated groups are not re-collected here.
    let mut to_remove: Vec<Uuid> = data
        .connections
        .iter()
        .flat_map(|conn| conn.ops.iter())
        .filter_map(|op| op.require_reset())
        .flatten()
        .collect();

    to_remove.sort_unstable();
    to_remove.dedup();

    debug!("Will remove IDS -> {:?}", to_remove);

    server.open_admin_connection().await?;

    // Purge everything that has to be reset ...
    server.setup_admin_delete_uuids(to_remove.as_slice()).await?;

    // ... then recreate the entities the test expects to exist ...
    server
        .setup_admin_precreate_entities(&data.precreate, &data.all_entities)
        .await?;

    // ... and grant extra privileges wherever something modifies an entry
    // that is not itself.
    server
        .setup_access_controls(&data.access, &data.all_entities)
        .await?;

    // Done!
    Ok(())
}

91
tools/orca/src/state.rs Normal file
View file

@ -0,0 +1,91 @@
use crate::error::Error;
use crate::model::ActorModel;
use crate::profile::Profile;
use serde::{Deserialize, Serialize};
use std::collections::BTreeSet;
use std::path::Path;
/// A serialisable state representing the content of a kanidm database and potential
/// test content that can be created and modified.
///
/// This is all generated ahead of time before the test so that during the test
/// as minimal calculation as possible is required.
#[derive(Debug, Serialize, Deserialize)]
pub struct State {
    // The profile this state was generated from.
    pub profile: Profile,
    // ----------------------------
    // Flags to apply to the server before the test begins.
    pub preflight_flags: Vec<Flag>,
    // The set of test person accounts, including their credentials and
    // behaviour models.
    pub persons: Vec<Person>,
    // groups: Vec<Group>,
    // oauth_clients: Vec<Oauth2Clients>,
}
impl State {
    /// Serialise this state to `path` as JSON.
    ///
    /// Returns `Error::Io` if the file cannot be created or flushed, or
    /// `Error::SerdeJson` if serialisation fails.
    pub fn write_to_path(&self, path: &Path) -> Result<(), Error> {
        use std::io::Write;

        let output = std::fs::File::create(path).map_err(|io_err| {
            error!(?io_err);
            Error::Io
        })?;

        // Buffer the output - serde_json performs many small writes, which
        // are expensive when issued directly against a File.
        let mut writer = std::io::BufWriter::new(output);

        serde_json::to_writer(&mut writer, self).map_err(|json_err| {
            error!(?json_err);
            Error::SerdeJson
        })?;

        // Flush explicitly so buffered write errors are reported rather
        // than silently swallowed when the writer is dropped.
        writer.flush().map_err(|io_err| {
            error!(?io_err);
            Error::Io
        })
    }
}
impl TryFrom<&Path> for State {
    type Error = Error;

    /// Deserialise a `State` from the JSON file at `path`.
    ///
    /// Returns `Error::Io` if the file cannot be opened, or
    /// `Error::SerdeJson` if deserialisation fails.
    fn try_from(path: &Path) -> Result<Self, Self::Error> {
        let input = std::fs::File::open(path).map_err(|io_err| {
            error!(?io_err);
            Error::Io
        })?;

        // Buffer the input - serde_json::from_reader performs many small
        // reads, which are expensive when issued directly against a File.
        let reader = std::io::BufReader::new(input);

        serde_json::from_reader(reader).map_err(|json_err| {
            error!(?json_err);
            Error::SerdeJson
        })
    }
}
/// Server-wide settings applied as part of test preflight.
#[derive(Debug, Serialize, Deserialize)]
pub enum Flag {
    /// Disable the MFA requirement policy for all person accounts.
    DisableAllPersonsMFAPolicy,
}
/// Whether an item should exist on the server before the test starts.
#[derive(Debug, Serialize, Deserialize)]
pub enum PreflightState {
    /// The item must exist ahead of the test.
    Present,
    /// The item must not exist ahead of the test.
    Absent,
}
/// The behaviour model that drives a test actor during the run.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum Model {
    /// This is a "hardcoded" model that just authenticates and searches
    Basic,
}
impl Model {
    /// Construct the boxed actor implementation corresponding to this
    /// model variant.
    pub fn as_dyn_object(&self) -> Box<dyn ActorModel + Send> {
        match self {
            Model::Basic => Box::new(crate::model_basic::ActorBasic::new()),
        }
    }
}
/// A credential a test person can authenticate with.
#[derive(Debug, Serialize, Deserialize)]
pub enum Credential {
    /// A password stored in plain text for the test run.
    Password { plain: String },
}
/// A generated test person account.
#[derive(Debug, Serialize, Deserialize)]
pub struct Person {
    // Whether this person should exist before the test starts.
    pub preflight_state: PreflightState,
    pub username: String,
    pub display_name: String,
    // Names of the groups this person is a member of.
    pub member_of: BTreeSet<String>,
    // The credential this person authenticates with.
    pub credential: Credential,
    // The behaviour model that drives this person during the test.
    pub model: Model,
}

108
tools/orca/src/stats.rs Normal file
View file

@ -0,0 +1,108 @@
use crate::error::Error;
use crate::run::EventRecord;
use crossbeam::queue::{ArrayQueue, SegQueue};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};
use mathru::statistics::distrib::{Continuous, Normal};
/// Control messages sent to a data collector to delimit the test window.
#[derive(Debug)]
pub enum TestPhase {
    /// The measurement window opened at this instant; events that started
    /// before it are discarded.
    Start(Instant),
    /// The measurement window closed at this instant; events that started
    /// after it are discarded.
    End(Instant),
    /// Stop immediately without reporting.
    StopNow,
}
/// A consumer of `EventRecord`s that produces statistics for a test run.
pub trait DataCollector {
    /// Process events from `stats_queue`, coordinated by the phase
    /// messages received on `ctrl`.
    fn run(
        &mut self,
        stats_queue: Arc<SegQueue<EventRecord>>,
        ctrl: Arc<ArrayQueue<TestPhase>>,
    ) -> Result<(), Error>;
}
/// A simple collector that fits a normal distribution to event durations
/// and reports mean / variance / SD.
pub struct BasicStatistics {}

impl BasicStatistics {
    // Returning the boxed trait object directly is intentional - callers
    // only interact with this type through the DataCollector trait.
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> Box<dyn DataCollector + Send> {
        Box::new(BasicStatistics {})
    }
}
impl DataCollector for BasicStatistics {
fn run(
&mut self,
stats_queue: Arc<SegQueue<EventRecord>>,
ctrl: Arc<ArrayQueue<TestPhase>>,
) -> Result<(), Error> {
debug!("Started statistics collector");
// Wait for an event on ctrl. We use small amounts of backoff if none are
// present yet.
let start = loop {
match ctrl.pop() {
Some(TestPhase::Start(start)) => {
break start;
}
Some(TestPhase::End(_)) => {
// Invalid state.
return Err(Error::InvalidState);
}
Some(TestPhase::StopNow) => {
// We have been told to stop immediately.
return Ok(());
}
None => thread::sleep(Duration::from_millis(100)),
}
};
// Due to the design of this collector, we don't do anything until the end of the test.
let end = loop {
match ctrl.pop() {
Some(TestPhase::Start(_)) => {
// Invalid state.
return Err(Error::InvalidState);
}
Some(TestPhase::End(end)) => {
break end;
}
Some(TestPhase::StopNow) => {
// We have been told to stop immediately.
return Ok(());
}
None => thread::sleep(Duration::from_millis(100)),
}
};
let mut count: usize = 0;
let mut optimes = Vec::new();
// We will drain this now.
while let Some(event_record) = stats_queue.pop() {
if event_record.start < start || event_record.start > end {
// Skip event, outside of the test time window
continue;
}
count += 1;
optimes.push(event_record.duration.as_secs_f64());
}
info!("Received {} events", count);
let distrib: Normal<f64> = Normal::from_data(&optimes);
let sd = distrib.variance().sqrt();
info!("mean: {} seconds", distrib.mean());
info!("variance: {}", distrib.variance());
info!("SD: {} seconds", sd);
info!("95%: {}", distrib.mean() + (2.0 * sd));
debug!("Ended statistics collector");
Ok(())
}
}