diff --git a/examples/unixd b/examples/unixd
index f9f638ce2..4058963cd 100644
--- a/examples/unixd
+++ b/examples/unixd
@@ -140,3 +140,19 @@ version = '2'
 pam_allowed_login_groups = ["posix_group"]
 
+# Allow extension (mapping) of a local system group's members with members from a
+# kanidm provided group. For example, the local group `libvirt` can have its
+# membership extended with the members of `virt-admins`. This section can be
+# repeated many times.
+#
+# Default: empty set (no group maps)
+
+# [[kanidm.map_group]]
+# local = "libvirt"
+# with = "virt-admins"
+
+# [[kanidm.map_group]]
+# local = "admins"
+# with = "system-admins"
+
+
diff --git a/unix_integration/resolver/src/idprovider/interface.rs b/unix_integration/resolver/src/idprovider/interface.rs
index 8b8e6120a..9fb32ae12 100644
--- a/unix_integration/resolver/src/idprovider/interface.rs
+++ b/unix_integration/resolver/src/idprovider/interface.rs
@@ -215,6 +215,10 @@ pub trait IdProvider {
     /// Force this provider offline immediately.
     async fn mark_offline(&self);
 
+    /// Determine if this provider has a configured extension of a local system group
+    /// with remote members.
+    fn has_map_group(&self, local: &str) -> Option<&Id>;
+
     /// This is similar to a "domain join" process. What do we actually need to pass here
     /// for this to work for kanidm or himmelblau? Should we make it take a generic?
     /*
diff --git a/unix_integration/resolver/src/idprovider/kanidm.rs b/unix_integration/resolver/src/idprovider/kanidm.rs
index ecb2a20c0..63cedb4d5 100644
--- a/unix_integration/resolver/src/idprovider/kanidm.rs
+++ b/unix_integration/resolver/src/idprovider/kanidm.rs
@@ -1,6 +1,7 @@
 use crate::db::KeyStoreTxn;
-use crate::unix_config::KanidmConfig;
+use crate::unix_config::{GroupMap, KanidmConfig};
 use async_trait::async_trait;
+use hashbrown::HashMap;
 use kanidm_client::{ClientError, KanidmClient, StatusCode};
 use kanidm_proto::internal::OperationError;
 use kanidm_proto::v1::{UnixGroupToken, UnixUserToken};
@@ -41,6 +42,9 @@ struct KanidmProviderInternal {
 
 pub struct KanidmProvider {
     inner: Mutex<KanidmProviderInternal>,
+    // Because this value doesn't change, to support fast
+    // lookup we store the extension map here.
+    map_group: HashMap<String, Id>,
 }
 
 impl KanidmProvider {
@@ -91,6 +95,13 @@ impl KanidmProvider {
         let pam_allow_groups = config.pam_allowed_login_groups.iter().cloned().collect();
 
+        let map_group = config
+            .map_group
+            .iter()
+            .cloned()
+            .map(|GroupMap { local, with }| (local, Id::Name(with)))
+            .collect();
+
         Ok(KanidmProvider {
             inner: Mutex::new(KanidmProviderInternal {
                 state: CacheState::OfflineNextCheck(now),
@@ -99,6 +110,7 @@ impl KanidmProvider {
                 crypto_policy,
                 pam_allow_groups,
             }),
+            map_group,
         })
     }
 }
@@ -279,6 +291,10 @@ impl IdProvider for KanidmProvider {
         inner.state = CacheState::OfflineNextCheck(now);
     }
 
+    fn has_map_group(&self, local: &str) -> Option<&Id> {
+        self.map_group.get(local)
+    }
+
     async fn mark_offline(&self) {
         let mut inner = self.inner.lock().await;
         inner.state = CacheState::Offline;
diff --git a/unix_integration/resolver/src/resolver.rs b/unix_integration/resolver/src/resolver.rs
index c6f59550d..bd0908129 100644
--- a/unix_integration/resolver/src/resolver.rs
+++ b/unix_integration/resolver/src/resolver.rs
@@ -691,11 +691,22 @@ impl Resolver {
     pub async fn get_nssgroups(&self) -> Result<Vec<NssGroup>, ()> {
         let mut r = self.system_provider.get_nssgroups().await;
 
-        // Get all the system -> extension maps.
-
-        // For each sysgroup.
-        // if there is an extension.
-        // locate it, and resolve + extend.
+        // Extend all the local groups if maps exist.
+        for nss_group in r.iter_mut() {
+            for client in self.clients.iter() {
+                if let Some(extend_group_id) = client.has_map_group(&nss_group.name) {
+                    let (_, token) = self.get_cached_grouptoken(extend_group_id).await?;
+                    if let Some(token) = token {
+                        let members = self.get_groupmembers(token.uuid).await;
+                        nss_group.members.extend(members);
+                        debug!(
+                            "extended group {} with members from {}",
+                            nss_group.name, token.name
+                        );
+                    }
+                }
+            }
+        }
 
         let l = self.get_cached_grouptokens().await?;
         r.reserve(l.len());
@@ -711,8 +722,26 @@
     }
 
     async fn get_nssgroup(&self, grp_id: Id) -> Result<Option<NssGroup>, ()> {
-        if let Some(nss_group) = self.system_provider.get_nssgroup(&grp_id).await {
+        if let Some(mut nss_group) = self.system_provider.get_nssgroup(&grp_id).await {
             debug!("system provider satisfied request");
+
+            for client in self.clients.iter() {
+                if let Some(extend_group_id) = client.has_map_group(&nss_group.name) {
+                    let token = self.get_grouptoken(extend_group_id.clone()).await?;
+                    if let Some(token) = token {
+                        let members = self.get_groupmembers(token.uuid).await;
+                        nss_group.members.extend(members);
+                        debug!(
+                            "extended group {} with members from {}",
+                            nss_group.name, token.name
+                        );
+                    }
+                }
+            }
+
+            nss_group.members.sort_unstable();
+            nss_group.members.dedup();
+
+            return Ok(Some(nss_group));
         }
diff --git a/unix_integration/resolver/src/unix_config.rs b/unix_integration/resolver/src/unix_config.rs
index 3876804eb..1990bd633 100644
--- a/unix_integration/resolver/src/unix_config.rs
+++ b/unix_integration/resolver/src/unix_config.rs
@@ -58,7 +58,7 @@ struct ConfigV2 {
     kanidm: Option<KanidmConfigV2>,
 }
 
-#[derive(Debug, Deserialize)]
+#[derive(Clone, Debug, Deserialize)]
 pub struct GroupMap {
     pub local: String,
     pub with: String,
@@ -69,7 +69,7 @@ struct KanidmConfigV2 {
     conn_timeout: Option<u64>,
     request_timeout: Option<u64>,
     pam_allowed_login_groups: Option<Vec<String>>,
-    extend: Vec<GroupMap>,
+    map_group: Vec<GroupMap>,
 }
 
 #[derive(Debug, Deserialize)]
@@ -145,7 +145,7 @@ pub struct KanidmConfig {
     pub conn_timeout: u64,
     pub request_timeout: u64,
     pub pam_allowed_login_groups: Vec<String>,
-    pub extend: Vec<GroupMap>,
+    pub map_group: Vec<GroupMap>,
 }
 
 impl Default for UnixdConfig {
@@ -287,7 +287,7 @@ impl UnixdConfig {
     }
 
     fn apply_from_config_legacy(self, config: ConfigInt) -> Result {
-        let extend = config
+        let map_group = config
             .allow_local_account_override
             .iter()
             .map(|name| GroupMap {
@@ -300,7 +300,7 @@ impl UnixdConfig {
             conn_timeout: config.conn_timeout.unwrap_or(DEFAULT_CONN_TIMEOUT),
             request_timeout: config.request_timeout.unwrap_or(DEFAULT_CONN_TIMEOUT * 2),
             pam_allowed_login_groups: config.pam_allowed_login_groups.unwrap_or_default(),
-            extend,
+            map_group,
         });
 
         // Now map the values into our config.
@@ -395,7 +395,7 @@ impl UnixdConfig {
                 conn_timeout: kconfig.conn_timeout.unwrap_or(DEFAULT_CONN_TIMEOUT),
                 request_timeout: kconfig.request_timeout.unwrap_or(DEFAULT_CONN_TIMEOUT * 2),
                 pam_allowed_login_groups: kconfig.pam_allowed_login_groups.unwrap_or_default(),
-                extend: kconfig.extend,
+                map_group: kconfig.map_group,
             })
         } else {
             None
diff --git a/unix_integration/resolver/tests/cache_layer_test.rs b/unix_integration/resolver/tests/cache_layer_test.rs
index a8034592f..b839056e7 100644
--- a/unix_integration/resolver/tests/cache_layer_test.rs
+++ b/unix_integration/resolver/tests/cache_layer_test.rs
@@ -131,8 +131,8 @@ async fn setup_test(fix_fn: Fixture) -> (Resolver, KanidmClient) {
             conn_timeout: 1,
             request_timeout: 1,
             pam_allowed_login_groups: vec!["allowed_group".to_string()],
-            extend: vec![GroupMap {
-                local: "extensible".to_string(),
+            map_group: vec![GroupMap {
+                local: "extensible_group".to_string(),
                 with: "testgroup1".to_string(),
             }],
         },
@@ -1088,3 +1088,154 @@ async fn test_cache_group_fk_deferred() {
     // And check we have members in the group, since we came from a userlook up
     assert_eq!(gt.unwrap().members.len(), 1);
 }
+
+#[tokio::test]
+/// Test group extension. Group extension is not the same as "overriding". Extension
+/// only allows the *members* of a remote group to supplement the members of the local
+/// group. This prevents a remote group changing the gidnumber of the local group and
+/// causing breakages.
+async fn test_cache_extend_group_members() {
+    let (cachelayer, _adminclient) = setup_test(fixture(test_fixture)).await;
+
+    cachelayer
+        .reload_system_identities(
+            vec![EtcUser {
+                name: "local_account".to_string(),
+                uid: 30000,
+                gid: 30000,
+                password: Default::default(),
+                gecos: Default::default(),
+                homedir: Default::default(),
+                shell: Default::default(),
+            }],
+            None,
+            vec![EtcGroup {
+                // This group is configured to allow extension from
+                // the group "testgroup1"
+                name: "extensible_group".to_string(),
+                gid: 30001,
+                password: Default::default(),
+                // We have the local account as a member, it should NOT be stomped.
+                members: vec!["local_account".to_string()],
+            }],
+        )
+        .await;
+
+    // Force offline. Show we have no groups.
+    cachelayer.mark_offline().await;
+    let gt = cachelayer
+        .get_nssgroup_name("testgroup1")
+        .await
+        .expect("Failed to get from cache");
+    assert!(gt.is_none());
+
+    // While offline, extensible_group has only local_account as a member.
+    let gt = cachelayer
+        .get_nssgroup_name("extensible_group")
+        .await
+        .expect("Failed to get from cache");
+
+    let gt = gt.unwrap();
+    assert_eq!(gt.gid, 30001);
+    assert_eq!(gt.members.as_slice(), &["local_account".to_string()]);
+
+    // Go online. Group now exists, extensible_group has group members.
+    // Need to resolve test-account first so that the membership is linked.
+    cachelayer.mark_next_check_now(SystemTime::now()).await;
+    assert!(cachelayer.test_connection().await);
+
+    let ut = cachelayer
+        .get_nssaccount_name("testaccount1")
+        .await
+        .expect("Failed to get from cache");
+    assert!(ut.is_some());
+
+    let gt = cachelayer
+        .get_nssgroup_name("testgroup1")
+        .await
+        .expect("Failed to get from cache");
+
+    let gt = gt.unwrap();
+    assert_eq!(gt.gid, 20001);
+    assert_eq!(
+        gt.members.as_slice(),
+        &["testaccount1@idm.example.com".to_string()]
+    );
+
+    let gt = cachelayer
+        .get_nssgroup_name("extensible_group")
+        .await
+        .expect("Failed to get from cache");
+
+    let gt = gt.unwrap();
+    // Even though it's extended, still needs to be the local uid/gid
+    assert_eq!(gt.gid, 30001);
+    assert_eq!(
+        gt.members.as_slice(),
+        &[
+            "local_account".to_string(),
+            "testaccount1@idm.example.com".to_string()
+        ]
+    );
+
+    let groups = cachelayer
+        .get_nssgroups()
+        .await
+        .expect("Failed to get from cache");
+
+    assert!(groups.iter().any(|group| {
+        group.name == "extensible_group"
+            && group.members.as_slice()
+                == &[
+                    "local_account".to_string(),
+                    "testaccount1@idm.example.com".to_string(),
+                ]
+    }));
+
+    // Go offline. Group cached, extensible_group has members.
+    cachelayer.mark_offline().await;
+
+    let gt = cachelayer
+        .get_nssgroup_name("testgroup1")
+        .await
+        .expect("Failed to get from cache");
+
+    let gt = gt.unwrap();
+    assert_eq!(gt.gid, 20001);
+    assert_eq!(
+        gt.members.as_slice(),
+        &["testaccount1@idm.example.com".to_string()]
+    );
+
+    let gt = cachelayer
+        .get_nssgroup_name("extensible_group")
+        .await
+        .expect("Failed to get from cache");
+
+    let gt = gt.unwrap();
+    // Even though it's extended, still needs to be the local uid/gid
+    assert_eq!(gt.gid, 30001);
+    assert_eq!(
+        gt.members.as_slice(),
+        &[
+            "local_account".to_string(),
+            "testaccount1@idm.example.com".to_string()
+        ]
+    );
+
+    // clear cache
+    cachelayer
+        .clear_cache()
+        .await
+        .expect("failed to clear cache");
+
+    // No longer has testaccount.
+    let gt = cachelayer
+        .get_nssgroup_name("extensible_group")
+        .await
+        .expect("Failed to get from cache");
+
+    let gt = gt.unwrap();
+    assert_eq!(gt.gid, 30001);
+    assert_eq!(gt.members.as_slice(), &["local_account".to_string()]);
+}
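
For reference, a minimal sketch of what an active configuration could look like in /etc/kanidm/unixd once the `[[kanidm.map_group]]` sections are uncommented. The group names are illustrative only (they mirror the commented example added to examples/unixd above), and the surrounding layout assumes the v2 `[kanidm]` table used by the rest of the example file:

    version = '2'

    [kanidm]
    pam_allowed_login_groups = ["posix_group"]

    # Members of the kanidm group "virt-admins" are merged into the members of the
    # local "libvirt" group. The local gidnumber and any existing local members are
    # preserved; only the membership list is extended (and deduplicated).
    [[kanidm.map_group]]
    local = "libvirt"
    with = "virt-admins"

Each `[[kanidm.map_group]]` entry pairs one local group with one kanidm group; repeat the section for additional mappings.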