101 idlcache (#224)

Fixes #101: concurrent caching of IDLs and Entries. This yields a 10% improvement in test case execution time, and 35% for tests run in --release mode. A lot of code around the code base had to be touched due to the extra need for mut in some operations and some lifetime changes, but the majority of the work is in idl_arc_sqlite.rs, which holds the cache layer. There are further performance gains to be had, but most of those will come through improvements to the concread ARC and its related BTree implementation.
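For readers unfamiliar with the pattern, the new layer behaves as a read-through cache in front of SQLite: look the entry or IDL up in the in-memory cache first, and only on a miss fall back to the database, inserting the result back into the cache for the next caller. The following is a minimal, self-contained sketch of that lookup flow only; it uses a plain `HashMap` as a stand-in for the concread ARC, and the names (`Cache`, `get_or_load`, `fetch_from_db`) are illustrative, not part of the kanidm codebase.

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the concread ARC used in idl_arc_sqlite.rs.
struct Cache<K, V> {
    map: HashMap<K, V>,
}

impl<K: std::hash::Hash + Eq + Clone, V: Clone> Cache<K, V> {
    fn new() -> Self {
        Cache { map: HashMap::new() }
    }

    // Read-through lookup: return the cached value on a hit, otherwise
    // load it with `miss`, store a copy in the cache, and return it.
    fn get_or_load<F>(&mut self, key: &K, miss: F) -> V
    where
        F: FnOnce(&K) -> V,
    {
        if let Some(v) = self.map.get(key) {
            return v.clone();
        }
        let v = miss(key);
        self.map.insert(key.clone(), v.clone());
        v
    }
}

fn main() {
    let mut idl_cache: Cache<String, Vec<u64>> = Cache::new();
    // Stand-in for the sqlite index lookup done on a cache miss.
    let fetch_from_db = |key: &String| -> Vec<u64> {
        println!("db lookup for {}", key);
        vec![1, 2, 3]
    };
    // First call misses and goes to the "db"; second call is served from cache.
    let a = idl_cache.get_or_load(&"name idx william".to_string(), fetch_from_db);
    let b = idl_cache.get_or_load(&"name idx william".to_string(), fetch_from_db);
    assert_eq!(a, b);
}
```

The actual implementation in this diff does the same thing transactionally (ArcReadTxn/ArcWriteTxn, with commits ordered after the SQLite commit), which is what drives the extra `mut` and lifetime churn across the rest of the code base.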
Firstyear 2020-05-11 21:12:32 +10:00 committed by GitHub
parent d5c46545e3
commit 15bc8d4120
49 changed files with 1627 additions and 1009 deletions

Cargo.lock (generated, 1209 changes): file diff suppressed because it is too large.

View file

@ -1,5 +1,4 @@
use crate::{ClientError, KanidmClientBuilder, APPLICATION_JSON};
use reqwest;
use reqwest::header::CONTENT_TYPE;
use serde::de::DeserializeOwned;
use serde::Serialize;

View file

@ -4,7 +4,6 @@
#[macro_use]
extern crate log;
use reqwest;
use reqwest::header::CONTENT_TYPE;
use serde::de::DeserializeOwned;
use serde::Serialize;
@ -14,7 +13,6 @@ use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::time::Duration;
use toml;
use uuid::Uuid;
use kanidm_proto::v1::{
@ -24,13 +22,12 @@ use kanidm_proto::v1::{
SetCredentialRequest, SetCredentialResponse, SingleStringRequest, TOTPSecret, UnixGroupToken,
UnixUserToken, UserAuthToken, WhoamiResponse,
};
use serde_json;
pub mod asynchronous;
use crate::asynchronous::KanidmAsyncClient;
pub const APPLICATION_JSON: &'static str = "application/json";
pub const APPLICATION_JSON: &str = "application/json";
#[derive(Debug)]
pub enum ClientError {
@ -570,7 +567,7 @@ impl KanidmClient {
gidnumber: Option<u32>,
) -> Result<(), ClientError> {
let gx = GroupUnixExtend {
gidnumber: gidnumber,
gidnumber,
};
self.perform_post_request(format!("/v1/group/{}/_unix", id).as_str(), gx)
}
@ -733,7 +730,7 @@ impl KanidmClient {
) -> Result<(), ClientError> {
let ux = AccountUnixExtend {
shell: shell.map(|s| s.to_string()),
gidnumber: gidnumber,
gidnumber,
};
self.perform_post_request(format!("/v1/account/{}/_unix", id).as_str(), ux)
}

View file

@ -87,7 +87,7 @@ fn is_attr_writable(rsclient: &KanidmClient, id: &str, attr: &str) -> Option<boo
id,
"k1",
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAeGW1P6Pc2rPq0XqbRaDKBcXZUPRklo0\
L1EyR30CwoP william@amethyst",
L1EyR30CwoP william@amethyst",
)
.is_ok(),
),

View file

@ -1,4 +1,3 @@
use base32;
use std::collections::BTreeMap;
use std::fmt;
use uuid::Uuid;
@ -405,14 +404,13 @@ pub enum TOTPAlgo {
Sha512,
}
impl TOTPAlgo {
pub fn to_string(&self) -> String {
impl fmt::Display for TOTPAlgo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
TOTPAlgo::Sha1 => "SHA1",
TOTPAlgo::Sha256 => "SHA256",
TOTPAlgo::Sha512 => "SHA512",
TOTPAlgo::Sha1 => write!(f, "SHA1"),
TOTPAlgo::Sha256 => write!(f, "SHA256"),
TOTPAlgo::Sha512 => write!(f, "SHA512"),
}
.to_string()
}
}

View file

@ -8,9 +8,7 @@ use kanidm_proto::v1::Modify;
use log::{debug, error, info};
use rayon::prelude::*;
use serde_json;
use structopt::StructOpt;
use zxcvbn;
#[derive(Debug, StructOpt)]
struct ClientOpt {

View file

@ -156,7 +156,7 @@ impl AccountOpt {
}
}
pub fn exec(&self) -> () {
pub fn exec(&self) {
match self {
// id/cred/primary/set
AccountOpt::Credential(acopt) => match acopt {

View file

@ -1,5 +1,4 @@
use kanidm_client::{KanidmClient, KanidmClientBuilder};
use shellexpand;
use std::path::PathBuf;
use structopt::StructOpt;

View file

@ -66,7 +66,7 @@ impl GroupOpt {
}
}
pub fn exec(&self) -> () {
pub fn exec(&self) {
match self {
GroupOpt::List(copt) => {
let client = copt.to_client();

View file

@ -32,7 +32,7 @@ impl SelfOpt {
}
}
pub fn exec(&self) -> () {
pub fn exec(&self) {
match self {
SelfOpt::Whoami(copt) => {
let client = copt.to_client();
@ -91,7 +91,7 @@ impl ClientOpt {
}
}
pub fn exec(&self) -> () {
pub fn exec(&self) {
match self {
ClientOpt::Raw(ropt) => ropt.exec(),
ClientOpt::CSelf(csopt) => csopt.exec(),

View file

@ -67,7 +67,7 @@ impl RawOpt {
}
}
pub fn exec(&self) -> () {
pub fn exec(&self) {
match self {
RawOpt::Search(sopt) => {
let client = sopt.commonopts.to_client();

View file

@ -23,7 +23,7 @@ impl RecycleOpt {
}
}
pub fn exec(&self) -> () {
pub fn exec(&self) {
match self {
RecycleOpt::List(copt) => {
let client = copt.to_client();

View file

@ -3,7 +3,6 @@ use std::path::PathBuf;
use kanidm_client::KanidmClientBuilder;
use log::debug;
use shellexpand;
use structopt::StructOpt;
#[derive(Debug, StructOpt)]

View file

@ -41,7 +41,6 @@ chrono = "0.4"
cookie = "0.13"
regex = "1"
lazy_static = "1.2.0"
# lru = "0.1"
tokio = "0.2"
futures = "0.3"
@ -60,6 +59,7 @@ structopt = { version = "0.3", default-features = false }
time = "0.1"
concread = "0.1"
# concread = { path = "../../concread" }
openssl = "0.10"
sshkeys = "0.1"
@ -68,7 +68,7 @@ rpassword = "4.0"
num_cpus = "1.10"
idlset = { version = "0.1" , features = ["use_smallvec"] }
# idlset = { version = "0.1" }
# idlset = { path = "../../idlset", features = ["use_smallvec"] }
zxcvbn = "2.0"
base64 = "0.12"

View file

@ -52,7 +52,7 @@ pub struct AccessControlSearch {
impl AccessControlSearch {
pub fn try_from(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
value: &Entry<EntrySealed, EntryCommitted>,
) -> Result<Self, OperationError> {
if !value.attribute_value_pres("class", &CLASS_ACS) {
@ -102,7 +102,7 @@ pub struct AccessControlDelete {
impl AccessControlDelete {
pub fn try_from(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
value: &Entry<EntrySealed, EntryCommitted>,
) -> Result<Self, OperationError> {
if !value.attribute_value_pres("class", &CLASS_ACD) {
@ -145,7 +145,7 @@ pub struct AccessControlCreate {
impl AccessControlCreate {
pub fn try_from(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
value: &Entry<EntrySealed, EntryCommitted>,
) -> Result<Self, OperationError> {
if !value.attribute_value_pres("class", &CLASS_ACC) {
@ -203,7 +203,7 @@ pub struct AccessControlModify {
impl AccessControlModify {
pub fn try_from(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
value: &Entry<EntrySealed, EntryCommitted>,
) -> Result<Self, OperationError> {
if !value.attribute_value_pres("class", &CLASS_ACM) {
@ -271,7 +271,7 @@ struct AccessControlProfile {
impl AccessControlProfile {
fn try_from(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
value: &Entry<EntrySealed, EntryCommitted>,
) -> Result<Self, OperationError> {
// Assert we have class access_control_profile
@ -1308,11 +1308,11 @@ mod tests {
// really protects us *a lot* here, but it's nice to have defence and
// layers of validation.
let qs_write = qs.write(duration_from_epoch_now());
let mut qs_write = qs.write(duration_from_epoch_now());
acp_from_entry_err!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1327,7 +1327,7 @@ mod tests {
acp_from_entry_err!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1342,7 +1342,7 @@ mod tests {
acp_from_entry_err!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1360,7 +1360,7 @@ mod tests {
// "\"Self\""
acp_from_entry_ok!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1384,11 +1384,11 @@ mod tests {
#[test]
fn test_access_acp_delete_parser() {
run_test!(|qs: &QueryServer, audit: &mut AuditScope| {
let qs_write = qs.write(duration_from_epoch_now());
let mut qs_write = qs.write(duration_from_epoch_now());
acp_from_entry_err!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1409,7 +1409,7 @@ mod tests {
acp_from_entry_ok!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1434,12 +1434,12 @@ mod tests {
fn test_access_acp_search_parser() {
run_test!(|qs: &QueryServer, audit: &mut AuditScope| {
// Test that parsing search access controls works.
let qs_write = qs.write(duration_from_epoch_now());
let mut qs_write = qs.write(duration_from_epoch_now());
// Missing class acp
acp_from_entry_err!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1462,7 +1462,7 @@ mod tests {
// Missing class acs
acp_from_entry_err!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1485,7 +1485,7 @@ mod tests {
// Missing attr acp_search_attr
acp_from_entry_err!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1507,7 +1507,7 @@ mod tests {
// All good!
acp_from_entry_ok!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1533,11 +1533,11 @@ mod tests {
fn test_access_acp_modify_parser() {
run_test!(|qs: &QueryServer, audit: &mut AuditScope| {
// Test that parsing modify access controls works.
let qs_write = qs.write(duration_from_epoch_now());
let mut qs_write = qs.write(duration_from_epoch_now());
acp_from_entry_err!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1561,7 +1561,7 @@ mod tests {
acp_from_entry_ok!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1582,7 +1582,7 @@ mod tests {
acp_from_entry_ok!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1610,11 +1610,11 @@ mod tests {
fn test_access_acp_create_parser() {
run_test!(|qs: &QueryServer, audit: &mut AuditScope| {
// Test that parsing create access controls works.
let qs_write = qs.write(duration_from_epoch_now());
let mut qs_write = qs.write(duration_from_epoch_now());
acp_from_entry_err!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1637,7 +1637,7 @@ mod tests {
acp_from_entry_ok!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1658,7 +1658,7 @@ mod tests {
acp_from_entry_ok!(
audit,
&qs_write,
&mut qs_write,
r#"{
"valid": null,
"state": null,
@ -1688,7 +1688,7 @@ mod tests {
// given a single &str, we can evaluate all types from a single record.
// This is valid, and could exist, IE a rule to allow create, search and modify
// over a single scope.
let qs_write = qs.write(duration_from_epoch_now());
let mut qs_write = qs.write(duration_from_epoch_now());
let e: &str = r#"{
"valid": null,
@ -1719,10 +1719,10 @@ mod tests {
}
}"#;
acp_from_entry_ok!(audit, &qs_write, e, AccessControlCreate);
acp_from_entry_ok!(audit, &qs_write, e, AccessControlDelete);
acp_from_entry_ok!(audit, &qs_write, e, AccessControlModify);
acp_from_entry_ok!(audit, &qs_write, e, AccessControlSearch);
acp_from_entry_ok!(audit, &mut qs_write, e, AccessControlCreate);
acp_from_entry_ok!(audit, &mut qs_write, e, AccessControlDelete);
acp_from_entry_ok!(audit, &mut qs_write, e, AccessControlModify);
acp_from_entry_ok!(audit, &mut qs_write, e, AccessControlSearch);
})
}

View file

@ -207,10 +207,10 @@ impl Handler<SearchMessage> for QueryServerReadV1 {
let mut audit = AuditScope::new("search");
let res = audit_segment!(&mut audit, || {
// Begin a read
let qs_read = self.qs.read();
let mut qs_read = self.qs.read();
// Make an event from the request
let srch = match SearchEvent::from_message(&mut audit, msg, &qs_read) {
let srch = match SearchEvent::from_message(&mut audit, msg, &mut qs_read) {
Ok(s) => s,
Err(e) => {
audit_log!(audit, "Failed to begin search: {:?}", e);
@ -221,9 +221,8 @@ impl Handler<SearchMessage> for QueryServerReadV1 {
audit_log!(audit, "Begin event {:?}", srch);
match qs_read.search_ext(&mut audit, &srch) {
Ok(entries) => {
SearchResult::new(&mut audit, &qs_read, entries).map(|ok_sr| ok_sr.response())
}
Ok(entries) => SearchResult::new(&mut audit, &mut qs_read, entries)
.map(|ok_sr| ok_sr.response()),
Err(e) => Err(e),
}
});
@ -286,7 +285,7 @@ impl Handler<WhoamiMessage> for QueryServerReadV1 {
let res = audit_segment!(&mut audit, || {
// TODO #62: Move this to IdmServer!!!
// Begin a read
let qs_read = self.qs.read();
let mut qs_read = self.qs.read();
// Make an event from the whoami request. This will process the event and
// generate a selfuuid search.
@ -297,7 +296,7 @@ impl Handler<WhoamiMessage> for QueryServerReadV1 {
// this far.
let uat = msg.uat.clone().ok_or(OperationError::NotAuthenticated)?;
let srch = match SearchEvent::from_whoami_request(&mut audit, msg.uat, &qs_read) {
let srch = match SearchEvent::from_whoami_request(&mut audit, msg.uat, &mut qs_read) {
Ok(s) => s,
Err(e) => {
audit_log!(audit, "Failed to begin whoami: {:?}", e);
@ -315,7 +314,7 @@ impl Handler<WhoamiMessage> for QueryServerReadV1 {
1 => {
let e = entries.pop().expect("Entry length mismatch!!!");
// Now convert to a response, and return
WhoamiResult::new(&mut audit, &qs_read, e, uat)
WhoamiResult::new(&mut audit, &mut qs_read, e, uat)
.map(|ok_wr| ok_wr.response())
}
// Somehow we matched multiple, which should be impossible.
@ -339,10 +338,10 @@ impl Handler<InternalSearchMessage> for QueryServerReadV1 {
fn handle(&mut self, msg: InternalSearchMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_search_message");
let res = audit_segment!(&mut audit, || {
let qs_read = self.qs.read();
let mut qs_read = self.qs.read();
// Make an event from the request
let srch = match SearchEvent::from_internal_message(&mut audit, msg, &qs_read) {
let srch = match SearchEvent::from_internal_message(&mut audit, msg, &mut qs_read) {
Ok(s) => s,
Err(e) => {
audit_log!(audit, "Failed to begin search: {:?}", e);
@ -353,7 +352,7 @@ impl Handler<InternalSearchMessage> for QueryServerReadV1 {
audit_log!(audit, "Begin event {:?}", srch);
match qs_read.search_ext(&mut audit, &srch) {
Ok(entries) => SearchResult::new(&mut audit, &qs_read, entries)
Ok(entries) => SearchResult::new(&mut audit, &mut qs_read, entries)
.map(|ok_sr| ok_sr.into_proto_array()),
Err(e) => Err(e),
}
@ -373,21 +372,22 @@ impl Handler<InternalSearchRecycledMessage> for QueryServerReadV1 {
) -> Self::Result {
let mut audit = AuditScope::new("internal_search_recycle_message");
let res = audit_segment!(&mut audit, || {
let qs_read = self.qs.read();
let mut qs_read = self.qs.read();
// Make an event from the request
let srch = match SearchEvent::from_internal_recycle_message(&mut audit, msg, &qs_read) {
Ok(s) => s,
Err(e) => {
audit_log!(audit, "Failed to begin recycled search: {:?}", e);
return Err(e);
}
};
let srch =
match SearchEvent::from_internal_recycle_message(&mut audit, msg, &mut qs_read) {
Ok(s) => s,
Err(e) => {
audit_log!(audit, "Failed to begin recycled search: {:?}", e);
return Err(e);
}
};
audit_log!(audit, "Begin event {:?}", srch);
match qs_read.search_ext(&mut audit, &srch) {
Ok(entries) => SearchResult::new(&mut audit, &qs_read, entries)
Ok(entries) => SearchResult::new(&mut audit, &mut qs_read, entries)
.map(|ok_sr| ok_sr.into_proto_array()),
Err(e) => Err(e),
}
@ -403,7 +403,7 @@ impl Handler<InternalRadiusReadMessage> for QueryServerReadV1 {
fn handle(&mut self, msg: InternalRadiusReadMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_radius_read_message");
let res = audit_segment!(&mut audit, || {
let qs_read = self.qs.read();
let mut qs_read = self.qs.read();
let target_uuid = match Uuid::parse_str(msg.uuid_or_name.as_str()) {
Ok(u) => u,
@ -420,7 +420,7 @@ impl Handler<InternalRadiusReadMessage> for QueryServerReadV1 {
&mut audit,
msg.uat,
target_uuid,
&qs_read,
&mut qs_read,
) {
Ok(s) => s,
Err(e) => {
@ -461,7 +461,7 @@ impl Handler<InternalRadiusTokenReadMessage> for QueryServerReadV1 {
) -> Self::Result {
let mut audit = AuditScope::new("internal_radius_token_read_message");
let res = audit_segment!(&mut audit, || {
let idm_read = self.idms.proxy_read();
let mut idm_read = self.idms.proxy_read();
let target_uuid = match Uuid::parse_str(msg.uuid_or_name.as_str()) {
Ok(u) => u,
@ -477,7 +477,7 @@ impl Handler<InternalRadiusTokenReadMessage> for QueryServerReadV1 {
// Make an event from the request
let rate = match RadiusAuthTokenEvent::from_parts(
&mut audit,
&idm_read.qs_read,
&mut idm_read.qs_read,
msg.uat,
target_uuid,
) {
@ -507,7 +507,7 @@ impl Handler<InternalUnixUserTokenReadMessage> for QueryServerReadV1 {
) -> Self::Result {
let mut audit = AuditScope::new("internal_unix_token_read_message");
let res = audit_segment!(&mut audit, || {
let idm_read = self.idms.proxy_read();
let mut idm_read = self.idms.proxy_read();
let target_uuid = Uuid::parse_str(msg.uuid_or_name.as_str()).or_else(|_| {
idm_read
@ -522,7 +522,7 @@ impl Handler<InternalUnixUserTokenReadMessage> for QueryServerReadV1 {
// Make an event from the request
let rate = match UnixUserTokenEvent::from_parts(
&mut audit,
&idm_read.qs_read,
&mut idm_read.qs_read,
msg.uat,
target_uuid,
) {
@ -552,7 +552,7 @@ impl Handler<InternalUnixGroupTokenReadMessage> for QueryServerReadV1 {
) -> Self::Result {
let mut audit = AuditScope::new("internal_unixgroup_token_read_message");
let res = audit_segment!(&mut audit, || {
let idm_read = self.idms.proxy_read();
let mut idm_read = self.idms.proxy_read();
let target_uuid = Uuid::parse_str(msg.uuid_or_name.as_str()).or_else(|_| {
idm_read
@ -567,7 +567,7 @@ impl Handler<InternalUnixGroupTokenReadMessage> for QueryServerReadV1 {
// Make an event from the request
let rate = match UnixGroupTokenEvent::from_parts(
&mut audit,
&idm_read.qs_read,
&mut idm_read.qs_read,
msg.uat,
target_uuid,
) {
@ -593,7 +593,7 @@ impl Handler<InternalSshKeyReadMessage> for QueryServerReadV1 {
fn handle(&mut self, msg: InternalSshKeyReadMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_sshkey_read_message");
let res = audit_segment!(&mut audit, || {
let qs_read = self.qs.read();
let mut qs_read = self.qs.read();
let target_uuid = match Uuid::parse_str(msg.uuid_or_name.as_str()) {
Ok(u) => u,
@ -610,7 +610,7 @@ impl Handler<InternalSshKeyReadMessage> for QueryServerReadV1 {
&mut audit,
msg.uat,
target_uuid,
&qs_read,
&mut qs_read,
) {
Ok(s) => s,
Err(e) => {
@ -650,7 +650,7 @@ impl Handler<InternalSshKeyTagReadMessage> for QueryServerReadV1 {
fn handle(&mut self, msg: InternalSshKeyTagReadMessage, _: &mut Self::Context) -> Self::Result {
let mut audit = AuditScope::new("internal_sshkey_tag_read_message");
let res = audit_segment!(&mut audit, || {
let qs_read = self.qs.read();
let mut qs_read = self.qs.read();
let InternalSshKeyTagReadMessage {
uat,
@ -669,15 +669,18 @@ impl Handler<InternalSshKeyTagReadMessage> for QueryServerReadV1 {
};
// Make an event from the request
let srch =
match SearchEvent::from_target_uuid_request(&mut audit, uat, target_uuid, &qs_read)
{
Ok(s) => s,
Err(e) => {
audit_log!(audit, "Failed to begin search: {:?}", e);
return Err(e);
}
};
let srch = match SearchEvent::from_target_uuid_request(
&mut audit,
uat,
target_uuid,
&mut qs_read,
) {
Ok(s) => s,
Err(e) => {
audit_log!(audit, "Failed to begin search: {:?}", e);
return Err(e);
}
};
audit_log!(audit, "Begin event {:?}", srch);
@ -731,7 +734,7 @@ impl Handler<IdmAccountUnixAuthMessage> for QueryServerReadV1 {
// Make an event from the request
let uuae = match UnixUserAuthEvent::from_parts(
&mut audit,
&idm_write.qs_read,
&mut idm_write.qs_read,
msg.uat,
target_uuid,
msg.cred,

View file

@ -323,7 +323,8 @@ impl QueryServerWriteV1 {
};
let mdf =
match ModifyEvent::from_parts(audit, uat, target_uuid, proto_ml, filter, &qs_write) {
match ModifyEvent::from_parts(audit, uat, target_uuid, proto_ml, filter, &mut qs_write)
{
Ok(m) => m,
Err(e) => {
audit_log!(audit, "Failed to begin modify: {:?}", e);
@ -364,7 +365,7 @@ impl QueryServerWriteV1 {
target_uuid,
ml,
filter,
&qs_write,
&mut qs_write,
) {
Ok(m) => m,
Err(e) => {
@ -389,7 +390,7 @@ impl Handler<CreateMessage> for QueryServerWriteV1 {
let res = audit_segment!(&mut audit, || {
let mut qs_write = self.qs.write(duration_from_epoch_now());
let crt = match CreateEvent::from_message(&mut audit, msg, &qs_write) {
let crt = match CreateEvent::from_message(&mut audit, msg, &mut qs_write) {
Ok(c) => c,
Err(e) => {
audit_log!(audit, "Failed to begin create: {:?}", e);
@ -416,7 +417,7 @@ impl Handler<ModifyMessage> for QueryServerWriteV1 {
let mut audit = AuditScope::new("modify");
let res = audit_segment!(&mut audit, || {
let mut qs_write = self.qs.write(duration_from_epoch_now());
let mdf = match ModifyEvent::from_message(&mut audit, msg, &qs_write) {
let mdf = match ModifyEvent::from_message(&mut audit, msg, &mut qs_write) {
Ok(m) => m,
Err(e) => {
audit_log!(audit, "Failed to begin modify: {:?}", e);
@ -443,7 +444,7 @@ impl Handler<DeleteMessage> for QueryServerWriteV1 {
let res = audit_segment!(&mut audit, || {
let mut qs_write = self.qs.write(duration_from_epoch_now());
let del = match DeleteEvent::from_message(&mut audit, msg, &qs_write) {
let del = match DeleteEvent::from_message(&mut audit, msg, &mut qs_write) {
Ok(d) => d,
Err(e) => {
audit_log!(audit, "Failed to begin delete: {:?}", e);
@ -470,7 +471,8 @@ impl Handler<InternalDeleteMessage> for QueryServerWriteV1 {
let res = audit_segment!(&mut audit, || {
let mut qs_write = self.qs.write(duration_from_epoch_now());
let del = match DeleteEvent::from_parts(&mut audit, msg.uat, msg.filter, &qs_write) {
let del = match DeleteEvent::from_parts(&mut audit, msg.uat, msg.filter, &mut qs_write)
{
Ok(d) => d,
Err(e) => {
audit_log!(audit, "Failed to begin delete: {:?}", e);
@ -497,14 +499,18 @@ impl Handler<ReviveRecycledMessage> for QueryServerWriteV1 {
let res = audit_segment!(&mut audit, || {
let mut qs_write = self.qs.write(duration_from_epoch_now());
let rev =
match ReviveRecycledEvent::from_parts(&mut audit, msg.uat, msg.filter, &qs_write) {
Ok(r) => r,
Err(e) => {
audit_log!(audit, "Failed to begin revive: {:?}", e);
return Err(e);
}
};
let rev = match ReviveRecycledEvent::from_parts(
&mut audit,
msg.uat,
msg.filter,
&mut qs_write,
) {
Ok(r) => r,
Err(e) => {
audit_log!(audit, "Failed to begin revive: {:?}", e);
return Err(e);
}
};
audit_log!(audit, "Begin revive event {:?}", rev);
@ -552,7 +558,7 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
SetCredentialRequest::Password(cleartext) => {
let pce = PasswordChangeEvent::from_parts(
&mut audit,
&idms_prox_write.qs_write,
&mut idms_prox_write.qs_write,
msg.uat,
target_uuid,
cleartext,
@ -574,7 +580,7 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
SetCredentialRequest::GeneratePassword => {
let gpe = GeneratePasswordEvent::from_parts(
&mut audit,
&idms_prox_write.qs_write,
&mut idms_prox_write.qs_write,
msg.uat,
target_uuid,
msg.appid,
@ -595,7 +601,7 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
SetCredentialRequest::TOTPGenerate(label) => {
let gte = GenerateTOTPEvent::from_parts(
&mut audit,
&idms_prox_write.qs_write,
&mut idms_prox_write.qs_write,
msg.uat,
target_uuid,
label,
@ -615,7 +621,7 @@ impl Handler<InternalCredentialSetMessage> for QueryServerWriteV1 {
SetCredentialRequest::TOTPVerify(uuid, chal) => {
let vte = VerifyTOTPEvent::from_parts(
&mut audit,
&idms_prox_write.qs_write,
&mut idms_prox_write.qs_write,
msg.uat,
target_uuid,
uuid,
@ -652,7 +658,7 @@ impl Handler<IdmAccountSetPasswordMessage> for QueryServerWriteV1 {
let pce = PasswordChangeEvent::from_idm_account_set_password(
&mut audit,
&idms_prox_write.qs_write,
&mut idms_prox_write.qs_write,
msg,
)
.map_err(|e| {
@ -697,7 +703,7 @@ impl Handler<InternalRegenerateRadiusMessage> for QueryServerWriteV1 {
let rrse = RegenerateRadiusSecretEvent::from_parts(
&mut audit,
&idms_prox_write.qs_write,
&mut idms_prox_write.qs_write,
msg.uat,
target_uuid,
)
@ -742,7 +748,7 @@ impl Handler<PurgeAttributeMessage> for QueryServerWriteV1 {
target_uuid,
msg.attr,
msg.filter,
&qs_write,
&mut qs_write,
) {
Ok(m) => m,
Err(e) => {
@ -788,7 +794,7 @@ impl Handler<RemoveAttributeValueMessage> for QueryServerWriteV1 {
target_uuid,
proto_ml,
msg.filter,
&qs_write,
&mut qs_write,
) {
Ok(m) => m,
Err(e) => {
@ -1021,7 +1027,7 @@ impl Handler<IdmAccountUnixSetCredMessage> for QueryServerWriteV1 {
let upce = UnixPasswordChangeEvent::from_parts(
&mut audit,
&idms_prox_write.qs_write,
&mut idms_prox_write.qs_write,
msg.uat,
target_uuid,
msg.cred,
@ -1049,7 +1055,7 @@ impl Handler<PurgeTombstoneEvent> for QueryServerWriteV1 {
let mut audit = AuditScope::new("purge tombstones");
audit_segment!(&mut audit, || {
audit_log!(audit, "Begin purge tombstone event {:?}", msg);
let qs_write = self.qs.write(duration_from_epoch_now());
let mut qs_write = self.qs.write(duration_from_epoch_now());
let res = qs_write
.purge_tombstones(&mut audit)
@ -1069,7 +1075,7 @@ impl Handler<PurgeRecycledEvent> for QueryServerWriteV1 {
let mut audit = AuditScope::new("purge recycled");
audit_segment!(&mut audit, || {
audit_log!(audit, "Begin purge recycled event {:?}", msg);
let qs_write = self.qs.write(duration_from_epoch_now());
let mut qs_write = self.qs.write(duration_from_epoch_now());
let res = qs_write
.purge_recycled(&mut audit)

View file

@ -5,7 +5,6 @@ use std::time::SystemTime;
use chrono::offset::Utc;
use chrono::DateTime;
use serde_json;
#[macro_export]
macro_rules! audit_log {

View file

@ -0,0 +1,484 @@
use crate::audit::AuditScope;
use crate::be::idl_sqlite::{
IdlSqlite, IdlSqliteReadTransaction, IdlSqliteTransaction, IdlSqliteWriteTransaction,
};
use crate::be::{IdRawEntry, IDL};
use crate::entry::{Entry, EntryCommitted, EntrySealed};
use crate::value::IndexType;
use concread::cache::arc::{Arc, ArcReadTxn, ArcWriteTxn};
use idlset::IDLBitRange;
use kanidm_proto::v1::{ConsistencyError, OperationError};
use uuid::Uuid;
// use std::borrow::Borrow;
const DEFAULT_CACHE_TARGET: usize = 1024;
const DEFAULT_IDL_CACHE_RATIO: usize = 16;
const DEFAULT_CACHE_RMISS: usize = 8;
const DEFAULT_CACHE_WMISS: usize = 8;
#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
struct IdlCacheKey {
a: String,
i: IndexType,
k: String,
}
/*
impl Borrow<(&str, &IndexType, &str)> for IdlCacheKey {
#[inline]
fn borrow(&self) -> &(&str, &IndexType, &str) {
&(self.a.as_str(), &self.i, self.k.as_str())
}
}
impl From<(&str, &IndexType, &str)> for IdlCacheKey {
fn from((a, i, k): (&str, &IndexType, &str)) -> IdlCacheKey {
IdlCacheKey {
a: a.to_string(), i: (*i).clone(), k: k.to_string()
}
}
}
*/
pub struct IdlArcSqlite {
db: IdlSqlite,
entry_cache: Arc<u64, Box<Entry<EntrySealed, EntryCommitted>>>,
idl_cache: Arc<IdlCacheKey, Box<IDLBitRange>>,
}
pub struct IdlArcSqliteReadTransaction<'a> {
db: IdlSqliteReadTransaction,
entry_cache: ArcReadTxn<'a, u64, Box<Entry<EntrySealed, EntryCommitted>>>,
idl_cache: ArcReadTxn<'a, IdlCacheKey, Box<IDLBitRange>>,
}
pub struct IdlArcSqliteWriteTransaction<'a> {
db: IdlSqliteWriteTransaction,
entry_cache: ArcWriteTxn<'a, u64, Box<Entry<EntrySealed, EntryCommitted>>>,
idl_cache: ArcWriteTxn<'a, IdlCacheKey, Box<IDLBitRange>>,
}
macro_rules! get_identry {
(
$self:expr,
$au:expr,
$idl:expr
) => {{
match $idl {
IDL::Partial(idli) | IDL::Indexed(idli) => {
let mut result: Vec<Entry<_, _>> = Vec::new();
let mut nidl = IDLBitRange::new();
idli.into_iter().for_each(|i| {
// For all the id's in idl.
// is it in the cache?
match $self.entry_cache.get(&i) {
Some(eref) => result.push(eref.as_ref().clone()),
None => unsafe { nidl.push_id(i) },
}
});
// Now, get anything from nidl that is needed.
let mut db_result = $self.db.get_identry($au, &IDL::Partial(nidl))?;
// Clone everything from db_result into the cache.
db_result.iter().for_each(|e| {
$self.entry_cache.insert(e.get_id(), Box::new(e.clone()));
});
// Merge the two vecs
result.append(&mut db_result);
// Return
Ok(result)
}
IDL::ALLIDS => $self.db.get_identry($au, $idl),
}
}};
}
macro_rules! get_identry_raw {
(
$self:expr,
$au:expr,
$idl:expr
) => {{
// As a cache we have no concept of this, so we just bypass to the db.
$self.db.get_identry_raw($au, $idl)
}};
}
macro_rules! exists_idx {
(
$self:expr,
$audit:expr,
$attr:expr,
$itype:expr
) => {{
// As a cache we have no concept of this, so we just bypass to the db.
$self.db.exists_idx($audit, $attr, $itype)
}};
}
macro_rules! get_idl {
(
$self:expr,
$audit:expr,
$attr:expr,
$itype:expr,
$idx_key:expr
) => {{
// TODO: Find a way to implement borrow for this properly
// First attempt to get from this cache.
let cache_key = IdlCacheKey {
a: $attr.to_string(),
i: $itype.clone(),
k: $idx_key.to_string(),
};
let cache_r = $self.idl_cache.get(&cache_key);
// If hit, continue.
if let Some(ref data) = cache_r {
return Ok(Some(data.as_ref().clone()));
}
// If miss, get from db *and* insert to the cache.
let db_r = $self.db.get_idl($audit, $attr, $itype, $idx_key)?;
if let Some(ref idl) = db_r {
$self.idl_cache.insert(cache_key, Box::new(idl.clone()))
}
Ok(db_r)
}};
}
pub trait IdlArcSqliteTransaction {
fn get_identry(
&mut self,
au: &mut AuditScope,
idl: &IDL,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError>;
fn get_identry_raw(
&self,
au: &mut AuditScope,
idl: &IDL,
) -> Result<Vec<IdRawEntry>, OperationError>;
fn exists_idx(
&mut self,
audit: &mut AuditScope,
attr: &str,
itype: &IndexType,
) -> Result<bool, OperationError>;
fn get_idl(
&mut self,
audit: &mut AuditScope,
attr: &str,
itype: &IndexType,
idx_key: &str,
) -> Result<Option<IDLBitRange>, OperationError>;
fn get_db_s_uuid(&self) -> Result<Option<Uuid>, OperationError>;
fn get_db_d_uuid(&self) -> Result<Option<Uuid>, OperationError>;
fn verify(&self) -> Vec<Result<(), ConsistencyError>>;
}
impl<'a> IdlArcSqliteTransaction for IdlArcSqliteReadTransaction<'a> {
fn get_identry(
&mut self,
au: &mut AuditScope,
idl: &IDL,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
get_identry!(self, au, idl)
}
fn get_identry_raw(
&self,
au: &mut AuditScope,
idl: &IDL,
) -> Result<Vec<IdRawEntry>, OperationError> {
get_identry_raw!(self, au, idl)
}
fn exists_idx(
&mut self,
audit: &mut AuditScope,
attr: &str,
itype: &IndexType,
) -> Result<bool, OperationError> {
exists_idx!(self, audit, attr, itype)
}
fn get_idl(
&mut self,
audit: &mut AuditScope,
attr: &str,
itype: &IndexType,
idx_key: &str,
) -> Result<Option<IDLBitRange>, OperationError> {
get_idl!(self, audit, attr, itype, idx_key)
}
fn get_db_s_uuid(&self) -> Result<Option<Uuid>, OperationError> {
self.db.get_db_s_uuid()
}
fn get_db_d_uuid(&self) -> Result<Option<Uuid>, OperationError> {
self.db.get_db_d_uuid()
}
fn verify(&self) -> Vec<Result<(), ConsistencyError>> {
self.db.verify()
}
}
impl<'a> IdlArcSqliteTransaction for IdlArcSqliteWriteTransaction<'a> {
fn get_identry(
&mut self,
au: &mut AuditScope,
idl: &IDL,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
get_identry!(self, au, idl)
}
fn get_identry_raw(
&self,
au: &mut AuditScope,
idl: &IDL,
) -> Result<Vec<IdRawEntry>, OperationError> {
get_identry_raw!(self, au, idl)
}
fn exists_idx(
&mut self,
audit: &mut AuditScope,
attr: &str,
itype: &IndexType,
) -> Result<bool, OperationError> {
exists_idx!(self, audit, attr, itype)
}
fn get_idl(
&mut self,
audit: &mut AuditScope,
attr: &str,
itype: &IndexType,
idx_key: &str,
) -> Result<Option<IDLBitRange>, OperationError> {
get_idl!(self, audit, attr, itype, idx_key)
}
fn get_db_s_uuid(&self) -> Result<Option<Uuid>, OperationError> {
self.db.get_db_s_uuid()
}
fn get_db_d_uuid(&self) -> Result<Option<Uuid>, OperationError> {
self.db.get_db_d_uuid()
}
fn verify(&self) -> Vec<Result<(), ConsistencyError>> {
self.db.verify()
}
}
impl<'a> IdlArcSqliteWriteTransaction<'a> {
pub fn commit(self, audit: &mut AuditScope) -> Result<(), OperationError> {
let IdlArcSqliteWriteTransaction {
db,
entry_cache,
idl_cache,
} = self;
// Undo the caches in the reverse order.
db.commit(audit).and_then(|r| {
idl_cache.commit();
entry_cache.commit();
Ok(r)
})
}
pub fn get_id2entry_max_id(&self) -> Result<u64, OperationError> {
// TODO: We could cache this too, and have this via the setup call
// to get the init value, using the ArcCell.
self.db.get_id2entry_max_id()
}
pub fn write_identries<'b, I>(
&'b mut self,
au: &mut AuditScope,
entries: I,
) -> Result<(), OperationError>
where
I: Iterator<Item = &'b Entry<EntrySealed, EntryCommitted>>,
{
// Danger! We know that the entry cache is valid to manipulate here
// but rust doesn't know that so it prevents the mut/immut borrow.
let e_cache = unsafe { &mut *(&mut self.entry_cache as *mut ArcWriteTxn<_, _>) };
let m_entries = entries.map(|e| {
e_cache.insert(e.get_id(), Box::new(e.clone()));
e
});
self.db.write_identries(au, m_entries)
}
pub fn write_identries_raw<I>(
&mut self,
au: &mut AuditScope,
entries: I,
) -> Result<(), OperationError>
where
I: Iterator<Item = IdRawEntry>,
{
// Drop the entry cache.
self.entry_cache.clear();
// Write the raw ents
self.db.write_identries_raw(au, entries)
}
pub fn delete_identry<I>(&mut self, au: &mut AuditScope, idl: I) -> Result<(), OperationError>
where
I: Iterator<Item = u64>,
{
// Danger! We know that the entry cache is valid to manipulate here
// but rust doesn't know that so it prevents the mut/immut borrow.
let e_cache = unsafe { &mut *(&mut self.entry_cache as *mut ArcWriteTxn<_, _>) };
let m_idl = idl.map(|i| {
e_cache.remove(i);
i
});
self.db.delete_identry(au, m_idl)
}
pub fn write_idl(
&mut self,
audit: &mut AuditScope,
attr: &str,
itype: &IndexType,
idx_key: &str,
idl: &IDLBitRange,
) -> Result<(), OperationError> {
let cache_key = IdlCacheKey {
a: attr.to_string(),
i: itype.clone(),
k: idx_key.to_string(),
};
// On idl == 0 the db will remove this, and synthesise an empty IDL on a miss
// but we can cache this as a new empty IDL instead, so that we can avoid the
// db lookup on this idl.
if idl.len() == 0 {
self.idl_cache
.insert(cache_key, Box::new(IDLBitRange::new()));
} else {
self.idl_cache.insert(cache_key, Box::new(idl.clone()));
}
self.db.write_idl(audit, attr, itype, idx_key, idl)
}
pub fn create_name2uuid(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
self.db.create_name2uuid(audit)
}
pub fn create_uuid2name(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
self.db.create_uuid2name(audit)
}
pub fn create_idx(
&self,
audit: &mut AuditScope,
attr: &str,
itype: &IndexType,
) -> Result<(), OperationError> {
// We don't need to affect this, so pass it down.
self.db.create_idx(audit, attr, itype)
}
pub fn list_idxs(&self, audit: &mut AuditScope) -> Result<Vec<String>, OperationError> {
// This is only used in tests, so bypass the cache.
self.db.list_idxs(audit)
}
pub unsafe fn purge_idxs(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
self.db.purge_idxs(audit).and_then(|r| {
self.idl_cache.clear();
Ok(r)
})
}
pub unsafe fn purge_id2entry(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
self.db.purge_id2entry(audit).and_then(|r| {
self.entry_cache.clear();
Ok(r)
})
}
pub fn write_db_s_uuid(&self, nsid: Uuid) -> Result<(), OperationError> {
self.db.write_db_s_uuid(nsid)
}
pub fn write_db_d_uuid(&self, nsid: Uuid) -> Result<(), OperationError> {
self.db.write_db_d_uuid(nsid)
}
pub(crate) fn get_db_index_version(&self) -> i64 {
self.db.get_db_index_version()
}
pub(crate) fn set_db_index_version(&self, v: i64) -> Result<(), OperationError> {
self.db.set_db_index_version(v)
}
pub fn setup(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
self.db.setup(audit)
}
}
impl IdlArcSqlite {
pub fn new(audit: &mut AuditScope, path: &str, pool_size: u32) -> Result<Self, OperationError> {
let db = IdlSqlite::new(audit, path, pool_size)?;
let entry_cache = Arc::new(
DEFAULT_CACHE_TARGET,
pool_size as usize,
DEFAULT_CACHE_RMISS,
DEFAULT_CACHE_WMISS,
);
// The idl cache should have smaller items, and is critical for fast searches
// so we allow it to have a higher ratio of items relative to the entries.
let idl_cache = Arc::new(
DEFAULT_CACHE_TARGET * DEFAULT_IDL_CACHE_RATIO,
pool_size as usize,
DEFAULT_CACHE_RMISS,
DEFAULT_CACHE_WMISS,
);
Ok(IdlArcSqlite {
db,
entry_cache,
idl_cache,
})
}
pub fn read(&self) -> IdlArcSqliteReadTransaction {
// IMPORTANT! Always take entrycache FIRST
let entry_cache_read = self.entry_cache.read();
let idl_cache_read = self.idl_cache.read();
let db_read = self.db.read();
IdlArcSqliteReadTransaction {
db: db_read,
entry_cache: entry_cache_read,
idl_cache: idl_cache_read,
}
}
pub fn write(&self) -> IdlArcSqliteWriteTransaction {
// IMPORTANT! Always take entrycache FIRST
let entry_cache_write = self.entry_cache.write();
let idl_cache_write = self.idl_cache.write();
let db_write = self.db.write();
IdlArcSqliteWriteTransaction {
db: db_write,
entry_cache: entry_cache_write,
idl_cache: idl_cache_write,
}
}
}

View file

@ -1,5 +1,6 @@
use crate::audit::AuditScope;
use crate::be::{IdEntry, IDL};
use crate::be::{IdRawEntry, IDL};
use crate::entry::{Entry, EntryCommitted, EntrySealed};
use crate::value::IndexType;
use idlset::IDLBitRange;
use kanidm_proto::v1::{ConsistencyError, OperationError};
@ -7,7 +8,7 @@ use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::OptionalExtension;
use rusqlite::NO_PARAMS;
use std::convert::TryFrom;
use std::convert::{TryFrom, TryInto};
use uuid::Uuid;
// use uuid::Uuid;
@ -15,6 +16,46 @@ use uuid::Uuid;
const DBV_ID2ENTRY: &str = "id2entry";
const DBV_INDEXV: &str = "indexv";
#[derive(Debug)]
pub struct IdSqliteEntry {
id: i64,
data: Vec<u8>,
}
impl TryFrom<IdSqliteEntry> for IdRawEntry {
type Error = OperationError;
fn try_from(value: IdSqliteEntry) -> Result<Self, Self::Error> {
if value.id <= 0 {
return Err(OperationError::InvalidEntryID);
}
Ok(IdRawEntry {
id: value
.id
.try_into()
.map_err(|_| OperationError::InvalidEntryID)?,
data: value.data,
})
}
}
impl TryFrom<IdRawEntry> for IdSqliteEntry {
type Error = OperationError;
fn try_from(value: IdRawEntry) -> Result<Self, Self::Error> {
if value.id <= 0 {
return Err(OperationError::InvalidEntryID);
}
Ok(IdSqliteEntry {
id: value
.id
.try_into()
.map_err(|_| OperationError::InvalidEntryID)?,
data: value.data,
})
}
}
#[derive(Clone)]
pub struct IdlSqlite {
pool: Pool<SqliteConnectionManager>,
@ -33,7 +74,22 @@ pub struct IdlSqliteWriteTransaction {
pub trait IdlSqliteTransaction {
fn get_conn(&self) -> &r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
fn get_identry(&self, au: &mut AuditScope, idl: &IDL) -> Result<Vec<IdEntry>, OperationError> {
fn get_identry(
&self,
au: &mut AuditScope,
idl: &IDL,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
self.get_identry_raw(au, idl)?
.into_iter()
.map(|ide| ide.into_entry())
.collect()
}
fn get_identry_raw(
&self,
au: &mut AuditScope,
idl: &IDL,
) -> Result<Vec<IdRawEntry>, OperationError> {
// is the idl allids?
match idl {
IDL::ALLIDS => {
@ -45,7 +101,7 @@ pub trait IdlSqliteTransaction {
);
let id2entry_iter = try_audit!(
au,
stmt.query_map(NO_PARAMS, |row| Ok(IdEntry {
stmt.query_map(NO_PARAMS, |row| Ok(IdSqliteEntry {
id: row.get(0)?,
data: row.get(1)?,
})),
@ -58,6 +114,10 @@ pub trait IdlSqliteTransaction {
audit_log!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
.and_then(|ise| {
// Convert the idsqlite to id raw
ise.try_into()
})
})
.collect()
}
@ -78,7 +138,7 @@ pub trait IdlSqliteTransaction {
let iid = i64::try_from(id).map_err(|_| OperationError::InvalidEntryID)?;
let id2entry_iter = stmt
.query_map(&[&iid], |row| {
Ok(IdEntry {
Ok(IdSqliteEntry {
id: row.get(0)?,
data: row.get(1)?,
})
@ -94,6 +154,10 @@ pub trait IdlSqliteTransaction {
audit_log!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
.and_then(|ise| {
// Convert the idsqlite to id raw
ise.try_into()
})
})
.collect();
let mut r = r?;
@ -246,12 +310,14 @@ pub trait IdlSqliteTransaction {
})
}
#[allow(clippy::let_and_return)]
fn verify(&self) -> Vec<Result<(), ConsistencyError>> {
let mut stmt = match self.get_conn().prepare("PRAGMA integrity_check;") {
Ok(r) => r,
Err(_) => return vec![Err(ConsistencyError::SqliteIntegrityFailure)],
};
// Allow this as it actually extends the life of stmt
let r = match stmt.query(NO_PARAMS) {
Ok(mut rows) => {
match rows.next() {
@ -361,7 +427,7 @@ impl IdlSqliteWriteTransaction {
})
}
pub fn get_id2entry_max_id(&self) -> Result<i64, OperationError> {
pub fn get_id2entry_max_id(&self) -> Result<u64, OperationError> {
let mut stmt = self
.conn
.prepare("SELECT MAX(id) as id_max FROM id2entry")
@ -372,23 +438,50 @@ impl IdlSqliteWriteTransaction {
.exists(NO_PARAMS)
.map_err(|_| OperationError::SQLiteError)?;
Ok(if v {
if v {
// We have some rows, let get max!
let i: Option<i64> = stmt
.query_row(NO_PARAMS, |row| row.get(0))
.map_err(|_| OperationError::SQLiteError)?;
i.unwrap_or(0)
.try_into()
.map_err(|_| OperationError::InvalidEntryID)
} else {
// No rows are present, return a 0.
0
})
Ok(0)
}
}
pub fn write_identries(
pub fn write_identries<'b, I>(
&'b self,
au: &mut AuditScope,
entries: I,
) -> Result<(), OperationError>
where
I: Iterator<Item = &'b Entry<EntrySealed, EntryCommitted>>,
{
let raw_entries: Result<Vec<_>, _> = entries
.map(|e| {
let dbe = e.to_dbentry();
let data = serde_cbor::to_vec(&dbe).map_err(|_| OperationError::SerdeCborError)?;
Ok(IdRawEntry {
id: e.get_id(),
data,
})
})
.collect();
self.write_identries_raw(au, raw_entries?.into_iter())
}
pub fn write_identries_raw<I>(
&self,
au: &mut AuditScope,
entries: Vec<IdEntry>,
) -> Result<(), OperationError> {
mut entries: I,
) -> Result<(), OperationError>
where
I: Iterator<Item = IdRawEntry>,
{
let mut stmt = try_audit!(
au,
self.conn
@ -399,18 +492,21 @@ impl IdlSqliteWriteTransaction {
try_audit!(
au,
entries.iter().try_for_each(|ser_ent| {
entries.try_for_each(|e| {
let ser_ent = IdSqliteEntry::try_from(e)?;
stmt.execute_named(&[(":id", &ser_ent.id), (":data", &ser_ent.data)])
// remove the updated usize
.map(|_| ())
}),
"RusqliteError: {:?}",
OperationError::SQLiteError
.map_err(|_| OperationError::SQLiteError)
})
);
Ok(())
}
pub fn delete_identry(&self, au: &mut AuditScope, idl: Vec<i64>) -> Result<(), OperationError> {
pub fn delete_identry<I>(&self, au: &mut AuditScope, mut idl: I) -> Result<(), OperationError>
where
I: Iterator<Item = u64>,
{
let mut stmt = try_audit!(
au,
self.conn.prepare("DELETE FROM id2entry WHERE id = :id"),
@ -418,8 +514,21 @@ impl IdlSqliteWriteTransaction {
OperationError::SQLiteError
);
idl.iter().try_for_each(|id| {
stmt.execute(&[&id])
idl.try_for_each(|id| {
let iid: i64 = id
.try_into()
.map_err(|_| OperationError::InvalidEntryID)
.and_then(|i| {
if i > 0 {
Ok(i)
} else {
Err(OperationError::InvalidEntryID)
}
})?;
debug_assert!(iid > 0);
stmt.execute(&[&iid])
.map(|_| ())
.map_err(|_| OperationError::SQLiteError)
})

View file

@ -1,10 +1,9 @@
use serde_cbor;
use serde_json;
use std::convert::TryFrom;
use std::fs;
use crate::value::IndexType;
use std::collections::BTreeSet;
use std::sync::Arc;
use crate::audit::AuditScope;
use crate::be::dbentry::DbEntry;
@ -17,15 +16,17 @@ use uuid::Uuid;
pub mod dbentry;
pub mod dbvalue;
mod idl_arc_sqlite;
mod idl_sqlite;
use crate::be::idl_sqlite::{
IdlSqlite, IdlSqliteReadTransaction, IdlSqliteTransaction, IdlSqliteWriteTransaction,
use crate::be::idl_arc_sqlite::{
IdlArcSqlite, IdlArcSqliteReadTransaction, IdlArcSqliteTransaction,
IdlArcSqliteWriteTransaction,
};
const FILTER_TEST_THRESHOLD: usize = 8;
#[derive(Debug)]
#[derive(Debug, Clone)]
pub enum IDL {
ALLIDS,
Partial(IDLBitRange),
@ -33,28 +34,27 @@ pub enum IDL {
}
#[derive(Debug)]
pub struct IdEntry {
// TODO #20: for now this is i64 to make sqlite work, but entry is u64 for indexing reasons!
id: i64,
pub struct IdRawEntry {
id: u64,
data: Vec<u8>,
}
#[derive(Clone)]
pub struct Backend {
idlayer: IdlSqlite,
idlayer: Arc<IdlArcSqlite>,
}
pub struct BackendReadTransaction {
idlayer: IdlSqliteReadTransaction,
pub struct BackendReadTransaction<'a> {
idlayer: IdlArcSqliteReadTransaction<'a>,
}
pub struct BackendWriteTransaction {
pub struct BackendWriteTransaction<'a> {
idxmeta: BTreeSet<(String, IndexType)>,
// idxcache: IdxCache,
idlayer: IdlSqliteWriteTransaction,
idlayer: IdlArcSqliteWriteTransaction<'a>,
}
impl IdEntry {
impl IdRawEntry {
fn into_entry(self) -> Result<Entry<EntrySealed, EntryCommitted>, OperationError> {
let db_e = serde_cbor::from_slice(self.data.as_slice())
.map_err(|_| OperationError::SerdeCborError)?;
@ -64,12 +64,12 @@ impl IdEntry {
}
pub trait BackendTransaction {
type IdlLayerType: IdlSqliteTransaction;
fn get_idlayer(&self) -> &Self::IdlLayerType;
type IdlLayerType: IdlArcSqliteTransaction;
fn get_idlayer(&mut self) -> &mut Self::IdlLayerType;
/// Recursively apply a filter, transforming into IDL's on the way.
fn filter2idl(
&self,
&mut self,
au: &mut AuditScope,
filt: &FilterResolved,
thres: usize,
@ -296,7 +296,7 @@ pub trait BackendTransaction {
// Take filter, and AuditScope ref?
fn search(
&self,
&mut self,
au: &mut AuditScope,
filt: &Filter<FilterValidResolved>,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
@ -312,12 +312,7 @@ pub trait BackendTransaction {
// Also get if the filter was 100% resolved or not.
let idl = self.filter2idl(au, filt.to_inner(), FILTER_TEST_THRESHOLD)?;
let raw_entries = try_audit!(au, self.get_idlayer().get_identry(au, &idl));
let entries: Result<Vec<_>, _> = raw_entries
.into_iter()
.map(|ide| ide.into_entry())
.collect();
let entries = try_audit!(au, entries);
let entries = try_audit!(au, self.get_idlayer().get_identry(au, &idl));
// Do other things
// Now, de-serialise the raw_entries back to entries, and populate their ID's
@ -359,7 +354,7 @@ pub trait BackendTransaction {
/// load any candidates if they match. This is heavily used in uuid
/// refint and attr uniqueness.
fn exists(
&self,
&mut self,
au: &mut AuditScope,
filt: &Filter<FilterValidResolved>,
) -> Result<bool, OperationError> {
@ -377,12 +372,7 @@ pub trait BackendTransaction {
match &idl {
IDL::Indexed(idl) => Ok(idl.len() > 0),
_ => {
let raw_entries = try_audit!(au, self.get_idlayer().get_identry(au, &idl));
let entries: Result<Vec<_>, _> = raw_entries
.into_iter()
.map(|ide| ide.into_entry())
.collect();
let entries = try_audit!(au, entries);
let entries = try_audit!(au, self.get_idlayer().get_identry(au, &idl));
// if not 100% resolved query, apply the filter test.
let entries_filtered: Vec<_> = entries
@ -396,16 +386,16 @@ pub trait BackendTransaction {
}) // end audit segment
}
fn verify(&self) -> Vec<Result<(), ConsistencyError>> {
fn verify(&mut self) -> Vec<Result<(), ConsistencyError>> {
// Vec::new()
self.get_idlayer().verify()
}
fn backup(&self, audit: &mut AuditScope, dst_path: &str) -> Result<(), OperationError> {
fn backup(&mut self, audit: &mut AuditScope, dst_path: &str) -> Result<(), OperationError> {
// load all entries into RAM, may need to change this later
// if the size of the database compared to RAM is an issue
let idl = IDL::ALLIDS;
let raw_entries: Vec<IdEntry> = self.get_idlayer().get_identry(audit, &idl)?;
let raw_entries: Vec<IdRawEntry> = self.get_idlayer().get_identry_raw(audit, &idl)?;
let entries: Result<Vec<DbEntry>, _> = raw_entries
.iter()
@ -439,21 +429,23 @@ pub trait BackendTransaction {
}
}
impl BackendTransaction for BackendReadTransaction {
type IdlLayerType = IdlSqliteReadTransaction;
fn get_idlayer(&self) -> &IdlSqliteReadTransaction {
&self.idlayer
impl<'a> BackendTransaction for BackendReadTransaction<'a> {
type IdlLayerType = IdlArcSqliteReadTransaction<'a>;
fn get_idlayer(&mut self) -> &mut IdlArcSqliteReadTransaction<'a> {
&mut self.idlayer
}
}
impl BackendTransaction for BackendWriteTransaction {
type IdlLayerType = IdlSqliteWriteTransaction;
fn get_idlayer(&self) -> &IdlSqliteWriteTransaction {
&self.idlayer
impl<'a> BackendTransaction for BackendWriteTransaction<'a> {
type IdlLayerType = IdlArcSqliteWriteTransaction<'a>;
fn get_idlayer(&mut self) -> &mut IdlArcSqliteWriteTransaction<'a> {
&mut self.idlayer
}
}
impl BackendWriteTransaction {
impl<'a> BackendWriteTransaction<'a> {
pub fn create(
&mut self,
au: &mut AuditScope,
@ -483,22 +475,7 @@ impl BackendWriteTransaction {
})
.collect();
let identries: Result<Vec<_>, _> = c_entries
.iter()
.map(|e| {
let dbe = e.to_dbentry();
let data =
serde_cbor::to_vec(&dbe).map_err(|_| OperationError::SerdeCborError)?;
Ok(IdEntry {
id: i64::try_from(e.get_id())
.map_err(|_| OperationError::InvalidEntryID)?,
data: data,
})
})
.collect();
self.idlayer.write_identries(au, identries?)?;
self.idlayer.write_identries(au, c_entries.iter())?;
// Now update the indexes as required.
for e in c_entries.iter() {
@ -510,7 +487,7 @@ impl BackendWriteTransaction {
}
pub fn modify(
&self,
&mut self,
au: &mut AuditScope,
pre_entries: &[Entry<EntrySealed, EntryCommitted>],
post_entries: &[Entry<EntrySealed, EntryCommitted>],
@ -525,13 +502,12 @@ impl BackendWriteTransaction {
assert!(post_entries.len() == pre_entries.len());
/*
// Assert the Id's exist on the entry, and serialise them.
// Now, that means the ID must be > 0!!!
let ser_entries: Result<Vec<IdEntry>, _> = post_entries
.iter()
.map(|e| {
let db_e = e.to_dbentry();
let id = i64::try_from(e.get_id())
.map_err(|_| OperationError::InvalidEntryID)
.and_then(|id| {
@ -542,9 +518,7 @@ impl BackendWriteTransaction {
}
})?;
let data = serde_cbor::to_vec(&db_e).map_err(|_| OperationError::SerdeCborError)?;
Ok(IdEntry { id, data })
Ok(IdEntry { id, data: e.clone() })
})
.collect();
@ -557,9 +531,10 @@ impl BackendWriteTransaction {
if post_entries.len() != ser_entries.len() {
return Err(OperationError::InvalidEntryState);
}
*/
// Now, given the list of id's, update them
self.idlayer.write_identries(au, ser_entries)?;
self.idlayer.write_identries(au, post_entries.iter())?;
// Finally, we now reindex all the changed entries. We do this by iterating and zipping
// over the set, because we know the list is in the same order.
@ -570,7 +545,7 @@ impl BackendWriteTransaction {
}
pub fn delete(
&self,
&mut self,
au: &mut AuditScope,
entries: &[Entry<EntrySealed, EntryCommitted>],
) -> Result<(), OperationError> {
@ -584,28 +559,8 @@ impl BackendWriteTransaction {
return Err(OperationError::EmptyRequest);
}
// Assert the Id's exist on the entry.
let id_list: Result<Vec<i64>, _> = entries
.iter()
.map(|e| {
i64::try_from(e.get_id())
.map_err(|_| OperationError::InvalidEntryID)
.and_then(|id| {
if id == 0 {
Err(OperationError::InvalidEntryID)
} else {
Ok(id)
}
})
})
.collect();
let id_list = try_audit!(au, id_list);
// Simple: If the list of id's is not the same as the input list, we are missing id's
if entries.len() != id_list.len() {
return Err(OperationError::InvalidEntryState);
}
// Assert the id's exist on the entry.
let id_list = entries.iter().map(|e| e.get_id());
// Now, given the list of id's, delete them.
self.idlayer.delete_identry(au, id_list)?;
@ -626,7 +581,7 @@ impl BackendWriteTransaction {
// At the end, we flush those cchange outs in a single run.
// For create this is probably a
fn entry_index(
&self,
&mut self,
audit: &mut AuditScope,
pre: Option<&Entry<EntrySealed, EntryCommitted>>,
post: Option<&Entry<EntrySealed, EntryCommitted>>,
@ -651,7 +606,15 @@ impl BackendWriteTransaction {
}
};
let idx_diff = Entry::idx_diff(&self.idxmeta, pre, post);
// Extremely Cursed - Okay, we know that self.idxmeta will NOT be changed
// in this function, but we need to borrow self as mut for the caches in
// get_idl to work. As a result, this causes a double borrow. To work around
// this we discard the lifetime on idxmeta, because we know that it will
// remain constant for the life of the operation.
let idxmeta = unsafe { &(*(&self.idxmeta as *const _)) };
let idx_diff = Entry::idx_diff(&idxmeta, pre, post);
idx_diff.iter()
.try_for_each(|act| {
@ -697,7 +660,7 @@ impl BackendWriteTransaction {
#[allow(dead_code)]
fn missing_idxs(
&self,
&mut self,
audit: &mut AuditScope,
) -> Result<Vec<(String, IndexType)>, OperationError> {
let idx_table_list = self.idlayer.list_idxs(audit)?;
@ -736,14 +699,18 @@ impl BackendWriteTransaction {
.try_for_each(|(attr, itype)| self.idlayer.create_idx(audit, attr, itype))
}
pub fn upgrade_reindex(&self, audit: &mut AuditScope, v: i64) -> Result<(), OperationError> {
pub fn upgrade_reindex(
&mut self,
audit: &mut AuditScope,
v: i64,
) -> Result<(), OperationError> {
if self.get_db_index_version() < v {
self.reindex(audit)?;
}
self.set_db_index_version(v)
}
pub fn reindex(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
pub fn reindex(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
// Purge the idxs
unsafe { self.idlayer.purge_idxs(audit)? };
@ -754,12 +721,7 @@ impl BackendWriteTransaction {
// Future idea: Do this in batches of X amount to limit memory
// consumption.
let idl = IDL::ALLIDS;
let raw_entries = try_audit!(audit, self.idlayer.get_identry(audit, &idl));
let entries: Result<Vec<_>, _> = raw_entries
.into_iter()
.map(|ide| ide.into_entry())
.collect();
let entries = try_audit!(audit, entries);
let entries = try_audit!(audit, self.idlayer.get_identry(audit, &idl));
// WHEN do we update name2uuid and uuid2name?
// Do they become attrs of the idx_cache? Should that be a struct?
@ -773,13 +735,13 @@ impl BackendWriteTransaction {
}
#[cfg(test)]
pub fn purge_idxs(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
pub fn purge_idxs(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
unsafe { self.idlayer.purge_idxs(audit) }
}
#[cfg(test)]
pub fn load_test_idl(
&self,
&mut self,
audit: &mut AuditScope,
attr: &String,
itype: &IndexType,
@ -847,18 +809,17 @@ impl BackendWriteTransaction {
// Now, we setup all the entries with new ids.
let mut id_max = 0;
let identries: Result<Vec<IdEntry>, _> = dbentries
let identries: Result<Vec<IdRawEntry>, _> = dbentries
.iter()
.map(|ser_db_e| {
.map(|e| {
id_max += 1;
let data =
serde_cbor::to_vec(&ser_db_e).map_err(|_| OperationError::SerdeCborError)?;
Ok(IdEntry { id: id_max, data })
let data = serde_cbor::to_vec(&e).map_err(|_| OperationError::SerdeCborError)?;
Ok(IdRawEntry { id: id_max, data })
})
.collect();
self.idlayer.write_identries(audit, identries?)?;
self.idlayer
.write_identries_raw(audit, identries?.into_iter())?;
// for debug
/*
@ -892,7 +853,7 @@ impl BackendWriteTransaction {
Ok(nsid)
}
pub fn get_db_s_uuid(&self) -> Uuid {
pub fn get_db_s_uuid(&mut self) -> Uuid {
match self
.get_idlayer()
.get_db_s_uuid()
@ -909,7 +870,7 @@ impl BackendWriteTransaction {
Ok(nsid)
}
pub fn get_db_d_uuid(&self) -> Uuid {
pub fn get_db_d_uuid(&mut self) -> Uuid {
match self
.get_idlayer()
.get_db_d_uuid()
@ -920,11 +881,11 @@ impl BackendWriteTransaction {
}
}
fn get_db_index_version(&self) -> i64 {
fn get_db_index_version(&mut self) -> i64 {
self.get_idlayer().get_db_index_version()
}
fn set_db_index_version(&self, v: i64) -> Result<(), OperationError> {
fn set_db_index_version(&mut self, v: i64) -> Result<(), OperationError> {
self.get_idlayer().set_db_index_version(v)
}
}
@ -935,7 +896,7 @@ impl Backend {
// this has a ::memory() type, but will path == "" work?
audit_segment!(audit, || {
let be = Backend {
idlayer: IdlSqlite::new(audit, path, pool_size)?,
idlayer: Arc::new(IdlArcSqlite::new(audit, path, pool_size)?),
};
// Now complete our setup with a txn

View file

@ -1,4 +1,3 @@
use num_cpus;
use rand::prelude::*;
use std::fmt;
use std::path::PathBuf;

View file

@ -33,7 +33,7 @@ pub const CHANGELOG_MAX_AGE: u64 = 86400;
pub const RECYCLEBIN_MAX_AGE: u64 = 300;
#[cfg(not(test))]
/// In production we allow 1 week
pub const RECYCLEBIN_MAX_AGE: u64 = 604800;
pub const RECYCLEBIN_MAX_AGE: u64 = 604_800;
// 5 minute auth session window.
pub const AUTH_SESSION_TIMEOUT: u64 = 300;

View file

@ -730,7 +730,7 @@ async fn account_post_id_unix_auth(
let uat = get_current_user(&session);
let id = path.into_inner();
let m_obj = IdmAccountUnixAuthMessage {
uat: uat,
uat,
uuid_or_name: id,
cred: obj.into_inner().value,
};
@ -1096,7 +1096,7 @@ pub fn backup_server_core(config: Configuration, dst_path: &str) {
};
let mut audit = AuditScope::new("backend_backup");
let be_ro_txn = be.read();
let mut be_ro_txn = be.read();
let r = be_ro_txn.backup(&mut audit, dst_path);
debug!("{}", audit);
match r {
@ -1157,7 +1157,7 @@ pub fn restore_server_core(config: Configuration, dst_path: &str) {
info!("Start reindex phase ...");
let qs_write = qs.write(duration_from_epoch_now());
let mut qs_write = qs.write(duration_from_epoch_now());
let r = qs_write
.reindex(&mut audit)
.and_then(|_| qs_write.commit(&mut audit));
@ -1195,7 +1195,7 @@ pub fn reindex_server_core(config: Configuration) {
let idxmeta = { schema.write().get_idxmeta_set() };
// Reindex only the core schema attributes to bootstrap the process.
let be_wr_txn = be.write(idxmeta);
let mut be_wr_txn = be.write(idxmeta);
let r = be_wr_txn
.reindex(&mut audit)
.and_then(|_| be_wr_txn.commit(&mut audit));
@ -1222,7 +1222,7 @@ pub fn reindex_server_core(config: Configuration) {
info!("Start Index Phase 2 ...");
let qs_write = qs.write(duration_from_epoch_now());
let mut qs_write = qs.write(duration_from_epoch_now());
let r = qs_write
.reindex(&mut audit)
.and_then(|_| qs_write.commit(&mut audit));

View file

@ -1,5 +1,4 @@
use crate::be::dbvalue::{DbCredV1, DbPasswordV1};
use base64;
use openssl::hash::MessageDigest;
use openssl::pkcs5::pbkdf2_hmac;
use rand::prelude::*;

View file

@ -148,7 +148,7 @@ impl TOTP {
.map_err(|_| TOTPError::HmacError)?;
let otp = u32::from_be_bytes(bytes);
Ok((otp & 0x7fffffff) % 1_000_000)
Ok((otp & 0x7fff_ffff) % 1_000_000)
}
pub fn do_totp_duration_from_epoch(&self, time: &Duration) -> Result<u32, TOTPError> {
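
The 0x7fff_ffff edit is the same literal-readability cleanup, applied to the truncation step of the OTP calculation visible above: mask off the sign bit of the big-endian word, then keep the low six decimal digits. A minimal standalone sketch with a worked value (the helper name truncate_otp is made up for illustration):

// Clear the sign bit, then keep six decimal digits.
fn truncate_otp(bytes: [u8; 4]) -> u32 {
    let otp = u32::from_be_bytes(bytes);
    (otp & 0x7fff_ffff) % 1_000_000
}

fn main() {
    // 0xFF00_0001 -> masked to 0x7F00_0001 = 2_130_706_433 -> 706_433
    assert_eq!(truncate_otp([0xFF, 0x00, 0x00, 0x01]), 706_433);
}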

View file

@ -304,7 +304,7 @@ impl Entry<EntryInit, EntryNew> {
pub fn from_proto_entry(
audit: &mut AuditScope,
e: &ProtoEntry,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
// Why not the trait? In the future we may want to extend
// this with server aware functions for changes of the
@ -339,7 +339,7 @@ impl Entry<EntryInit, EntryNew> {
pub fn from_proto_entry_str(
audit: &mut AuditScope,
es: &str,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
audit_log!(audit, "Parsing -> {}", es);
// str -> Proto entry
@ -1282,7 +1282,7 @@ impl Entry<EntryReduced, EntryCommitted> {
pub fn to_pe(
&self,
audit: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<ProtoEntry, OperationError> {
// Turn values -> Strings.
let attrs: Result<_, _> = self

View file

@ -35,7 +35,7 @@ pub struct SearchResult {
impl SearchResult {
pub fn new(
audit: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
entries: Vec<Entry<EntryReduced, EntryCommitted>>,
) -> Result<Self, OperationError> {
let entries: Result<_, _> = entries
@ -104,7 +104,7 @@ pub struct Event {
impl Event {
pub fn from_ro_request(
audit: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
user_uuid: &str,
) -> Result<Self, OperationError> {
// Do we need to check or load the entry from the user_uuid?
@ -125,7 +125,7 @@ impl Event {
pub fn from_ro_uat(
audit: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
uat: Option<UserAuthToken>,
) -> Result<Self, OperationError> {
audit_log!(audit, "from_ro_uat -> {:?}", uat);
@ -146,7 +146,7 @@ impl Event {
pub fn from_rw_uat(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
uat: Option<UserAuthToken>,
) -> Result<Self, OperationError> {
audit_log!(audit, "from_rw_uat -> {:?}", uat);
@ -167,7 +167,7 @@ impl Event {
pub fn from_rw_request(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
user_uuid: &str,
) -> Result<Self, OperationError> {
// Do we need to check or load the entry from the user_uuid?
@ -241,7 +241,7 @@ impl SearchEvent {
pub fn from_message(
audit: &mut AuditScope,
msg: SearchMessage,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
match Filter::from_ro(audit, &msg.req.filter, qs) {
Ok(f) => Ok(SearchEvent {
@ -267,7 +267,7 @@ impl SearchEvent {
pub fn from_internal_message(
audit: &mut AuditScope,
msg: InternalSearchMessage,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let r_attrs: Option<BTreeSet<String>> = msg.attrs.map(|vs| {
vs.into_iter()
@ -302,7 +302,7 @@ impl SearchEvent {
pub fn from_internal_recycle_message(
audit: &mut AuditScope,
msg: InternalSearchRecycledMessage,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let r_attrs: Option<BTreeSet<String>> = msg.attrs.map(|vs| {
vs.into_iter()
@ -336,7 +336,7 @@ impl SearchEvent {
pub fn from_whoami_request(
audit: &mut AuditScope,
uat: Option<UserAuthToken>,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
Ok(SearchEvent {
event: Event::from_ro_uat(audit, qs, uat)?,
@ -355,7 +355,7 @@ impl SearchEvent {
audit: &mut AuditScope,
uat: Option<UserAuthToken>,
target_uuid: Uuid,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
Ok(SearchEvent {
event: Event::from_ro_uat(audit, qs, uat)?,
@ -471,7 +471,7 @@ impl CreateEvent {
pub fn from_message(
audit: &mut AuditScope,
msg: CreateMessage,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let rentries: Result<Vec<_>, _> = msg
.req
@ -553,7 +553,7 @@ impl DeleteEvent {
pub fn from_message(
audit: &mut AuditScope,
msg: DeleteMessage,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
match Filter::from_rw(audit, &msg.req.filter, qs) {
Ok(f) => Ok(DeleteEvent {
@ -575,7 +575,7 @@ impl DeleteEvent {
audit: &mut AuditScope,
uat: Option<UserAuthToken>,
filter: Filter<FilterInvalid>,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
Ok(DeleteEvent {
event: Event::from_rw_uat(audit, qs, uat)?,
@ -643,7 +643,7 @@ impl ModifyEvent {
pub fn from_message(
audit: &mut AuditScope,
msg: ModifyMessage,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
match Filter::from_rw(audit, &msg.req.filter, qs) {
Ok(f) => match ModifyList::from(audit, &msg.req.modlist, qs) {
@ -674,7 +674,7 @@ impl ModifyEvent {
target_uuid: Uuid,
proto_ml: ProtoModifyList,
filter: Filter<FilterInvalid>,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let f_uuid = filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid)));
// Add any supplemental conditions we have.
@ -705,7 +705,7 @@ impl ModifyEvent {
target_uuid: Uuid,
ml: ModifyList<ModifyInvalid>,
filter: Filter<FilterInvalid>,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let f_uuid = filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid)));
// Add any supplemental conditions we have.
@ -733,7 +733,7 @@ impl ModifyEvent {
target_uuid: Uuid,
attr: String,
filter: Filter<FilterInvalid>,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let ml = ModifyList::new_purge(attr.as_str());
let f_uuid = filter_all!(f_eq("uuid", PartialValue::new_uuid(target_uuid)));
@ -967,7 +967,7 @@ pub struct WhoamiResult {
impl WhoamiResult {
pub fn new(
audit: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
e: Entry<EntryReduced, EntryCommitted>,
uat: UserAuthToken,
) -> Result<Self, OperationError> {
@ -1039,7 +1039,7 @@ impl ReviveRecycledEvent {
audit: &mut AuditScope,
uat: Option<UserAuthToken>,
filter: Filter<FilterInvalid>,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
Ok(ReviveRecycledEvent {
event: Event::from_rw_uat(audit, qs, uat)?,

View file

@ -375,7 +375,7 @@ impl Filter<FilterInvalid> {
pub fn from_ro(
audit: &mut AuditScope,
f: &ProtoFilter,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
Ok(Filter {
state: FilterInvalid {
@ -387,7 +387,7 @@ impl Filter<FilterInvalid> {
pub fn from_rw(
audit: &mut AuditScope,
f: &ProtoFilter,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
Ok(Filter {
state: FilterInvalid {
@ -564,7 +564,7 @@ impl FilterComp {
fn from_ro(
audit: &mut AuditScope,
f: &ProtoFilter,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
Ok(match f {
ProtoFilter::Eq(a, v) => FilterComp::Eq(a.clone(), qs.clone_partialvalue(audit, a, v)?),
@ -590,7 +590,7 @@ impl FilterComp {
fn from_rw(
audit: &mut AuditScope,
f: &ProtoFilter,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
Ok(match f {
ProtoFilter::Eq(a, v) => FilterComp::Eq(a.clone(), qs.clone_partialvalue(audit, a, v)?),

View file

@ -96,7 +96,7 @@ impl Account {
pub(crate) fn try_from_entry_ro(
au: &mut AuditScope,
value: Entry<EntrySealed, EntryCommitted>,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let groups = Group::try_from_account_entry_ro(au, &value, qs)?;
try_from_entry!(value, groups)
@ -105,7 +105,7 @@ impl Account {
pub(crate) fn try_from_entry_rw(
au: &mut AuditScope,
value: Entry<EntrySealed, EntryCommitted>,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let groups = Group::try_from_account_entry_rw(au, &value, qs)?;
try_from_entry!(value, groups)

View file

@ -14,9 +14,9 @@ use std::time::Duration;
// auth policies would exist, but each credHandler has to be a whole
// encapsulated unit of function.
const BAD_PASSWORD_MSG: &'static str = "incorrect password";
const BAD_TOTP_MSG: &'static str = "incorrect totp";
const BAD_AUTH_TYPE_MSG: &'static str = "invalid authentication method in this context";
const BAD_PASSWORD_MSG: &str = "incorrect password";
const BAD_TOTP_MSG: &str = "incorrect totp";
const BAD_AUTH_TYPE_MSG: &str = "invalid authentication method in this context";
enum CredState {
Success(Vec<Claim>),
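
Dropping &'static in the constants above works because the 'static lifetime is already implied for references in const and static items, so the explicit annotation is redundant (clippy's redundant_static_lifetimes lint). A small sketch:

// `'static` is implied for references in `const`/`static` items.
const BAD_PASSWORD_MSG: &str = "incorrect password";

fn main() {
    // The reference still lives for 'static.
    let msg: &'static str = BAD_PASSWORD_MSG;
    println!("{}", msg);
}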

View file

@ -27,7 +27,7 @@ impl PasswordChangeEvent {
pub fn from_idm_account_set_password(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
msg: IdmAccountSetPasswordMessage,
) -> Result<Self, OperationError> {
let e = Event::from_rw_uat(audit, qs, msg.uat)?;
@ -43,7 +43,7 @@ impl PasswordChangeEvent {
pub fn from_parts(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
uat: Option<UserAuthToken>,
target: Uuid,
cleartext: String,
@ -79,7 +79,7 @@ impl UnixPasswordChangeEvent {
pub fn from_parts(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
uat: Option<UserAuthToken>,
target: Uuid,
cleartext: String,
@ -104,7 +104,7 @@ pub struct GeneratePasswordEvent {
impl GeneratePasswordEvent {
pub fn from_parts(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
uat: Option<UserAuthToken>,
target: Uuid,
appid: Option<String>,
@ -128,7 +128,7 @@ pub struct RegenerateRadiusSecretEvent {
impl RegenerateRadiusSecretEvent {
pub fn from_parts(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
uat: Option<UserAuthToken>,
target: Uuid,
) -> Result<Self, OperationError> {
@ -154,7 +154,7 @@ pub struct RadiusAuthTokenEvent {
impl RadiusAuthTokenEvent {
pub fn from_parts(
audit: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
uat: Option<UserAuthToken>,
target: Uuid,
) -> Result<Self, OperationError> {
@ -180,7 +180,7 @@ pub struct UnixUserTokenEvent {
impl UnixUserTokenEvent {
pub fn from_parts(
audit: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
uat: Option<UserAuthToken>,
target: Uuid,
) -> Result<Self, OperationError> {
@ -206,7 +206,7 @@ pub struct UnixGroupTokenEvent {
impl UnixGroupTokenEvent {
pub fn from_parts(
audit: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
uat: Option<UserAuthToken>,
target: Uuid,
) -> Result<Self, OperationError> {
@ -242,7 +242,7 @@ impl UnixUserAuthEvent {
pub fn from_parts(
audit: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
uat: Option<UserAuthToken>,
target: Uuid,
cleartext: String,
@ -267,7 +267,7 @@ pub struct GenerateTOTPEvent {
impl GenerateTOTPEvent {
pub fn from_parts(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
uat: Option<UserAuthToken>,
target: Uuid,
label: String,
@ -304,7 +304,7 @@ pub struct VerifyTOTPEvent {
impl VerifyTOTPEvent {
pub fn from_parts(
audit: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
uat: Option<UserAuthToken>,
target: Uuid,
session: Uuid,

View file

@ -56,7 +56,7 @@ impl Group {
pub fn try_from_account_entry_red_ro(
au: &mut AuditScope,
value: &Entry<EntryReduced, EntryCommitted>,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Vec<Self>, OperationError> {
try_from_account_e!(au, value, qs)
}
@ -64,7 +64,7 @@ impl Group {
pub fn try_from_account_entry_ro(
au: &mut AuditScope,
value: &Entry<EntrySealed, EntryCommitted>,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Vec<Self>, OperationError> {
try_from_account_e!(au, value, qs)
}
@ -72,7 +72,7 @@ impl Group {
pub fn try_from_account_entry_rw(
au: &mut AuditScope,
value: &Entry<EntrySealed, EntryCommitted>,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Vec<Self>, OperationError> {
try_from_account_e!(au, value, qs)
}

View file

@ -25,7 +25,7 @@ impl RadiusAccount {
pub(crate) fn try_from_entry_reduced(
au: &mut AuditScope,
value: Entry<EntryReduced, EntryCommitted>,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
if !value.attribute_value_pres("class", &PVCLASS_ACCOUNT) {
return Err(OperationError::InvalidAccountState(

View file

@ -29,7 +29,6 @@ use concread::collections::bptree::*;
use rand::prelude::*;
use std::time::Duration;
use uuid::Uuid;
use zxcvbn;
pub struct IdmServer {
// There is a good reason to keep this single thread - it
@ -48,15 +47,15 @@ pub struct IdmServerWriteTransaction<'a> {
// the idm in memory structures (maybe the query server too). This is
// things like authentication
sessions: BptreeMapWriteTxn<'a, Uuid, AuthSession>,
pub qs_read: QueryServerReadTransaction,
pub qs_read: QueryServerReadTransaction<'a>,
// thread/server id
sid: SID,
}
pub struct IdmServerProxyReadTransaction {
pub struct IdmServerProxyReadTransaction<'a> {
// This contains read-only methods, like getting users, groups
// and other structured content.
pub qs_read: QueryServerReadTransaction,
pub qs_read: QueryServerReadTransaction<'a>,
}
pub struct IdmServerProxyWriteTransaction<'a> {
@ -87,7 +86,7 @@ impl IdmServer {
sessions: self.sessions.write(),
// qs: &self.qs,
qs_read: self.qs.read(),
sid: sid,
sid,
}
}
@ -105,7 +104,7 @@ impl IdmServer {
IdmServerProxyWriteTransaction {
mfareg_sessions: self.mfareg_sessions.write(),
qs_write: self.qs.write(ts),
sid: sid,
sid,
}
}
}
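
The sid: sid -> sid edits above use Rust's field init shorthand: when a local binding shares the struct field's name, writing the name once is enough. Illustrated with a hypothetical WriteTxn struct:

// Field init shorthand: `sid,` is equivalent to `sid: sid,`.
struct WriteTxn {
    sid: u32,
}

fn main() {
    let sid = 42;
    let txn = WriteTxn { sid };
    assert_eq!(txn.sid, 42);
}
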
@ -184,7 +183,7 @@ impl<'a> IdmServerWriteTransaction<'a> {
// typing and functionality so we can assess what auth types can
// continue, and helps to keep non-needed entry specific data
// out of the LRU.
let account = Account::try_from_entry_ro(au, entry, &self.qs_read)?;
let account = Account::try_from_entry_ro(au, entry, &mut self.qs_read)?;
let auth_session = AuthSession::new(account, init.appid.clone());
// Get the set of mechanisms that can proceed. This is tied
@ -246,7 +245,7 @@ impl<'a> IdmServerWriteTransaction<'a> {
// Get their account
let account = try_audit!(
au,
UnixUserAccount::try_from_entry_ro(au, account_entry, &self.qs_read)
UnixUserAccount::try_from_entry_ro(au, account_entry, &mut self.qs_read)
);
// Validate the unix_pw - this checks the account/cred lock states.
@ -259,9 +258,9 @@ impl<'a> IdmServerWriteTransaction<'a> {
}
}
impl IdmServerProxyReadTransaction {
impl<'a> IdmServerProxyReadTransaction<'a> {
pub fn get_radiusauthtoken(
&self,
&mut self,
au: &mut AuditScope,
rate: &RadiusAuthTokenEvent,
) -> Result<RadiusAuthToken, OperationError> {
@ -273,14 +272,14 @@ impl IdmServerProxyReadTransaction {
);
let account = try_audit!(
au,
RadiusAccount::try_from_entry_reduced(au, account_entry, &self.qs_read)
RadiusAccount::try_from_entry_reduced(au, account_entry, &mut self.qs_read)
);
account.to_radiusauthtoken()
}
pub fn get_unixusertoken(
&self,
&mut self,
au: &mut AuditScope,
uute: &UnixUserTokenEvent,
) -> Result<UnixUserToken, OperationError> {
@ -292,13 +291,13 @@ impl IdmServerProxyReadTransaction {
let account = try_audit!(
au,
UnixUserAccount::try_from_entry_reduced(au, account_entry, &self.qs_read)
UnixUserAccount::try_from_entry_reduced(au, account_entry, &mut self.qs_read)
);
account.to_unixusertoken()
}
pub fn get_unixgrouptoken(
&self,
&mut self,
au: &mut AuditScope,
uute: &UnixGroupTokenEvent,
) -> Result<UnixGroupToken, OperationError> {
@ -324,7 +323,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
}
fn check_password_quality(
&self,
&mut self,
au: &mut AuditScope,
cleartext: &str,
related_inputs: &[&str],
@ -382,7 +381,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
}
fn target_to_account(
&self,
&mut self,
au: &mut AuditScope,
target: &Uuid,
) -> Result<Account, OperationError> {
@ -390,7 +389,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
let account_entry = try_audit!(au, self.qs_write.internal_search_uuid(au, target));
let account = try_audit!(
au,
Account::try_from_entry_rw(au, account_entry, &self.qs_write)
Account::try_from_entry_rw(au, account_entry, &mut self.qs_write)
);
// Ask if it's all good - this step checks pwpolicy and such
@ -461,7 +460,7 @@ impl<'a> IdmServerProxyWriteTransaction<'a> {
// Assert the account is unix and valid.
let account = try_audit!(
au,
UnixUserAccount::try_from_entry_rw(au, account_entry, &self.qs_write)
UnixUserAccount::try_from_entry_rw(au, account_entry, &mut self.qs_write)
);
// Ask if it's all good - this step checks pwpolicy and such
@ -1015,7 +1014,7 @@ mod tests {
.expect("Failed to reset radius credential 1");
idms_prox_write.commit(au).expect("failed to commit");
let idms_prox_read = idms.proxy_read();
let mut idms_prox_read = idms.proxy_read();
let rate = RadiusAuthTokenEvent::new_internal(UUID_ADMIN.clone());
let tok_r = idms_prox_read
.get_radiusauthtoken(au, &rate)
@ -1093,7 +1092,7 @@ mod tests {
idms_prox_write.commit(au).expect("failed to commit");
let idms_prox_read = idms.proxy_read();
let mut idms_prox_read = idms.proxy_read();
let ugte = UnixGroupTokenEvent::new_internal(
Uuid::parse_str("01609135-a1c4-43d5-966b-a28227644445")

View file

@ -95,7 +95,7 @@ impl UnixUserAccount {
pub(crate) fn try_from_entry_rw(
au: &mut AuditScope,
value: Entry<EntrySealed, EntryCommitted>,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
let groups = UnixGroup::try_from_account_entry_rw(au, &value, qs)?;
try_from_entry!(value, groups)
@ -104,7 +104,7 @@ impl UnixUserAccount {
pub(crate) fn try_from_entry_ro(
au: &mut AuditScope,
value: Entry<EntrySealed, EntryCommitted>,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let groups = UnixGroup::try_from_account_entry_ro(au, &value, qs)?;
try_from_entry!(value, groups)
@ -113,7 +113,7 @@ impl UnixUserAccount {
pub(crate) fn try_from_entry_reduced(
au: &mut AuditScope,
value: Entry<EntryReduced, EntryCommitted>,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Self, OperationError> {
let groups = UnixGroup::try_from_account_entry_red_ro(au, &value, qs)?;
try_from_entry!(value, groups)
@ -130,7 +130,7 @@ impl UnixUserAccount {
gidnumber: self.gidnumber,
uuid: self.uuid.to_hyphenated_ref().to_string(),
shell: self.shell.clone(),
groups: groups,
groups,
sshkeys: self.sshkeys.clone(),
})
}
@ -295,7 +295,7 @@ impl UnixGroup {
pub fn try_from_account_entry_rw(
au: &mut AuditScope,
value: &Entry<EntrySealed, EntryCommitted>,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Vec<Self>, OperationError> {
try_from_account_group_e!(au, value, qs)
}
@ -303,7 +303,7 @@ impl UnixGroup {
pub fn try_from_account_entry_ro(
au: &mut AuditScope,
value: &Entry<EntrySealed, EntryCommitted>,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Vec<Self>, OperationError> {
try_from_account_group_e!(au, value, qs)
}
@ -311,7 +311,7 @@ impl UnixGroup {
pub fn try_from_account_entry_red_ro(
au: &mut AuditScope,
value: &Entry<EntryReduced, EntryCommitted>,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<Vec<Self>, OperationError> {
try_from_account_group_e!(au, value, qs)
}

View file

@ -44,7 +44,7 @@ impl Modify {
pub fn from(
audit: &mut AuditScope,
m: &ProtoModify,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
Ok(match m {
ProtoModify::Present(a, v) => Modify::Present(a.clone(), qs.clone_value(audit, a, v)?),
@ -106,7 +106,7 @@ impl ModifyList<ModifyInvalid> {
pub fn from(
audit: &mut AuditScope,
ml: &ProtoModifyList,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
) -> Result<Self, OperationError> {
// For each ProtoModify, do a from.
let inner: Result<Vec<_>, _> = ml

View file

@ -164,7 +164,7 @@ impl Plugin for AttrUnique {
fn verify(
au: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Vec<Result<(), ConsistencyError>> {
// Only check live entries, not recycled.
let filt_in = filter!(f_pres("class"));

View file

@ -220,7 +220,7 @@ impl Plugin for Base {
fn verify(
au: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Vec<Result<(), ConsistencyError>> {
// Verify all uuid's are unique?
// Probably the literally worst thing ...
@ -329,7 +329,7 @@ mod tests {
preload,
create,
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(
au,
@ -424,7 +424,7 @@ mod tests {
preload,
create,
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(
au,

View file

@ -68,7 +68,7 @@ mod tests {
#[test]
fn test_domain_generate_uuid() {
run_test!(|server: &QueryServer, au: &mut AuditScope| {
let server_txn = server.write(duration_from_epoch_now());
let mut server_txn = server.write(duration_from_epoch_now());
let uuid_domain = Uuid::parse_str(UUID_DOMAIN_INFO)
.expect("Unable to parse constant UUID_DOMAIN_INFO");
let e_dom = server_txn

View file

@ -108,7 +108,7 @@ mod tests {
fn check_gid(
au: &mut AuditScope,
qs_write: &QueryServerWriteTransaction,
qs_write: &mut QueryServerWriteTransaction,
uuid: &str,
gid: u32,
) {
@ -143,7 +143,7 @@ mod tests {
preload,
create,
None,
|au, qs_write: &QueryServerWriteTransaction| check_gid(
|au, qs_write: &mut QueryServerWriteTransaction| check_gid(
au,
qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244",
@ -178,7 +178,7 @@ mod tests {
preload,
create,
None,
|au, qs_write: &QueryServerWriteTransaction| check_gid(
|au, qs_write: &mut QueryServerWriteTransaction| check_gid(
au,
qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244",
@ -212,7 +212,7 @@ mod tests {
filter!(f_eq("name", PartialValue::new_iutf8s("testperson"))),
modlist!([m_pres("class", &Value::new_class("posixgroup"))]),
None,
|au, qs_write: &QueryServerWriteTransaction| check_gid(
|au, qs_write: &mut QueryServerWriteTransaction| check_gid(
au,
qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244",
@ -245,7 +245,7 @@ mod tests {
filter!(f_eq("name", PartialValue::new_iutf8s("testperson"))),
modlist!([m_purge("gidnumber")]),
None,
|au, qs_write: &QueryServerWriteTransaction| check_gid(
|au, qs_write: &mut QueryServerWriteTransaction| check_gid(
au,
qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244",
@ -283,7 +283,7 @@ mod tests {
m_pres("gidnumber", &Value::new_uint32(2000))
]),
None,
|au, qs_write: &QueryServerWriteTransaction| check_gid(
|au, qs_write: &mut QueryServerWriteTransaction| check_gid(
au,
qs_write,
"83a0927f-3de1-45ec-bea0-2f7b997ef244",

View file

@ -63,7 +63,7 @@ macro_rules! run_create_test {
let r = qs_write.create(&mut au_test, &ce);
debug!("r: {:?}", r);
assert!(r == $expect);
$check(&mut au_test, &qs_write);
$check(&mut au_test, &mut qs_write);
match r {
Ok(_) => {
qs_write.commit(&mut au_test).expect("commit failure!");
@ -119,7 +119,7 @@ macro_rules! run_modify_test {
{
let mut qs_write = qs.write(duration_from_epoch_now());
let r = qs_write.modify(&mut au_test, &me);
$check(&mut au_test, &qs_write);
$check(&mut au_test, &mut qs_write);
debug!("{:?}", r);
assert!(r == $expect);
match r {
@ -176,7 +176,7 @@ macro_rules! run_delete_test {
{
let mut qs_write = qs.write(duration_from_epoch_now());
let r = qs_write.delete(&mut au_test, &de);
$check(&mut au_test, &qs_write);
$check(&mut au_test, &mut qs_write);
assert!(r == $expect);
match r {
Ok(_) => {

View file

@ -290,7 +290,7 @@ impl Plugin for MemberOf {
fn verify(
au: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Vec<Result<(), ConsistencyError>> {
let mut r = Vec::new();
@ -522,7 +522,7 @@ mod tests {
preload,
create,
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(au, qs, UUID_B, UUID_A);
@ -553,7 +553,7 @@ mod tests {
preload,
create,
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_A, UUID_A);
@ -605,7 +605,7 @@ mod tests {
preload,
create,
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(au, qs, UUID_A, UUID_A);
@ -663,7 +663,7 @@ mod tests {
preload,
create,
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(au, qs, UUID_A, UUID_A);
@ -728,7 +728,7 @@ mod tests {
Value::new_refer_s(&UUID_B).unwrap()
)]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(au, qs, UUID_B, UUID_A);
@ -763,7 +763,7 @@ mod tests {
Value::new_refer_s(&UUID_B).unwrap()
)]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_A, UUID_A);
@ -816,7 +816,7 @@ mod tests {
Value::new_refer_s(&UUID_C).unwrap()
)]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_A, UUID_A);
@ -872,7 +872,7 @@ mod tests {
Value::new_refer_s(&UUID_A).unwrap()
)]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(au, qs, UUID_A, UUID_A);
@ -938,7 +938,7 @@ mod tests {
Value::new_refer_s(&UUID_A).unwrap()
)]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_memberof!(au, qs, UUID_A, UUID_A);
@ -1006,7 +1006,7 @@ mod tests {
PartialValue::new_refer_s(&UUID_B).unwrap()
)]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_B, UUID_A);
@ -1044,7 +1044,7 @@ mod tests {
PartialValue::new_refer_s(&UUID_B).unwrap()
)]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_A, UUID_A);
@ -1101,7 +1101,7 @@ mod tests {
PartialValue::new_refer_s(&UUID_C).unwrap()
)]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_A, UUID_A);
@ -1168,7 +1168,7 @@ mod tests {
PartialValue::new_refer_s(&UUID_A).unwrap()
)]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_A, UUID_A);
@ -1259,7 +1259,7 @@ mod tests {
),
]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_A, UUID_A);
@ -1322,7 +1322,7 @@ mod tests {
preload,
filter!(f_eq("uuid", PartialValue::new_uuids(&UUID_A).unwrap())),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_B, UUID_A);
@ -1356,7 +1356,7 @@ mod tests {
preload,
filter!(f_eq("uuid", PartialValue::new_uuids(&UUID_A).unwrap())),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_B, UUID_A);
@ -1400,7 +1400,7 @@ mod tests {
preload,
filter!(f_eq("uuid", PartialValue::new_uuids(&UUID_B).unwrap())),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_A, UUID_A);
@ -1453,7 +1453,7 @@ mod tests {
preload,
filter!(f_eq("uuid", PartialValue::new_uuids(&UUID_A).unwrap())),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_B, UUID_A);
@ -1519,7 +1519,7 @@ mod tests {
preload,
filter!(f_eq("uuid", PartialValue::new_uuids(&UUID_B).unwrap())),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
// V-- this uuid is
// V-- memberof this UUID
assert_not_memberof!(au, qs, UUID_A, UUID_B);

View file

@ -102,7 +102,7 @@ trait Plugin {
fn verify(
_au: &mut AuditScope,
_qs: &QueryServerReadTransaction,
_qs: &mut QueryServerReadTransaction,
) -> Vec<Result<(), ConsistencyError>> {
debug!("plugin {} has an unimplemented verify!", Self::id());
vec![Err(ConsistencyError::Unknown)]
@ -425,7 +425,7 @@ impl Plugins {
pub fn run_verify(
au: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Vec<Result<(), ConsistencyError>> {
let mut results = Vec::new();
run_verify_plugin!(au, qs, &mut results, base::Base);

View file

@ -258,7 +258,7 @@ mod tests {
Value::from(IMPORT_HASH)
)]),
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
let e = qs
.internal_search_uuid(
au,

View file

@ -30,7 +30,7 @@ pub struct ReferentialIntegrity;
impl ReferentialIntegrity {
fn check_uuid_exists(
au: &mut AuditScope,
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
rtype: &str,
uuid_value: &Value,
) -> Result<(), OperationError> {
@ -189,7 +189,7 @@ impl Plugin for ReferentialIntegrity {
fn verify(
au: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Vec<Result<(), ConsistencyError>> {
// Get all entries as cand
// build a cand-uuid set
@ -312,7 +312,7 @@ mod tests {
preload,
create,
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(
au,
@ -350,7 +350,7 @@ mod tests {
preload,
create,
None,
|au: &mut AuditScope, qs: &QueryServerWriteTransaction| {
|au: &mut AuditScope, qs: &mut QueryServerWriteTransaction| {
let cands = qs
.internal_search(
au,
@ -592,7 +592,7 @@ mod tests {
preload,
filter!(f_eq("name", PartialValue::new_iutf8s("testgroup_a"))),
None,
|_au: &mut AuditScope, _qs: &QueryServerWriteTransaction| {}
|_au: &mut AuditScope, _qs: &mut QueryServerWriteTransaction| {}
);
}
@ -638,7 +638,7 @@ mod tests {
preload,
filter!(f_eq("name", PartialValue::new_iutf8s("testgroup_b"))),
None,
|_au: &mut AuditScope, _qs: &QueryServerWriteTransaction| {}
|_au: &mut AuditScope, _qs: &mut QueryServerWriteTransaction| {}
);
}
@ -666,7 +666,7 @@ mod tests {
preload,
filter!(f_eq("name", PartialValue::new_iutf8s("testgroup_b"))),
None,
|_au: &mut AuditScope, _qs: &QueryServerWriteTransaction| {}
|_au: &mut AuditScope, _qs: &mut QueryServerWriteTransaction| {}
);
}
}

View file

@ -43,7 +43,7 @@ impl Spn {
fn get_domain_name_ro(
au: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Result<String, OperationError> {
qs.internal_search_uuid(au, &UUID_DOMAIN_INFO_T)
.and_then(|e| {
@ -200,7 +200,7 @@ impl Plugin for Spn {
fn verify(
au: &mut AuditScope,
qs: &QueryServerReadTransaction,
qs: &mut QueryServerReadTransaction,
) -> Vec<Result<(), ConsistencyError>> {
// Verify that all items with spn's have valid spns.
// We need to consider the case that an item has a different origin domain too,

View file

@ -64,7 +64,7 @@ lazy_static! {
/// [`QueryServerWriteTransaction`]: struct.QueryServerWriteTransaction.html
pub trait QueryServerTransaction {
type BackendTransactionType: BackendTransaction;
fn get_be_txn(&self) -> &Self::BackendTransactionType;
fn get_be_txn(&mut self) -> &mut Self::BackendTransactionType;
type SchemaTransactionType: SchemaTransaction;
fn get_schema(&self) -> &Self::SchemaTransactionType;
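
get_be_txn moving from &self to &mut self is the signature change that ripples through the read paths in this file: one plausible reason a logically read-only call needs a mutable handle is a backing layer that updates its own cache on lookups. A hypothetical, much-simplified sketch of such a read-through cache (CachingBackend and load_from_storage are illustrative names, not the real IDL layer API):

use std::collections::HashMap;

// A lookup may insert into the cache, so `get` must take `&mut self`,
// and that requirement bubbles up to every caller on the read path.
struct CachingBackend {
    cache: HashMap<u64, Vec<u8>>,
}

impl CachingBackend {
    fn new() -> Self {
        CachingBackend { cache: HashMap::new() }
    }

    // Read-through get: a miss loads from storage and populates the cache.
    fn get(&mut self, id: u64) -> &Vec<u8> {
        self.cache.entry(id).or_insert_with(|| load_from_storage(id))
    }
}

// Placeholder for the real (e.g. sqlite-backed) fetch.
fn load_from_storage(id: u64) -> Vec<u8> {
    vec![id as u8]
}

fn main() {
    let mut be = CachingBackend::new();
    // Callers need a mutable handle even though this is logically a read.
    assert_eq!(be.get(7), &vec![7u8]);
}
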
@ -82,7 +82,7 @@ pub trait QueryServerTransaction {
/// [`access`]: ../access/index.html
/// [`fn search`]: trait.QueryServerTransaction.html#method.search
fn search_ext(
&self,
&mut self,
au: &mut AuditScope,
se: &SearchEvent,
) -> Result<Vec<Entry<EntryReduced, EntryCommitted>>, OperationError> {
@ -105,7 +105,7 @@ pub trait QueryServerTransaction {
}
fn search(
&self,
&mut self,
au: &mut AuditScope,
se: &SearchEvent,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
@ -154,7 +154,7 @@ pub trait QueryServerTransaction {
Ok(acp_res)
}
fn exists(&self, au: &mut AuditScope, ee: &ExistsEvent) -> Result<bool, OperationError> {
fn exists(&mut self, au: &mut AuditScope, ee: &ExistsEvent) -> Result<bool, OperationError> {
let mut audit_be = AuditScope::new("backend_exists");
let schema = self.get_schema();
@ -186,7 +186,7 @@ pub trait QueryServerTransaction {
//
// Remember, we don't care if the name is invalid, because search
// will validate/normalise the filter we construct for us. COOL!
fn name_to_uuid(&self, audit: &mut AuditScope, name: &str) -> Result<Uuid, OperationError> {
fn name_to_uuid(&mut self, audit: &mut AuditScope, name: &str) -> Result<Uuid, OperationError> {
// For now this just constructs a filter and searches, but later
// we could actually improve this to contact the backend and do
// index searches, completely bypassing id2entry.
@ -222,7 +222,7 @@ pub trait QueryServerTransaction {
}
fn uuid_to_name(
&self,
&mut self,
audit: &mut AuditScope,
uuid: &Uuid,
) -> Result<Option<Value>, OperationError> {
@ -271,7 +271,11 @@ pub trait QueryServerTransaction {
Ok(Some(name_res))
}
fn posixid_to_uuid(&self, audit: &mut AuditScope, name: &str) -> Result<Uuid, OperationError> {
fn posixid_to_uuid(
&mut self,
audit: &mut AuditScope,
name: &str,
) -> Result<Uuid, OperationError> {
let f_name = Some(f_eq("name", PartialValue::new_iutf8s(name)));
let f_spn = PartialValue::new_spn_s(name).map(|v| f_eq("spn", v));
@ -310,7 +314,7 @@ pub trait QueryServerTransaction {
// From internal, generate an exists event and dispatch
fn internal_exists(
&self,
&mut self,
au: &mut AuditScope,
filter: Filter<FilterInvalid>,
) -> Result<bool, OperationError> {
@ -329,7 +333,7 @@ pub trait QueryServerTransaction {
}
fn internal_search(
&self,
&mut self,
audit: &mut AuditScope,
filter: Filter<FilterInvalid>,
) -> Result<Vec<Entry<EntrySealed, EntryCommitted>>, OperationError> {
@ -344,7 +348,7 @@ pub trait QueryServerTransaction {
}
fn impersonate_search_valid(
&self,
&mut self,
audit: &mut AuditScope,
f_valid: Filter<FilterValid>,
f_intent_valid: Filter<FilterValid>,
@ -359,7 +363,7 @@ pub trait QueryServerTransaction {
// this applies ACP to filter result entries.
fn impersonate_search_ext_valid(
&self,
&mut self,
audit: &mut AuditScope,
f_valid: Filter<FilterValid>,
f_intent_valid: Filter<FilterValid>,
@ -374,7 +378,7 @@ pub trait QueryServerTransaction {
// Who they are will go here
fn impersonate_search(
&self,
&mut self,
audit: &mut AuditScope,
filter: Filter<FilterInvalid>,
filter_intent: Filter<FilterInvalid>,
@ -390,7 +394,7 @@ pub trait QueryServerTransaction {
}
fn impersonate_search_ext(
&self,
&mut self,
audit: &mut AuditScope,
filter: Filter<FilterInvalid>,
filter_intent: Filter<FilterInvalid>,
@ -408,7 +412,7 @@ pub trait QueryServerTransaction {
// Get a single entry by its UUID. This is heavily relied on for internal
// server operations, especially in login and acp checks for acp.
fn internal_search_uuid(
&self,
&mut self,
audit: &mut AuditScope,
uuid: &Uuid,
) -> Result<Entry<EntrySealed, EntryCommitted>, OperationError> {
@ -434,7 +438,7 @@ pub trait QueryServerTransaction {
}
fn impersonate_search_ext_uuid(
&self,
&mut self,
audit: &mut AuditScope,
uuid: &Uuid,
event: &Event,
@ -458,7 +462,7 @@ pub trait QueryServerTransaction {
/// Do a schema aware conversion from a String:String to String:Value for modification
/// present.
fn clone_value(
&self,
&mut self,
audit: &mut AuditScope,
attr: &str,
value: &str,
@ -535,7 +539,7 @@ pub trait QueryServerTransaction {
}
fn clone_partialvalue(
&self,
&mut self,
audit: &mut AuditScope,
attr: &str,
value: &str,
@ -622,7 +626,7 @@ pub trait QueryServerTransaction {
// In the opposite direction, we can resolve values for presentation
fn resolve_value(
&self,
&mut self,
audit: &mut AuditScope,
value: &Value,
) -> Result<String, OperationError> {
@ -640,8 +644,8 @@ pub trait QueryServerTransaction {
}
}
pub struct QueryServerReadTransaction {
be_txn: BackendReadTransaction,
pub struct QueryServerReadTransaction<'a> {
be_txn: BackendReadTransaction<'a>,
// Anything else? In the future, we'll need to have a schema transaction
// type, maybe others?
schema: SchemaReadTransaction,
@ -651,11 +655,11 @@ pub struct QueryServerReadTransaction {
// Actually conduct a search request
// This is the core of the server, as it processes the entire event
// applies all parts required in order and more.
impl QueryServerTransaction for QueryServerReadTransaction {
type BackendTransactionType = BackendReadTransaction;
impl<'a> QueryServerTransaction for QueryServerReadTransaction<'a> {
type BackendTransactionType = BackendReadTransaction<'a>;
fn get_be_txn(&self) -> &BackendReadTransaction {
&self.be_txn
fn get_be_txn(&mut self) -> &mut BackendReadTransaction<'a> {
&mut self.be_txn
}
type SchemaTransactionType = SchemaReadTransaction;
@ -671,11 +675,11 @@ impl QueryServerTransaction for QueryServerReadTransaction {
}
}
impl QueryServerReadTransaction {
impl<'a> QueryServerReadTransaction<'a> {
// Verify the data content of the server is as expected. This will probably
// call various functions for validation, including possibly plugin
// verifications.
fn verify(&self, au: &mut AuditScope) -> Vec<Result<(), ConsistencyError>> {
fn verify(&mut self, au: &mut AuditScope) -> Vec<Result<(), ConsistencyError>> {
let mut audit = AuditScope::new("verify");
// If we fail after backend, we need to return NOW because we can't
@ -724,7 +728,7 @@ pub struct QueryServerWriteTransaction<'a> {
committed: bool,
d_uuid: Uuid,
cid: Cid,
be_txn: BackendWriteTransaction,
be_txn: BackendWriteTransaction<'a>,
schema: SchemaWriteTransaction<'a>,
accesscontrols: AccessControlsWriteTransaction<'a>,
// We store a set of flags that indicate we need a reload of
@ -735,10 +739,10 @@ pub struct QueryServerWriteTransaction<'a> {
}
impl<'a> QueryServerTransaction for QueryServerWriteTransaction<'a> {
type BackendTransactionType = BackendWriteTransaction;
type BackendTransactionType = BackendWriteTransaction<'a>;
fn get_be_txn(&self) -> &BackendWriteTransaction {
&self.be_txn
fn get_be_txn(&mut self) -> &mut BackendWriteTransaction<'a> {
&mut self.be_txn
}
type SchemaTransactionType = SchemaWriteTransaction<'a>;
@ -767,7 +771,7 @@ pub struct QueryServer {
impl QueryServer {
pub fn new(be: Backend, schema: Schema) -> Self {
let (s_uuid, d_uuid) = {
let wr = be.write(BTreeSet::new());
let mut wr = be.write(BTreeSet::new());
(wr.get_db_s_uuid(), wr.get_db_d_uuid())
};
info!("Server ID -> {:?}", s_uuid);
@ -806,7 +810,7 @@ impl QueryServer {
// which today I don't think we have ... yet.
committed: false,
d_uuid: self.d_uuid,
cid: cid,
cid,
be_txn: self.be.write(idxmeta),
schema: schema_write,
accesscontrols: self.accesscontrols.write(),
@ -831,7 +835,7 @@ impl QueryServer {
// reloading to occur, which causes the idxmeta to update, and allows validation
// of the schema in the subsequent steps as we proceed.
let reindex_write_1 = self.write(ts);
let mut reindex_write_1 = self.write(ts);
reindex_write_1
.upgrade_reindex(audit, SYSTEM_INDEX_VERSION)
.and_then(|_| reindex_write_1.commit(audit))?;
@ -857,7 +861,7 @@ impl QueryServer {
// reindex and set to version + 1, this way when we bump the version
// we are essentially pushing this version id back up to step write_1
let reindex_write_2 = self.write(ts);
let mut reindex_write_2 = self.write(ts);
reindex_write_2
.upgrade_reindex(audit, SYSTEM_INDEX_VERSION + 1)
.and_then(|_| reindex_write_2.commit(audit))?;
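
The comments above describe reindexing gated on a stored index version that is bumped after each pass. A minimal sketch of that pattern, assuming hypothetical IndexStore, get_db_index_version, set_db_index_version and reindex helpers rather than the real backend API:

// Hypothetical in-memory stand-in for the backend's index metadata.
struct IndexStore {
    index_version: i64,
    reindexed: bool,
}

impl IndexStore {
    fn get_db_index_version(&self) -> i64 {
        self.index_version
    }
    fn set_db_index_version(&mut self, v: i64) -> Result<(), String> {
        self.index_version = v;
        Ok(())
    }
    fn reindex(&mut self) -> Result<(), String> {
        self.reindexed = true;
        Ok(())
    }
}

// Only rebuild indexes when the stored version lags the target, then record
// the new version so later startups skip the work.
fn upgrade_reindex(store: &mut IndexStore, target: i64) -> Result<(), String> {
    if store.get_db_index_version() < target {
        store.reindex()?;
        store.set_db_index_version(target)
    } else {
        Ok(())
    }
}

fn main() {
    let mut s = IndexStore { index_version: 0, reindexed: false };
    upgrade_reindex(&mut s, 3).expect("reindex failed");
    assert!(s.reindexed);
    assert_eq!(s.get_db_index_version(), 3);
}
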
@ -869,7 +873,7 @@ impl QueryServer {
}
pub fn verify(&self, au: &mut AuditScope) -> Vec<Result<(), ConsistencyError>> {
let r_txn = self.read();
let mut r_txn = self.read();
r_txn.verify(au)
}
}
@ -1135,7 +1139,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
res
}
pub fn purge_tombstones(&self, au: &mut AuditScope) -> Result<(), OperationError> {
pub fn purge_tombstones(&mut self, au: &mut AuditScope) -> Result<(), OperationError> {
// delete everything that is a tombstone.
// TODO #68: Has an appropriate amount of time/condition passed (i.e. replication events?)
@ -1177,7 +1181,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
res
}
pub fn purge_recycled(&self, au: &mut AuditScope) -> Result<(), OperationError> {
pub fn purge_recycled(&mut self, au: &mut AuditScope) -> Result<(), OperationError> {
// Send everything that is recycled to tombstone
// Search all recycled
@ -2038,7 +2042,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
self.internal_modify(audit, filt, modl)
}
pub fn reindex(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
pub fn reindex(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
// initiate a be reindex here. This could have been from first run checking
// the versions, or it could just be from the cli where an admin needs to do an
// indexing.
@ -2046,7 +2050,7 @@ impl<'a> QueryServerWriteTransaction<'a> {
}
pub(crate) fn upgrade_reindex(
&self,
&mut self,
audit: &mut AuditScope,
v: i64,
) -> Result<(), OperationError> {
@ -2551,7 +2555,7 @@ mod tests {
assert!(server_txn.commit(audit).is_ok());
// New txn, push the cid forward.
let server_txn = server.write(time_p2);
let mut server_txn = server.write(time_p2);
// Now purge
assert!(server_txn.purge_tombstones(audit).is_ok());
@ -2680,7 +2684,7 @@ mod tests {
assert!(server_txn.commit(audit).is_ok());
// Now, establish enough time for the recycled items to be purged.
let server_txn = server.write(time_p2);
let mut server_txn = server.write(time_p2);
// purge to tombstone, now that time has passed.
assert!(server_txn.purge_recycled(audit).is_ok());
@ -3201,7 +3205,7 @@ mod tests {
}
fn check_entry_has_mo(
qs: &QueryServerWriteTransaction,
qs: &mut QueryServerWriteTransaction,
audit: &mut AuditScope,
name: &str,
mo: &str,
@ -3289,7 +3293,7 @@ mod tests {
assert!(server_txn.revive_recycled(audit, &rev1).is_ok());
// check u1 contains MO ->
assert!(check_entry_has_mo(
&server_txn,
&mut server_txn,
audit,
"u1",
"cca2bbfc-5b43-43f3-be9e-f5b03b3defec"
@ -3304,13 +3308,13 @@ mod tests {
};
assert!(server_txn.revive_recycled(audit, &rev2).is_ok());
assert!(check_entry_has_mo(
&server_txn,
&mut server_txn,
audit,
"u2",
"e44cf9cd-9941-44cb-a02f-307b6e15ac54"
));
assert!(check_entry_has_mo(
&server_txn,
&mut server_txn,
audit,
"u2",
"d3132e6e-18ce-4b87-bee1-1d25e4bfe96d"
@ -3329,7 +3333,7 @@ mod tests {
assert!(server_txn.revive_recycled(audit, &rev3).is_ok());
assert!(
check_entry_has_mo(
&server_txn,
&mut server_txn,
audit,
"u3",
"36048117-e479-45ed-aeb5-611e8d83d5b1"
@ -3346,7 +3350,7 @@ mod tests {
assert!(server_txn.revive_recycled(audit, &rev4a).is_ok());
assert!(
check_entry_has_mo(
&server_txn,
&mut server_txn,
audit,
"u4",
"d5c59ac6-c533-4b00-989f-d0e183f07bab"
@ -3363,7 +3367,7 @@ mod tests {
assert!(server_txn.revive_recycled(audit, &rev4b).is_ok());
assert!(
check_entry_has_mo(
&server_txn,
&mut server_txn,
audit,
"u4",
"d5c59ac6-c533-4b00-989f-d0e183f07bab"

View file

@ -20,7 +20,7 @@ lazy_static! {
}
#[allow(non_camel_case_types)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, Hash)]
pub enum IndexType {
EQUALITY,
PRESENCE,