212 262 db idl fixes (#269)

Fixes #212 and Fixes #262. This is largely a performance and security improvement to how the databases are handled. The major cases are create/modify/reindex performance (SQL statements are now prepared once and reused through a statement cache, and entry/index writes are buffered in the in-memory caches and flushed in a single pass at commit), and the permissions of the on-disk client cache database, which is now created under a restrictive umask.
Firstyear 2020-06-19 10:00:54 +10:00 committed by GitHub
parent fec28e03e2
commit 0b15477ef4
12 changed files with 431 additions and 186 deletions

Cargo.lock (generated)

@ -350,9 +350,9 @@ dependencies = [
[[package]]
name = "adler32"
version = "1.0.4"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2"
checksum = "567b077b825e468cc974f0020d4082ee6e03132512f207ef1a02fd5d00d1f32d"
[[package]]
name = "aho-corasick"
@ -435,13 +435,14 @@ dependencies = [
[[package]]
name = "backtrace"
version = "0.3.48"
version = "0.3.49"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0df2f85c8a2abbe3b7d7e748052fdd9b76a0458fdeb16ad4223f5eca78c7c130"
checksum = "05100821de9e028f12ae3d189176b41ee198341eb8f369956407fea2f5cc666c"
dependencies = [
"addr2line",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
]
@ -476,9 +477,9 @@ checksum = "b41b7ea54a0c9d92199de89e20e58d49f02f8e699814ef3fdf266f6f748d15c7"
[[package]]
name = "base64"
version = "0.12.1"
version = "0.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53d1ccbaf7d9ec9537465a97bf19edc1a4e158ecb49fc16178202238c569cc42"
checksum = "e223af0dc48c96d4f8342ec01a4974f139df863896b316681efd36742f22cc67"
[[package]]
name = "bit-set"
@ -647,9 +648,9 @@ dependencies = [
[[package]]
name = "concread"
version = "0.1.14"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ca3d5adc408121d96cb5e3ca77656ca0b6e96429c244086d300ca9ed6a2fd1d"
checksum = "39051fb0b539c35c50dfaaa9e703a05d185ea15c29449392ca929370be9ac4fb"
dependencies = [
"crossbeam",
"crossbeam-epoch",
@ -922,9 +923,9 @@ dependencies = [
[[package]]
name = "derive_more"
version = "0.99.7"
version = "0.99.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2127768764f1556535c01b5326ef94bd60ff08dcfbdc544d53e69ed155610f5d"
checksum = "bc655351f820d774679da6cdc23355a93de496867d8203496675162e17b1d671"
dependencies = [
"proc-macro2",
"quote",
@ -969,9 +970,9 @@ checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0"
[[package]]
name = "dtoa"
version = "0.4.5"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4358a9e11b9a09cf52383b451b49a169e8d797b68aa02301ff586d70d9661ea3"
checksum = "134951f4028bdadb9b84baf4232681efbf277da25144b9b0ad65df75946c422b"
[[package]]
name = "either"
@ -1469,9 +1470,9 @@ dependencies = [
[[package]]
name = "itoa"
version = "0.4.5"
version = "0.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8b7a7c0c47db5545ed3fef7468ee7bb5b74691498139e4b3f6a20685dc6dd8e"
checksum = "dc6f3ad7b9d11a0c00842ff8de1b60ee58661048eb8049ed33c73594f359d7e6"
[[package]]
name = "js-sys"
@ -1491,7 +1492,7 @@ dependencies = [
"actix-rt",
"actix-session",
"actix-web",
"base64 0.12.1",
"base64 0.12.2",
"cargo-husky",
"chrono",
"concread",
@ -1984,9 +1985,9 @@ dependencies = [
[[package]]
name = "object"
version = "0.19.0"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cbca9424c482ee628fa549d9c812e2cd22f1180b9222c9200fdfa6eb31aecb2"
checksum = "1ab52be62400ca80aa00285d25253d7f7c437b7375c4de678f5405d3afe82ca5"
[[package]]
name = "once_cell"
@ -2079,9 +2080,9 @@ dependencies = [
[[package]]
name = "paste"
version = "0.1.16"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d508492eeb1e5c38ee696371bf7b9fc33c83d46a7d451606b96458fbbbdc2dec"
checksum = "026c63fe245362be0322bfec5a9656d458d13f9cfb1785d1b38458b9968e8080"
dependencies = [
"paste-impl",
"proc-macro-hack",
@ -2089,14 +2090,11 @@ dependencies = [
[[package]]
name = "paste-impl"
version = "0.1.16"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84f328a6a63192b333fce5fbb4be79db6758a4d518dfac6d54412f1492f72d32"
checksum = "7b9281a268ec213237dcd2aa3c3d0f46681b04ced37c1616fd36567a9e6954b0"
dependencies = [
"proc-macro-hack",
"proc-macro2",
"quote",
"syn",
]
[[package]]
@ -2306,10 +2304,11 @@ dependencies = [
[[package]]
name = "rayon"
version = "1.3.0"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db6ce3297f9c85e16621bb8cca38a06779ffc31bb8184e1be4bed2be4678a098"
checksum = "62f02856753d04e03e26929f820d0a0a337ebe71f849801eea335d464b349080"
dependencies = [
"autocfg",
"crossbeam-deque",
"either",
"rayon-core",
@ -2317,9 +2316,9 @@ dependencies = [
[[package]]
name = "rayon-core"
version = "1.7.0"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08a89b46efaf957e52b18062fb2f4660f8b8a4dde1807ca002690868ef2c85a9"
checksum = "e92e15d89083484e11353891f1af602cc661426deb9564c298b270c726973280"
dependencies = [
"crossbeam-deque",
"crossbeam-queue",
@ -2387,7 +2386,7 @@ version = "0.10.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b82c9238b305f26f53443e3a4bc8528d64b8d0bee408ec949eb7bf5635ec680"
dependencies = [
"base64 0.12.1",
"base64 0.12.2",
"bytes",
"cookie 0.12.0",
"cookie_store",
@ -2431,9 +2430,9 @@ dependencies = [
[[package]]
name = "ring"
version = "0.16.14"
version = "0.16.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06b3fefa4f12272808f809a0af618501fdaba41a58963c5fb72238ab0be09603"
checksum = "952cd6b98c85bbc30efa1ba5783b8abf12fec8b3287ffa52605b9432313e34e4"
dependencies = [
"cc",
"libc",
@ -2781,9 +2780,9 @@ checksum = "6446ced80d6c486436db5c078dde11a9f73d42b57fb273121e160b84f63d894c"
[[package]]
name = "structopt"
version = "0.3.14"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "863246aaf5ddd0d6928dfeb1a9ca65f505599e4e1b399935ef7e75107516b4ef"
checksum = "de2f5e239ee807089b62adce73e48c625e0ed80df02c7ab3f068f5db5281065c"
dependencies = [
"clap",
"lazy_static",
@ -2792,9 +2791,9 @@ dependencies = [
[[package]]
name = "structopt-derive"
version = "0.4.7"
version = "0.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d239ca4b13aee7a2142e6795cbd69e457665ff8037aed33b3effdc430d2f927a"
checksum = "510413f9de616762a4fbeab62509bf15c729603b72d7cd71280fbca431b1c118"
dependencies = [
"heck",
"proc-macro-error",
@ -2871,18 +2870,18 @@ dependencies = [
[[package]]
name = "thiserror"
version = "1.0.19"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b13f926965ad00595dd129fa12823b04bbf866e9085ab0a5f2b05b850fbfc344"
checksum = "7dfdd070ccd8ccb78f4ad66bf1982dc37f620ef696c6b5028fe2ed83dd3d0d08"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.19"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "893582086c2f98cde18f906265a65b5030a074b1046c674ae898be6519a7f479"
checksum = "bd80fc12f73063ac132ac92aceea36734f04a1d93c1240c6944e23a3b8841793"
dependencies = [
"proc-macro2",
"quote",
@ -2965,6 +2964,12 @@ dependencies = [
"serde_json",
]
[[package]]
name = "tinyvec"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "53953d2d3a5ad81d9f844a32f14ebb121f50b650cd59d0ee2a07cf13c617efed"
[[package]]
name = "tokio"
version = "0.2.21"
@ -3134,11 +3139,11 @@ dependencies = [
[[package]]
name = "unicode-normalization"
version = "0.1.12"
version = "0.1.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4"
checksum = "6fb19cf769fa8c6a80a162df694621ebeb4dafb606470b2b2fce0be40a98a977"
dependencies = [
"smallvec",
"tinyvec",
]
[[package]]


@ -1,4 +1,10 @@
// cargo install cargo-audit
// cargo install cargo-outdated
* cargo audit
* cargo outdated
* bump all cargo.toml versions
find kani* -name Cargo.toml -exec cat '{}' \; | grep -e '^version ='


@ -74,6 +74,7 @@ pub enum OperationError {
InvalidAttributeName(String),
InvalidAttribute(String),
InvalidDBState,
InvalidCacheState,
InvalidValueState,
InvalidEntryID,
InvalidRequestState,


@ -1,4 +1,5 @@
use kanidm_proto::v1::{UnixGroupToken, UnixUserToken};
use libc::umask;
use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::NO_PARAMS;
@ -24,7 +25,9 @@ pub struct DbTxn<'a> {
impl Db {
pub fn new(path: &str) -> Result<Self, ()> {
let before = unsafe { umask(0o0027) };
let manager = SqliteConnectionManager::file(path);
let _ = unsafe { umask(before) };
// We only build a single thread. If we need more than one, we'll
// need to re-do this to account for path = "" for debug.
let builder1 = Pool::builder().max_size(1);
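
For context on the security half of this change: umask is the standard Unix mechanism for restricting the mode of newly created files, and the pair of calls above ensures the sqlite file is born without group-write or world access. A minimal standalone sketch of the same pattern, assuming a Unix target (create_private_file is an illustrative name, not part of the patch):

use libc::umask;
use std::fs::File;

fn create_private_file(path: &str) -> std::io::Result<File> {
    // Tighten the process umask so the new file cannot be created
    // group-writable or world-accessible (0o027 masks those bits).
    let before = unsafe { umask(0o027) };
    let res = File::create(path);
    // umask is process-global state, so restore it immediately to
    // avoid affecting unrelated file creation on other threads.
    let _ = unsafe { umask(before) };
    res
}
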
@ -71,6 +74,14 @@ impl<'a> DbTxn<'a> {
}
pub fn migrate(&self) -> Result<(), ()> {
self.conn.set_prepared_statement_cache_capacity(16);
self.conn
.prepare_cached("PRAGMA journal_mode=WAL;")
.and_then(|mut wal_stmt| wal_stmt.query(NO_PARAMS).map(|_| ()))
.map_err(|e| {
error!("sqlite account_t create error -> {:?}", e);
})?;
// Setup two tables - one for accounts, one for groups.
// correctly index the columns.
// Optional pw hash field
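
Two themes that recur through the rest of the patch both appear in migrate(): statements go through rusqlite's per-connection prepared-statement cache, and the journal is switched to WAL. A self-contained sketch of the two calls (the table and path are illustrative; WAL only takes effect on an on-disk database):

use rusqlite::{Connection, NO_PARAMS};

fn open_tuned(path: &str) -> rusqlite::Result<Connection> {
    let conn = Connection::open(path)?;
    // Size the LRU cache of prepared statements; later prepare_cached
    // calls with identical SQL reuse the compiled statement handle.
    conn.set_prepared_statement_cache_capacity(16);
    // journal_mode=WAL returns a result row ("wal"), so it has to be
    // driven as a query - execute() errors when rows come back.
    conn.prepare_cached("PRAGMA journal_mode=WAL;")
        .and_then(|mut stmt| stmt.query(NO_PARAMS).map(|_| ()))?;
    conn.execute(
        "CREATE TABLE IF NOT EXISTS demo_t (id INTEGER PRIMARY KEY, data TEXT)",
        NO_PARAMS,
    )?;
    // A second prepare_cached with the same SQL string is a cache hit.
    let mut stmt = conn.prepare_cached("SELECT COUNT(*) FROM demo_t")?;
    let count: i64 = stmt.query_row(NO_PARAMS, |row| row.get(0))?;
    assert_eq!(count, 0);
    Ok(conn)
}
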
@ -174,7 +185,7 @@ impl<'a> DbTxn<'a> {
fn get_account_data_name(&self, account_id: &str) -> Result<Vec<(Vec<u8>, i64)>, ()> {
let mut stmt = self.conn
-.prepare(
+.prepare_cached(
"SELECT token, expiry FROM account_t WHERE uuid = :account_id OR name = :account_id OR spn = :account_id"
)
.map_err(|e| {
@ -200,7 +211,7 @@ impl<'a> DbTxn<'a> {
fn get_account_data_gid(&self, gid: u32) -> Result<Vec<(Vec<u8>, i64)>, ()> {
let mut stmt = self
.conn
.prepare("SELECT token, expiry FROM account_t WHERE gidnumber = :gid")
.prepare_cached("SELECT token, expiry FROM account_t WHERE gidnumber = :gid")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
})?;
@ -252,7 +263,7 @@ impl<'a> DbTxn<'a> {
pub fn get_accounts(&self) -> Result<Vec<UnixUserToken>, ()> {
let mut stmt = self
.conn
.prepare("SELECT token FROM account_t")
.prepare_cached("SELECT token FROM account_t")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
})?;
@ -326,7 +337,7 @@ impl<'a> DbTxn<'a> {
if updated == 0 {
let mut stmt = self.conn
.prepare("INSERT INTO account_t (uuid, name, spn, gidnumber, token, expiry) VALUES (:uuid, :name, :spn, :gidnumber, :token, :expiry) ON CONFLICT(uuid) DO UPDATE SET name=excluded.name, spn=excluded.name, gidnumber=excluded.gidnumber, token=excluded.token, expiry=excluded.expiry")
.prepare_cached("INSERT INTO account_t (uuid, name, spn, gidnumber, token, expiry) VALUES (:uuid, :name, :spn, :gidnumber, :token, :expiry) ON CONFLICT(uuid) DO UPDATE SET name=excluded.name, spn=excluded.name, gidnumber=excluded.gidnumber, token=excluded.token, expiry=excluded.expiry")
.map_err(|e| {
error!("sqlite prepare error -> {:?}", e);
})?;
@ -352,7 +363,7 @@ impl<'a> DbTxn<'a> {
// First remove everything that already exists:
let mut stmt = self
.conn
.prepare("DELETE FROM memberof_t WHERE a_uuid = :a_uuid")
.prepare_cached("DELETE FROM memberof_t WHERE a_uuid = :a_uuid")
.map_err(|e| {
error!("sqlite prepare error -> {:?}", e);
})?;
@ -366,7 +377,7 @@ impl<'a> DbTxn<'a> {
let mut stmt = self
.conn
.prepare("INSERT INTO memberof_t (a_uuid, g_uuid) VALUES (:a_uuid, :g_uuid)")
.prepare_cached("INSERT INTO memberof_t (a_uuid, g_uuid) VALUES (:a_uuid, :g_uuid)")
.map_err(|e| {
error!("sqlite prepare error -> {:?}", e);
})?;
@ -412,7 +423,9 @@ impl<'a> DbTxn<'a> {
pub fn check_account_password(&self, a_uuid: &str, cred: &str) -> Result<bool, ()> {
let mut stmt = self
.conn
.prepare("SELECT password FROM account_t WHERE uuid = :a_uuid AND password IS NOT NULL")
.prepare_cached(
"SELECT password FROM account_t WHERE uuid = :a_uuid AND password IS NOT NULL",
)
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
})?;
@ -459,7 +472,7 @@ impl<'a> DbTxn<'a> {
fn get_group_data_name(&self, grp_id: &str) -> Result<Vec<(Vec<u8>, i64)>, ()> {
let mut stmt = self.conn
-.prepare(
+.prepare_cached(
"SELECT token, expiry FROM group_t WHERE uuid = :grp_id OR name = :grp_id OR spn = :grp_id"
)
.map_err(|e| {
@ -485,7 +498,7 @@ impl<'a> DbTxn<'a> {
fn get_group_data_gid(&self, gid: u32) -> Result<Vec<(Vec<u8>, i64)>, ()> {
let mut stmt = self
.conn
.prepare("SELECT token, expiry FROM group_t WHERE gidnumber = :gid")
.prepare_cached("SELECT token, expiry FROM group_t WHERE gidnumber = :gid")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
})?;
@ -537,7 +550,7 @@ impl<'a> DbTxn<'a> {
pub fn get_group_members(&self, g_uuid: &str) -> Result<Vec<UnixUserToken>, ()> {
let mut stmt = self
.conn
.prepare("SELECT account_t.token FROM (account_t, memberof_t) WHERE account_t.uuid = memberof_t.a_uuid AND memberof_t.g_uuid = :g_uuid")
.prepare_cached("SELECT account_t.token FROM (account_t, memberof_t) WHERE account_t.uuid = memberof_t.a_uuid AND memberof_t.g_uuid = :g_uuid")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
})?;
@ -571,7 +584,7 @@ impl<'a> DbTxn<'a> {
pub fn get_groups(&self) -> Result<Vec<UnixGroupToken>, ()> {
let mut stmt = self
.conn
.prepare("SELECT token FROM group_t")
.prepare_cached("SELECT token FROM group_t")
.map_err(|e| {
error!("sqlite select prepare failure -> {:?}", e);
})?;
@ -611,7 +624,7 @@ impl<'a> DbTxn<'a> {
})?;
let mut stmt = self.conn
.prepare("INSERT OR REPLACE INTO group_t (uuid, name, spn, gidnumber, token, expiry) VALUES (:uuid, :name, :spn, :gidnumber, :token, :expiry)")
.prepare_cached("INSERT OR REPLACE INTO group_t (uuid, name, spn, gidnumber, token, expiry) VALUES (:uuid, :name, :spn, :gidnumber, :token, :expiry)")
.map_err(|e| {
error!("sqlite prepare error -> {:?}", e);
})?;


@ -29,8 +29,6 @@ path = "src/server/main.rs"
kanidm_proto = { path = "../kanidm_proto", version = "0.1" }
actix = "0.9"
# actix = { version = "0.9", path = "../../actix" }
actix-rt = "1.1"
actix-web = { version = "2.0", features = ["openssl"] }
actix-session = "0.3"


@ -4,4 +4,4 @@ db_path = "/tmp/kanidm.db"
tls_ca = "../insecure/ca.pem"
tls_cert = "../insecure/cert.pem"
tls_key = "../insecure/key.pem"
# log_level = "
log_level = "perffull"


@ -98,11 +98,9 @@ impl fmt::Display for LogTag {
macro_rules! lqueue {
($au:expr, $tag:expr, $($arg:tt)*) => ({
use crate::audit::LogTag;
-/*
if cfg!(test) {
println!($($arg)*)
}
-*/
if ($au.level & $tag as u32) == $tag as u32 {
use std::fmt;
$au.log_event(


@ -11,6 +11,7 @@ use concread::cowcell::*;
use idlset::IDLBitRange;
use kanidm_proto::v1::{ConsistencyError, OperationError};
use std::collections::BTreeSet;
use std::ops::DerefMut;
use std::time::Duration;
use uuid::Uuid;
@ -66,6 +67,8 @@ pub struct IdlArcSqlite {
idl_cache: Arc<IdlCacheKey, Box<IDLBitRange>>,
name_cache: Arc<NameCacheKey, NameCacheValue>,
op_ts_max: CowCell<Option<Duration>>,
allids: CowCell<IDLBitRange>,
maxid: CowCell<u64>,
}
pub struct IdlArcSqliteReadTransaction<'a> {
@ -73,6 +76,7 @@ pub struct IdlArcSqliteReadTransaction<'a> {
entry_cache: ArcReadTxn<'a, u64, Box<Entry<EntrySealed, EntryCommitted>>>,
idl_cache: ArcReadTxn<'a, IdlCacheKey, Box<IDLBitRange>>,
name_cache: ArcReadTxn<'a, NameCacheKey, NameCacheValue>,
allids: CowCellReadTxn<IDLBitRange>,
}
pub struct IdlArcSqliteWriteTransaction<'a> {
@ -81,6 +85,8 @@ pub struct IdlArcSqliteWriteTransaction<'a> {
idl_cache: ArcWriteTxn<'a, IdlCacheKey, Box<IDLBitRange>>,
name_cache: ArcWriteTxn<'a, NameCacheKey, NameCacheValue>,
op_ts_max: CowCellWriteTxn<'a, Option<Duration>>,
allids: CowCellWriteTxn<'a, IDLBitRange>,
maxid: CowCellWriteTxn<'a, u64>,
}
macro_rules! get_identry {
@ -90,9 +96,9 @@ macro_rules! get_identry {
$idl:expr
) => {{
lperf_trace_segment!($au, "be::idl_arc_sqlite::get_identry", || {
+let mut result: Vec<Entry<_, _>> = Vec::new();
match $idl {
IDL::Partial(idli) | IDL::PartialThreshold(idli) | IDL::Indexed(idli) => {
-let mut result: Vec<Entry<_, _>> = Vec::new();
let mut nidl = IDLBitRange::new();
idli.into_iter().for_each(|i| {
@ -107,21 +113,37 @@ macro_rules! get_identry {
if !nidl.is_empty() {
// Now, get anything from nidl that is needed.
let mut db_result = $self.db.get_identry($au, &IDL::Partial(nidl))?;
// Clone everything from db_result into the cache.
db_result.iter().for_each(|e| {
$self.entry_cache.insert(e.get_id(), Box::new(e.clone()));
});
// Merge the two vecs
result.append(&mut db_result);
}
-// Return
-Ok(result)
}
-IDL::ALLIDS => $self.db.get_identry($au, $idl),
-}
IDL::ALLIDS => {
// VERY similar to above, but we skip adding the entries to the cache
// on miss to prevent scan/invalidation attacks.
let idli = (*$self.allids).clone();
let mut nidl = IDLBitRange::new();
(&idli)
.into_iter()
.for_each(|i| match $self.entry_cache.get(&i) {
Some(eref) => result.push(eref.as_ref().clone()),
None => unsafe { nidl.push_id(i) },
});
if !nidl.is_empty() {
// Now, get anything from nidl that is needed.
let mut db_result = $self.db.get_identry($au, &IDL::Partial(nidl))?;
// Merge the two vecs
result.append(&mut db_result);
}
}
};
// Return
Ok(result)
})
}};
}
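
The ALLIDS arm above is the interesting one: a full-table scan is served from the cached allids set plus whatever entries are already hot, and the misses fetched from sqlite are deliberately not inserted into the cache, so an unindexed search cannot evict the working set (a scan/invalidation attack). A simplified model of that policy, with std collections standing in for IDLBitRange and the ARC:

use std::collections::{BTreeMap, BTreeSet};

struct Store {
    disk: BTreeMap<u64, String>,  // stand-in for id2entry in sqlite
    cache: BTreeMap<u64, String>, // stand-in for the entry ARC
    allids: BTreeSet<u64>,        // every id known to exist
}

impl Store {
    fn get_all(&self) -> Vec<String> {
        let mut result = Vec::new();
        let mut misses = BTreeSet::new();
        for id in &self.allids {
            match self.cache.get(id) {
                Some(e) => result.push(e.clone()),
                None => {
                    misses.insert(*id);
                }
            }
        }
        // Fetch the misses from disk but do NOT cache them: a scan
        // must not be able to push the hot set out of the cache.
        for id in &misses {
            if let Some(e) = self.disk.get(id) {
                result.push(e.clone());
            }
        }
        result
    }
}
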
@ -466,45 +488,116 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::commit", || {
let IdlArcSqliteWriteTransaction {
db,
-entry_cache,
-idl_cache,
-name_cache,
+mut entry_cache,
+mut idl_cache,
+mut name_cache,
op_ts_max,
allids,
maxid,
} = self;
// Write any dirty items to the disk.
lperf_trace_segment!(audit, "be::idl_arc_sqlite::commit<entry>", || {
entry_cache
.iter_mut_mark_clean()
.try_for_each(|(k, v)| match v {
Some(e) => db.write_identry(audit, e),
None => db.delete_identry(audit, *k),
})
})
.map_err(|e| {
ladmin_error!(audit, "Failed to sync entry cache to sqlite {:?}", e);
e
})?;
lperf_trace_segment!(audit, "be::idl_arc_sqlite::commit<idl>", || {
idl_cache.iter_mut_mark_clean().try_for_each(|(k, v)| {
match v {
Some(idl) => db.write_idl(audit, k.a.as_str(), &k.i, k.k.as_str(), idl),
None => {
// Due to how we remove items, we always write an empty idl
// to the cache, so this should never be none.
unreachable!();
}
}
})
})
.map_err(|e| {
ladmin_error!(audit, "Failed to sync idl cache to sqlite {:?}", e);
e
})?;
lperf_trace_segment!(audit, "be::idl_arc_sqlite::commit<names>", || {
name_cache
.iter_mut_mark_clean()
.try_for_each(|(k, v)| match (k, v) {
(NameCacheKey::Name2Uuid(k), Some(NameCacheValue::U(v))) => {
db.write_name2uuid_add(audit, k, v)
}
(NameCacheKey::Name2Uuid(k), None) => db.write_name2uuid_rem(audit, k),
(NameCacheKey::Uuid2Spn(uuid), Some(NameCacheValue::S(v))) => {
db.write_uuid2spn(audit, uuid, Some(v))
}
(NameCacheKey::Uuid2Spn(uuid), None) => {
db.write_uuid2spn(audit, uuid, None)
}
(NameCacheKey::Uuid2Rdn(uuid), Some(NameCacheValue::R(v))) => {
db.write_uuid2rdn(audit, uuid, Some(v))
}
(NameCacheKey::Uuid2Rdn(uuid), None) => {
db.write_uuid2rdn(audit, uuid, None)
}
_ => Err(OperationError::InvalidCacheState),
})
})
.map_err(|e| {
ladmin_error!(audit, "Failed to sync name cache to sqlite {:?}", e);
e
})?;
// Undo the caches in the reverse order.
db.commit(audit).and_then(|()| {
op_ts_max.commit();
name_cache.commit();
idl_cache.commit();
entry_cache.commit();
allids.commit();
maxid.commit();
Ok(())
})
})
}
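
This commit() is the other half of the write-back design: during the transaction the insert_dirty/remove_dirty calls only touch the caches, and the iter_mut_mark_clean passes here stream every dirty slot (value = write, tombstone = delete) to sqlite in one batch before the caches and the sqlite transaction commit. A minimal model of that flush, with a HashMap standing in for the ARC:

use std::collections::HashMap;

// Each slot holds an optional payload (None is a tombstone for a
// deleted entry) plus a dirty flag set by insert_dirty/remove_dirty.
type Cache = HashMap<u64, (Option<String>, bool)>;

fn flush(cache: &mut Cache, db: &mut HashMap<u64, String>) {
    for (id, (val, dirty)) in cache.iter_mut() {
        if !*dirty {
            continue; // clean slots were already persisted earlier
        }
        match val {
            Some(data) => {
                db.insert(*id, data.clone()); // write-back
            }
            None => {
                db.remove(id); // tombstone -> delete from the store
            }
        }
        *dirty = false; // the moral equivalent of iter_mut_mark_clean
    }
}
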
pub fn get_id2entry_max_id(&self) -> Result<u64, OperationError> {
-// TODO: We could cache this too, and have this via the setup call
-// to get the init value, using the ArcCell.
-self.db.get_id2entry_max_id()
+Ok(*self.maxid)
}
pub fn set_id2entry_max_id(&mut self, mid: u64) {
assert!(mid > *self.maxid);
*self.maxid = mid;
}
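
maxid (and allids) now live in concread CowCells, so the max id is answered from memory rather than a SELECT MAX(id) round trip, readers keep a stable snapshot, and a writer's staged value only becomes visible at commit. The CowCell calls used in this file, in a tiny standalone form (assuming concread's cowcell API as exercised by this patch):

use concread::cowcell::CowCell;

fn main() {
    let maxid: CowCell<u64> = CowCell::new(0);

    let mut wr = maxid.write(); // CowCellWriteTxn, DerefMut to u64
    *wr = 42;                   // staged only; readers still see 0
    assert_eq!(*maxid.read(), 0);

    wr.commit();                // atomically publish the new value
    assert_eq!(*maxid.read(), 42);
}
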
pub fn write_identries<'b, I>(
&'b mut self,
au: &mut AuditScope,
-entries: I,
+mut entries: I,
) -> Result<(), OperationError>
where
I: Iterator<Item = &'b Entry<EntrySealed, EntryCommitted>>,
{
lperf_trace_segment!(au, "be::idl_arc_sqlite::write_identries", || {
-// Danger! We know that the entry cache is valid to manipulate here
-// but rust doesn't know that so it prevents the mut/immut borrow.
-let e_cache = unsafe { &mut *(&mut self.entry_cache as *mut ArcWriteTxn<_, _>) };
-let m_entries = entries.map(|e| {
-e_cache.insert(e.get_id(), Box::new(e.clone()));
-e
-});
-self.db.write_identries(au, m_entries)
entries.try_for_each(|e| {
ltrace!(au, "Inserting {:?} to cache", e.get_id());
if e.get_id() == 0 {
Err(OperationError::InvalidEntryID)
} else {
(*self.allids).insert_id(e.get_id());
self.entry_cache
.insert_dirty(e.get_id(), Box::new(e.clone()));
Ok(())
}
})
})
}
@ -522,19 +615,25 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
self.db.write_identries_raw(au, entries)
}
-pub fn delete_identry<I>(&mut self, au: &mut AuditScope, idl: I) -> Result<(), OperationError>
+pub fn delete_identry<I>(
+&mut self,
+au: &mut AuditScope,
+mut idl: I,
+) -> Result<(), OperationError>
where
I: Iterator<Item = u64>,
{
lperf_trace_segment!(au, "be::idl_arc_sqlite::delete_identry", || {
-// Danger! We know that the entry cache is valid to manipulate here
-// but rust doesn't know that so it prevents the mut/immut borrow.
-let e_cache = unsafe { &mut *(&mut self.entry_cache as *mut ArcWriteTxn<_, _>) };
-let m_idl = idl.map(|i| {
-e_cache.remove(i);
-i
-});
-self.db.delete_identry(au, m_idl)
idl.try_for_each(|i| {
ltrace!(au, "Removing {:?} from cache", i);
if i == 0 {
Err(OperationError::InvalidEntryID)
} else {
(*self.allids).remove_id(i);
self.entry_cache.remove_dirty(i);
Ok(())
}
})
})
}
@ -557,11 +656,13 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
// db lookup on this idl.
if idl.is_empty() {
self.idl_cache
-.insert(cache_key, Box::new(IDLBitRange::new()));
+.insert_dirty(cache_key, Box::new(IDLBitRange::new()));
} else {
-self.idl_cache.insert(cache_key, Box::new(idl.clone()));
+self.idl_cache
+.insert_dirty(cache_key, Box::new(idl.clone()));
}
-self.db.write_idl(audit, attr, itype, idx_key, idl)
+// self.db.write_idl(audit, attr, itype, idx_key, idl)
+Ok(())
})
}
@ -576,16 +677,21 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
add: BTreeSet<String>,
) -> Result<(), OperationError> {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_name2uuid_add", || {
/*
self.db
.write_name2uuid_add(audit, uuid, &add)
.and_then(|_| {
-add.into_iter().for_each(|k| {
-let cache_key = NameCacheKey::Name2Uuid(k);
-let cache_value = NameCacheValue::U(*uuid);
-self.name_cache.insert(cache_key, cache_value)
-});
-Ok(())
*/
add.into_iter().for_each(|k| {
let cache_key = NameCacheKey::Name2Uuid(k);
let cache_value = NameCacheValue::U(*uuid);
self.name_cache.insert_dirty(cache_key, cache_value)
});
Ok(())
/*
})
*/
})
}
@ -595,13 +701,13 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
rem: BTreeSet<String>,
) -> Result<(), OperationError> {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_name2uuid_add", || {
-self.db.write_name2uuid_rem(audit, &rem).and_then(|_| {
-rem.into_iter().for_each(|k| {
-let cache_key = NameCacheKey::Name2Uuid(k);
-self.name_cache.remove(cache_key)
-});
-Ok(())
-})
// self.db.write_name2uuid_rem(audit, &rem).and_then(|_| {
rem.into_iter().for_each(|k| {
let cache_key = NameCacheKey::Name2Uuid(k);
self.name_cache.remove_dirty(cache_key)
});
Ok(())
// })
})
}
@ -616,18 +722,22 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
k: Option<Value>,
) -> Result<(), OperationError> {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_uuid2spn", || {
/*
self.db
.write_uuid2spn(audit, uuid, k.as_ref())
.and_then(|_| {
-let cache_key = NameCacheKey::Uuid2Spn(*uuid);
-match k {
-Some(v) => self
-.name_cache
-.insert(cache_key, NameCacheValue::S(Box::new(v))),
-None => self.name_cache.remove(cache_key),
-}
-Ok(())
*/
let cache_key = NameCacheKey::Uuid2Spn(*uuid);
match k {
Some(v) => self
.name_cache
.insert_dirty(cache_key, NameCacheValue::S(Box::new(v))),
None => self.name_cache.remove_dirty(cache_key),
}
Ok(())
/*
})
*/
})
}
@ -642,16 +752,22 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
k: Option<String>,
) -> Result<(), OperationError> {
lperf_trace_segment!(audit, "be::idl_arc_sqlite::write_uuid2rdn", || {
/*
self.db
.write_uuid2rdn(audit, uuid, k.as_ref())
.and_then(|_| {
-let cache_key = NameCacheKey::Uuid2Rdn(*uuid);
-match k {
-Some(s) => self.name_cache.insert(cache_key, NameCacheValue::R(s)),
-None => self.name_cache.remove(cache_key),
-}
-Ok(())
*/
let cache_key = NameCacheKey::Uuid2Rdn(*uuid);
match k {
Some(s) => self
.name_cache
.insert_dirty(cache_key, NameCacheValue::R(s)),
None => self.name_cache.remove_dirty(cache_key),
}
Ok(())
/*
})
*/
})
}
@ -712,8 +828,17 @@ impl<'a> IdlArcSqliteWriteTransaction<'a> {
self.db.set_db_index_version(v)
}
-pub fn setup(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
-self.db.setup(audit)
+pub fn setup(&mut self, audit: &mut AuditScope) -> Result<(), OperationError> {
self.db
.setup(audit)
.and_then(|()| self.db.get_allids(audit))
.map(|mut ids| {
std::mem::swap(self.allids.deref_mut(), &mut ids);
})
.and_then(|()| self.db.get_id2entry_max_id())
.map(|mid| {
*self.maxid = mid;
})
}
}
@ -742,6 +867,10 @@ impl IdlArcSqlite {
DEFAULT_CACHE_WMISS,
);
let allids = CowCell::new(IDLBitRange::new());
let maxid = CowCell::new(0);
let op_ts_max = CowCell::new(None);
Ok(IdlArcSqlite {
@ -750,6 +879,8 @@ impl IdlArcSqlite {
idl_cache,
name_cache,
op_ts_max,
allids,
maxid,
})
}
@ -758,12 +889,15 @@ impl IdlArcSqlite {
let entry_cache_read = self.entry_cache.read();
let idl_cache_read = self.idl_cache.read();
let name_cache_read = self.name_cache.read();
let allids_read = self.allids.read();
let db_read = self.db.read();
IdlArcSqliteReadTransaction {
db: db_read,
entry_cache: entry_cache_read,
idl_cache: idl_cache_read,
name_cache: name_cache_read,
allids: allids_read,
}
}
@ -773,6 +907,8 @@ impl IdlArcSqlite {
let idl_cache_write = self.idl_cache.write();
let name_cache_write = self.name_cache.write();
let op_ts_max_write = self.op_ts_max.write();
let allids_write = self.allids.write();
let maxid_write = self.maxid.write();
let db_write = self.db.write();
IdlArcSqliteWriteTransaction {
db: db_write,
@ -780,6 +916,8 @@ impl IdlArcSqlite {
idl_cache: idl_cache_write,
name_cache: name_cache_write,
op_ts_max: op_ts_max_write,
allids: allids_write,
maxid: maxid_write,
}
}


@ -8,7 +8,6 @@ use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;
use rusqlite::OptionalExtension;
use rusqlite::NO_PARAMS;
-use std::collections::BTreeSet;
use std::convert::{TryFrom, TryInto};
use std::time::Duration;
use uuid::Uuid;
@ -17,6 +16,8 @@ use uuid::Uuid;
const DBV_ID2ENTRY: &str = "id2entry";
const DBV_INDEXV: &str = "indexv";
// TODO: Needs to change over time as number of indexes grows?
const PREPARE_STMT_CACHE: usize = 256;
#[derive(Debug)]
pub struct IdSqliteEntry {
@ -99,7 +100,7 @@ pub trait IdlSqliteTransaction {
IDL::ALLIDS => {
let mut stmt = self
.get_conn()
.prepare("SELECT id, data FROM id2entry")
.prepare_cached("SELECT id, data FROM id2entry")
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
@ -131,7 +132,7 @@ pub trait IdlSqliteTransaction {
IDL::Partial(idli) | IDL::PartialThreshold(idli) | IDL::Indexed(idli) => {
let mut stmt = self
.get_conn()
.prepare("SELECT id, data FROM id2entry WHERE id = :idl")
.prepare_cached("SELECT id, data FROM id2entry WHERE id = :idl")
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
@ -191,7 +192,7 @@ pub trait IdlSqliteTransaction {
let tname = format!("idx_{}_{}", itype.as_idx_str(), attr);
let mut stmt = self
.get_conn()
.prepare("SELECT COUNT(name) from sqlite_master where name = :tname")
.prepare_cached("SELECT COUNT(name) from sqlite_master where name = :tname")
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
@ -229,10 +230,13 @@ pub trait IdlSqliteTransaction {
itype.as_idx_str(),
attr
);
-let mut stmt = self.get_conn().prepare(query.as_str()).map_err(|e| {
-ladmin_error!(audit, "SQLite Error {:?}", e);
-OperationError::SQLiteError
-})?;
+let mut stmt = self
+.get_conn()
+.prepare_cached(query.as_str())
+.map_err(|e| {
+ladmin_error!(audit, "SQLite Error {:?}", e);
+OperationError::SQLiteError
+})?;
let idl_raw: Option<Vec<u8>> = stmt
.query_row_named(&[(":idx_key", &idx_key)], |row| row.get(0))
// We don't mind if it doesn't exist
@ -264,7 +268,7 @@ pub trait IdlSqliteTransaction {
// The table exists - lets now get the actual index itself.
let mut stmt = self
.get_conn()
.prepare("SELECT uuid FROM idx_name2uuid WHERE name = :name")
.prepare_cached("SELECT uuid FROM idx_name2uuid WHERE name = :name")
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
@ -295,7 +299,7 @@ pub trait IdlSqliteTransaction {
// The table exists - lets now get the actual index itself.
let mut stmt = self
.get_conn()
.prepare("SELECT spn FROM idx_uuid2spn WHERE uuid = :uuid")
.prepare_cached("SELECT spn FROM idx_uuid2spn WHERE uuid = :uuid")
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
@ -336,7 +340,7 @@ pub trait IdlSqliteTransaction {
// The table exists - lets now get the actual index itself.
let mut stmt = self
.get_conn()
.prepare("SELECT rdn FROM idx_uuid2rdn WHERE uuid = :uuid")
.prepare_cached("SELECT rdn FROM idx_uuid2rdn WHERE uuid = :uuid")
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
@ -411,7 +415,7 @@ pub trait IdlSqliteTransaction {
// This allow is critical as it resolves a life time issue in stmt.
#[allow(clippy::let_and_return)]
fn verify(&self) -> Vec<Result<(), ConsistencyError>> {
let mut stmt = match self.get_conn().prepare("PRAGMA integrity_check;") {
let mut stmt = match self.get_conn().prepare_cached("PRAGMA integrity_check;") {
Ok(r) => r,
Err(_) => return vec![Err(ConsistencyError::SqliteIntegrityFailure)],
};
@ -525,7 +529,7 @@ impl IdlSqliteWriteTransaction {
pub fn get_id2entry_max_id(&self) -> Result<u64, OperationError> {
let mut stmt = self
.conn
.prepare("SELECT MAX(id) as id_max FROM id2entry")
.prepare_cached("SELECT MAX(id) as id_max FROM id2entry")
.map_err(|_| OperationError::SQLiteError)?;
// This exists check tells us whether any rows WERE returned,
// that way we know to shortcut or not.
@ -547,6 +551,7 @@ impl IdlSqliteWriteTransaction {
}
}
/*
pub fn write_identries<'b, I>(
&'b self,
au: &mut AuditScope,
@ -571,6 +576,23 @@ impl IdlSqliteWriteTransaction {
self.write_identries_raw(au, raw_entries?.into_iter())
})
}
*/
pub fn write_identry(
&self,
au: &mut AuditScope,
entry: &Entry<EntrySealed, EntryCommitted>,
) -> Result<(), OperationError> {
let dbe = entry.to_dbentry();
let data = serde_cbor::to_vec(&dbe).map_err(|_| OperationError::SerdeCborError)?;
let raw_entries = std::iter::once(IdRawEntry {
id: entry.get_id(),
data,
});
self.write_identries_raw(au, raw_entries)
}
pub fn write_identries_raw<I>(
&self,
@ -582,7 +604,7 @@ impl IdlSqliteWriteTransaction {
{
let mut stmt = self
.conn
.prepare("INSERT OR REPLACE INTO id2entry (id, data) VALUES(:id, :data)")
.prepare_cached("INSERT OR REPLACE INTO id2entry (id, data) VALUES(:id, :data)")
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
@ -601,14 +623,15 @@ impl IdlSqliteWriteTransaction {
})
}
-pub fn delete_identry<I>(&self, au: &mut AuditScope, mut idl: I) -> Result<(), OperationError>
/*
+pub fn delete_identries<I>(&self, au: &mut AuditScope, mut idl: I) -> Result<(), OperationError>
where
I: Iterator<Item = u64>,
{
lperf_trace_segment!(au, "be::idl_sqlite::delete_identry", || {
lperf_trace_segment!(au, "be::idl_sqlite::delete_identries", || {
let mut stmt = self
.conn
.prepare("DELETE FROM id2entry WHERE id = :id")
.prepare_cached("DELETE FROM id2entry WHERE id = :id")
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
@ -635,6 +658,37 @@ impl IdlSqliteWriteTransaction {
})
})
}
*/
pub fn delete_identry(&self, au: &mut AuditScope, id: u64) -> Result<(), OperationError> {
// lperf_trace_segment!(au, "be::idl_sqlite::delete_identry", || {
let mut stmt = self
.conn
.prepare_cached("DELETE FROM id2entry WHERE id = :id")
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let iid: i64 = id
.try_into()
.map_err(|_| OperationError::InvalidEntryID)
.and_then(|i| {
if i > 0 {
Ok(i)
} else {
Err(OperationError::InvalidEntryID)
}
})?;
debug_assert!(iid > 0);
stmt.execute(&[&iid]).map(|_| ()).map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
// })
}
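
The conversion block above exists because sqlite stores INTEGERs as signed 64-bit values, so the u64 entry id must fit in an i64, and id 0 is treated as invalid by the backend. The same validation in isolation (to_db_id is an illustrative helper name):

use std::convert::TryInto;

fn to_db_id(id: u64) -> Result<i64, &'static str> {
    // Fails for ids above i64::MAX, which sqlite cannot represent.
    let iid: i64 = id.try_into().map_err(|_| "id exceeds i64::MAX")?;
    if iid > 0 {
        Ok(iid)
    } else {
        Err("id 0 is reserved as invalid")
    }
}
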
pub fn write_idl(
&self,
@ -656,7 +710,7 @@ impl IdlSqliteWriteTransaction {
);
self.conn
-.prepare(query.as_str())
+.prepare_cached(query.as_str())
.and_then(|mut stmt| stmt.execute_named(&[(":key", &idx_key)]))
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
@ -678,7 +732,7 @@ impl IdlSqliteWriteTransaction {
);
self.conn
-.prepare(query.as_str())
+.prepare_cached(query.as_str())
.and_then(|mut stmt| {
stmt.execute_named(&[(":key", &idx_key), (":idl", &idl_raw)])
})
@ -708,42 +762,36 @@ impl IdlSqliteWriteTransaction {
pub fn write_name2uuid_add(
&self,
audit: &mut AuditScope,
+name: &str,
uuid: &Uuid,
-add: &BTreeSet<String>,
) -> Result<(), OperationError> {
let uuids = uuid.to_hyphenated_ref().to_string();
-add.iter().try_for_each(|k| {
-self.conn
-.execute_named(
-"INSERT OR REPLACE INTO idx_name2uuid (name, uuid) VALUES(:name, :uuid)",
-&[(":name", &k), (":uuid", &uuids)],
-)
-.map(|_| ())
-.map_err(|e| {
-ladmin_error!(audit, "SQLite Error {:?}", e);
-OperationError::SQLiteError
-})
-})
self.conn
.prepare_cached(
"INSERT OR REPLACE INTO idx_name2uuid (name, uuid) VALUES(:name, :uuid)",
)
.and_then(|mut stmt| stmt.execute_named(&[(":name", &name), (":uuid", &uuids)]))
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
}
pub fn write_name2uuid_rem(
&self,
audit: &mut AuditScope,
-rem: &BTreeSet<String>,
+name: &str,
) -> Result<(), OperationError> {
-rem.iter().try_for_each(|k| {
-self.conn
-.execute_named(
-"DELETE FROM idx_name2uuid WHERE name = :name",
-&[(":name", &k)],
-)
-.map(|_| ())
-.map_err(|e| {
-ladmin_error!(audit, "SQLite Error {:?}", e);
-OperationError::SQLiteError
-})
-})
self.conn
.prepare_cached("DELETE FROM idx_name2uuid WHERE name = :name")
.and_then(|mut stmt| stmt.execute_named(&[(":name", &name)]))
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
}
pub fn create_uuid2spn(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
@ -772,10 +820,10 @@ impl IdlSqliteWriteTransaction {
let data =
serde_cbor::to_vec(&dbv1).map_err(|_e| OperationError::SerdeCborError)?;
self.conn
-.execute_named(
+.prepare_cached(
"INSERT OR REPLACE INTO idx_uuid2spn (uuid, spn) VALUES(:uuid, :spn)",
-&[(":uuid", &uuids), (":spn", &data)],
)
+.and_then(|mut stmt| stmt.execute_named(&[(":uuid", &uuids), (":spn", &data)]))
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
@ -784,10 +832,8 @@ impl IdlSqliteWriteTransaction {
}
None => self
.conn
-.execute_named(
-"DELETE FROM idx_uuid2spn WHERE uuid = :uuid",
-&[(":uuid", &uuids)],
-)
+.prepare_cached("DELETE FROM idx_uuid2spn WHERE uuid = :uuid")
+.and_then(|mut stmt| stmt.execute_named(&[(":uuid", &uuids)]))
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
@ -819,10 +865,10 @@ impl IdlSqliteWriteTransaction {
match k {
Some(k) => self
.conn
-.execute_named(
+.prepare_cached(
"INSERT OR REPLACE INTO idx_uuid2rdn (uuid, rdn) VALUES(:uuid, :rdn)",
-&[(":uuid", &uuids), (":rdn", &k)],
)
+.and_then(|mut stmt| stmt.execute_named(&[(":uuid", &uuids), (":rdn", &k)]))
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
@ -830,10 +876,8 @@ impl IdlSqliteWriteTransaction {
}),
None => self
.conn
-.execute_named(
-"DELETE FROM idx_uuid2rdn WHERE uuid = :uuid",
-&[(":uuid", &uuids)],
-)
+.prepare_cached("DELETE FROM idx_uuid2rdn WHERE uuid = :uuid")
+.and_then(|mut stmt| stmt.execute_named(&[(":uuid", &uuids)]))
.map(|_| ())
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
@ -871,7 +915,9 @@ impl IdlSqliteWriteTransaction {
pub fn list_idxs(&self, audit: &mut AuditScope) -> Result<Vec<String>, OperationError> {
let mut stmt = self
.get_conn()
.prepare("SELECT name from sqlite_master where type='table' and name LIKE 'idx_%'")
.prepare_cached(
"SELECT name from sqlite_master where type='table' and name LIKE 'idx_%'",
)
.map_err(|e| {
ladmin_error!(audit, "SQLite Error {:?}", e);
OperationError::SQLiteError
@ -899,7 +945,7 @@ impl IdlSqliteWriteTransaction {
idx_table_list.iter().try_for_each(|idx_table| {
ltrace!(audit, "removing idx_table -> {:?}", idx_table);
self.conn
.prepare(format!("DROP TABLE {}", idx_table).as_str())
.prepare_cached(format!("DROP TABLE {}", idx_table).as_str())
.and_then(|mut stmt| stmt.execute(NO_PARAMS).map(|_| ()))
.map_err(|e| {
ladmin_error!(audit, "sqlite error {:?}", e);
@ -1026,13 +1072,44 @@ impl IdlSqliteWriteTransaction {
})
}
pub(crate) fn get_allids(&self, au: &mut AuditScope) -> Result<IDLBitRange, OperationError> {
ltrace!(au, "Building allids...");
let mut stmt = self
.conn
.prepare_cached("SELECT id FROM id2entry")
.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
let res = stmt.query_map(NO_PARAMS, |row| row.get(0)).map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})?;
res.map(|v| {
v.map_err(|e| {
ladmin_error!(au, "SQLite Error {:?}", e);
OperationError::SQLiteError
})
.and_then(|id: i64| {
// Convert the sqlite i64 back to the raw u64 id
id.try_into().map_err(|e| {
ladmin_error!(au, "I64 Parse Error {:?}", e);
OperationError::SQLiteError
})
})
})
.collect()
}
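
The final collect() leans on Rust's FromIterator impl for Result: an iterator of Result<u64, E> collects into one Result that short-circuits on the first Err (here the target is an IDLBitRange, which must implement FromIterator<u64> for this to compile; a Vec shows the same mechanics with std types only):

use std::convert::TryFrom;

fn gather(rows: Vec<Result<i64, String>>) -> Result<Vec<u64>, String> {
    rows.into_iter()
        .map(|r| r.and_then(|id| u64::try_from(id).map_err(|e| format!("{:?}", e))))
        .collect() // the first Err aborts the collection and is returned
}
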
pub fn setup(&self, audit: &mut AuditScope) -> Result<(), OperationError> {
self.conn
.set_prepared_statement_cache_capacity(PREPARE_STMT_CACHE);
// Enable WAL mode, which is just faster and better.
//
-// We have to use stmt + prepare because execute can't handle
+// We have to use stmt + prepare_cached because execute can't handle
// the "wal" row on result when this works!
self.conn
-.prepare("PRAGMA journal_mode=WAL;")
+.prepare_cached("PRAGMA journal_mode=WAL;")
.and_then(|mut wal_stmt| wal_stmt.query(NO_PARAMS).map(|_| ()))
.map_err(|e| {
ladmin_error!(audit, "sqlite error {:?}", e);
@ -1181,7 +1258,8 @@ impl IdlSqlite {
// a single DB thread, else we cause consistency issues.
builder1.max_size(1)
} else {
-builder1.max_size(pool_size)
+// Have to add 1 for the write thread.
+builder1.max_size(pool_size + 1)
};
// Look at max_size and thread_pool here for perf later
let pool = builder2.build(manager).map_err(|e| {
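
The +1 reserves a connection for the single writer on top of the read pool, while in-memory (path = "") databases stay pinned to one connection for consistency. A sketch of just this sizing decision with r2d2 (build_pool is an illustrative wrapper, not the patch's code):

use r2d2::Pool;
use r2d2_sqlite::SqliteConnectionManager;

fn build_pool(path: &str, pool_size: u32) -> Result<Pool<SqliteConnectionManager>, r2d2::Error> {
    let manager = SqliteConnectionManager::file(path);
    let max = if path.is_empty() {
        // An in-memory/anonymous db must stay on a single connection,
        // else each pooled connection sees its own private database.
        1
    } else {
        // One connection per reader, plus one for the write thread.
        pool_size + 1
    };
    Pool::builder().max_size(max).build(manager)
}
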


@ -647,6 +647,8 @@ impl<'a> BackendWriteTransaction<'a> {
self.idlayer.write_identries(au, c_entries.iter())?;
self.idlayer.set_id2entry_max_id(id_max);
// Now update the indexes as required.
for e in c_entries.iter() {
self.entry_index(au, None, Some(e))?
@ -751,6 +753,8 @@ impl<'a> BackendWriteTransaction<'a> {
//
// At the end, we flush those cchange outs in a single run.
// For create this is probably a
// TODO: Can this be improved?
#[allow(clippy::cognitive_complexity)]
fn entry_index(
&mut self,
audit: &mut AuditScope,
@ -1193,7 +1197,7 @@ impl Backend {
// access any parts of
// the indexing subsystem here.
let r = {
-let idl_write = be.idlayer.write();
+let mut idl_write = be.idlayer.write();
idl_write.setup(audit).and_then(|_| idl_write.commit(audit))
};


@ -72,6 +72,8 @@ impl TryFrom<&Credential> for CredHandler {
}
}
// TODO: Can this be improved?
#[allow(clippy::cognitive_complexity)]
impl CredHandler {
pub fn validate(
&mut self,


@ -42,6 +42,8 @@ impl Plugin for Base {
// contains who is creating them
// the schema of the running instance
// TODO: Can this be improved?
#[allow(clippy::cognitive_complexity)]
fn pre_create_transform(
au: &mut AuditScope,
qs: &mut QueryServerWriteTransaction,