Simplify moar

This commit is contained in:
Silas Brack 2026-03-07 13:23:38 +01:00
parent 7f3ec69cf6
commit 07490efc28
14 changed files with 261 additions and 1061 deletions

216
src/db.rs
View file

@ -1,11 +1,8 @@
use rusqlite::{params, Connection, OpenFlags};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use crate::error::AppError;
// --- Record type ---
#[derive(Debug, Clone)]
pub struct Record {
pub key: String,
@ -13,174 +10,95 @@ pub struct Record {
pub size: Option<i64>,
}
// --- SQLite setup ---
fn apply_pragmas(conn: &Connection) {
conn.execute_batch(
"
PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;
PRAGMA busy_timeout = 5000;
PRAGMA temp_store = memory;
PRAGMA cache_size = -64000;
PRAGMA mmap_size = 268435456;
PRAGMA page_size = 4096;
",
"PRAGMA journal_mode = WAL;
PRAGMA synchronous = NORMAL;
PRAGMA busy_timeout = 5000;
PRAGMA temp_store = memory;
PRAGMA cache_size = -64000;
PRAGMA mmap_size = 268435456;
PRAGMA page_size = 4096;",
)
.expect("failed to set pragmas");
}
fn open_readonly(path: &str) -> Connection {
let conn = Connection::open_with_flags(
path,
OpenFlags::SQLITE_OPEN_READ_ONLY
| OpenFlags::SQLITE_OPEN_NO_MUTEX
| OpenFlags::SQLITE_OPEN_URI,
)
.expect("failed to open read connection");
apply_pragmas(&conn);
conn
/// Decode a JSON array of volume names; malformed input yields an empty list.
fn parse_volumes(s: &str) -> Vec<String> {
    match serde_json::from_str(s) {
        Ok(volumes) => volumes,
        Err(_) => Vec::new(),
    }
}
fn open_readwrite(path: &str) -> Connection {
let conn = Connection::open_with_flags(
path,
OpenFlags::SQLITE_OPEN_READ_WRITE
| OpenFlags::SQLITE_OPEN_CREATE
| OpenFlags::SQLITE_OPEN_NO_MUTEX
| OpenFlags::SQLITE_OPEN_URI,
)
.expect("failed to open write connection");
apply_pragmas(&conn);
conn
/// Encode a volume list as its JSON text form for storage in the `volumes` column.
fn encode_volumes(v: &[String]) -> String {
    let encoded = serde_json::to_string(v);
    encoded.unwrap()
}
/// Create the key-value schema if it does not already exist.
fn create_tables(conn: &Connection) {
    // One batch, one round trip; IF NOT EXISTS makes this idempotent.
    let ddl = "
CREATE TABLE IF NOT EXISTS kv (
key TEXT PRIMARY KEY,
volumes TEXT NOT NULL,
size INTEGER,
created_at INTEGER DEFAULT (unixepoch())
);
";
    conn.execute_batch(ddl).expect("failed to create tables");
}
/// Parse the JSON-encoded volume list; any parse failure is treated as "no volumes".
fn parse_volumes(volumes_json: &str) -> Vec<String> {
    serde_json::from_str(volumes_json).ok().unwrap_or_default()
}
/// Serialize the volume list to a JSON array string.
fn encode_volumes(volumes: &[String]) -> String {
    let json = serde_json::to_string(volumes);
    json.unwrap()
}
// --- ReadPool ---
/// A single SQLite connection behind a mutex, used for both reads and writes.
#[derive(Clone)]
pub struct ReadPool {
conns: Vec<Arc<Mutex<Connection>>>,
next: Arc<AtomicUsize>,
pub struct Db {
conn: Arc<Mutex<Connection>>,
}
impl ReadPool {
pub fn new(path: &str, size: usize) -> Self {
let conns = (0..size)
.map(|_| Arc::new(Mutex::new(open_readonly(path))))
.collect();
Self {
conns,
next: Arc::new(AtomicUsize::new(0)),
}
impl Db {
pub fn new(path: &str) -> Self {
let conn = Connection::open_with_flags(
path,
OpenFlags::SQLITE_OPEN_READ_WRITE
| OpenFlags::SQLITE_OPEN_CREATE
| OpenFlags::SQLITE_OPEN_NO_MUTEX
| OpenFlags::SQLITE_OPEN_URI,
)
.expect("failed to open database");
apply_pragmas(&conn);
conn.execute_batch(
"CREATE TABLE IF NOT EXISTS kv (
key TEXT PRIMARY KEY,
volumes TEXT NOT NULL,
size INTEGER,
created_at INTEGER DEFAULT (unixepoch())
);",
)
.expect("failed to create tables");
Self { conn: Arc::new(Mutex::new(conn)) }
}
pub async fn query<T, F>(&self, f: F) -> Result<T, AppError>
where
T: Send + 'static,
F: FnOnce(&Connection) -> Result<T, AppError> + Send + 'static,
{
let idx = self.next.fetch_add(1, Ordering::Relaxed) % self.conns.len();
let conn = self.conns[idx].clone();
pub async fn get(&self, key: &str) -> Result<Record, AppError> {
let conn = self.conn.clone();
let key = key.to_string();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
f(&conn)
let mut stmt = conn.prepare_cached("SELECT key, volumes, size FROM kv WHERE key = ?1")?;
Ok(stmt.query_row(params![key], |row| {
let vj: String = row.get(1)?;
Ok(Record { key: row.get(0)?, volumes: parse_volumes(&vj), size: row.get(2)? })
})?)
})
.await
.unwrap()
}
}
// --- Read query functions ---
pub fn get(conn: &Connection, key: &str) -> Result<Record, AppError> {
let mut stmt = conn.prepare_cached("SELECT key, volumes, size FROM kv WHERE key = ?1")?;
Ok(stmt.query_row(params![key], |row| {
let volumes_json: String = row.get(1)?;
Ok(Record {
key: row.get(0)?,
volumes: parse_volumes(&volumes_json),
size: row.get(2)?,
pub async fn list_keys(&self, prefix: &str) -> Result<Vec<String>, AppError> {
let conn = self.conn.clone();
let pattern = format!("{prefix}%");
tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
let mut stmt = conn.prepare_cached("SELECT key FROM kv WHERE key LIKE ?1 ORDER BY key")?;
let keys = stmt
.query_map(params![pattern], |row| row.get(0))?
.collect::<Result<Vec<String>, _>>()?;
Ok(keys)
})
})?)
}
/// Return every key beginning with `prefix`, in ascending order.
pub fn list_keys(conn: &Connection, prefix: &str) -> Result<Vec<String>, AppError> {
    // A trailing '%' turns the prefix into a SQL LIKE pattern.
    let pattern = format!("{prefix}%");
    let mut stmt = conn.prepare_cached("SELECT key FROM kv WHERE key LIKE ?1 ORDER BY key")?;
    let rows = stmt.query_map(params![pattern], |row| row.get(0))?;
    let keys: Vec<String> = rows.collect::<Result<_, _>>()?;
    Ok(keys)
}
/// Load every record in the kv table.
/// Fails on the first row that cannot be read or mapped.
pub fn all_records(conn: &Connection) -> Result<Vec<Record>, AppError> {
    let mut stmt = conn.prepare_cached("SELECT key, volumes, size FROM kv")?;
    let rows = stmt.query_map([], |row| {
        let volumes_json: String = row.get(1)?;
        Ok(Record {
            key: row.get(0)?,
            volumes: parse_volumes(&volumes_json),
            size: row.get(2)?,
        })
    })?;
    let mut records = Vec::new();
    for record in rows {
        records.push(record?);
    }
    Ok(records)
}
// --- WriterHandle ---
/// Cloneable handle to the single write connection.
/// Cloning bumps the `Arc`; all clones share one SQLite connection
/// serialized through the mutex.
#[derive(Clone)]
pub struct WriterHandle {
// The sole read-write connection; access is serialized via the mutex.
conn: Arc<Mutex<Connection>>,
}
impl WriterHandle {
pub fn new(path: &str) -> Self {
let conn = open_readwrite(path);
create_tables(&conn);
Self {
conn: Arc::new(Mutex::new(conn)),
}
.await
.unwrap()
}
pub async fn put(
&self,
key: String,
volumes: Vec<String>,
size: Option<i64>,
) -> Result<(), AppError> {
pub async fn put(&self, key: String, volumes: Vec<String>, size: Option<i64>) -> Result<(), AppError> {
let conn = self.conn.clone();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
let volumes_json = encode_volumes(&volumes);
conn.prepare_cached(
"INSERT INTO kv (key, volumes, size) VALUES (?1, ?2, ?3)
ON CONFLICT(key) DO UPDATE SET volumes = ?2, size = ?3",
)?
.execute(params![key, volumes_json, size])?;
.execute(params![key, encode_volumes(&volumes), size])?;
Ok(())
})
.await
@ -199,10 +117,7 @@ impl WriterHandle {
.unwrap()
}
pub async fn bulk_put(
&self,
records: Vec<(String, Vec<String>, Option<i64>)>,
) -> Result<(), AppError> {
pub async fn bulk_put(&self, records: Vec<(String, Vec<String>, Option<i64>)>) -> Result<(), AppError> {
let conn = self.conn.clone();
tokio::task::spawn_blocking(move || {
let conn = conn.lock().unwrap();
@ -211,12 +126,23 @@ impl WriterHandle {
ON CONFLICT(key) DO UPDATE SET volumes = ?2, size = ?3",
)?;
for (key, volumes, size) in &records {
let volumes_json = encode_volumes(volumes);
stmt.execute(params![key, volumes_json, size])?;
stmt.execute(params![key, encode_volumes(volumes), size])?;
}
Ok(())
})
.await
.unwrap()
}
/// Blocking (non-async) variant that loads every record from the kv table.
/// Acquires the connection mutex for the duration of the query.
pub fn all_records_sync(&self) -> Result<Vec<Record>, AppError> {
    let conn = self.conn.lock().unwrap();
    let mut stmt = conn.prepare_cached("SELECT key, volumes, size FROM kv")?;
    let mapped = stmt.query_map([], |row| {
        let volumes_json: String = row.get(1)?;
        Ok(Record {
            key: row.get(0)?,
            volumes: parse_volumes(&volumes_json),
            size: row.get(2)?,
        })
    })?;
    let mut out = Vec::new();
    for rec in mapped {
        out.push(rec?);
    }
    Ok(out)
}
}