// SQLite-backed key/value metadata store (read pool + single-writer handle).
use rusqlite::{params, Connection, OpenFlags};
|
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
|
use std::sync::{Arc, Mutex};
|
|
|
|
use crate::error::AppError;
|
|
|
|
// --- Record type ---
|
|
|
|
/// A single key/value metadata record from the `kv` table.
#[derive(Debug, Clone)]
pub struct Record {
    /// Primary key of the record.
    pub key: String,
    /// Volume identifiers holding this key's data; stored in SQLite as a
    /// JSON-encoded array (see `parse_volumes` / `encode_volumes`).
    pub volumes: Vec<String>,
    /// Size in bytes if known; a NULL `size` column maps to `None`.
    pub size: Option<i64>,
}
|
|
|
|
// --- SQLite setup ---
|
|
|
|
fn apply_pragmas(conn: &Connection) {
|
|
conn.execute_batch(
|
|
"
|
|
PRAGMA journal_mode = WAL;
|
|
PRAGMA synchronous = NORMAL;
|
|
PRAGMA busy_timeout = 5000;
|
|
PRAGMA temp_store = memory;
|
|
PRAGMA cache_size = -64000;
|
|
PRAGMA mmap_size = 268435456;
|
|
PRAGMA page_size = 4096;
|
|
",
|
|
)
|
|
.expect("failed to set pragmas");
|
|
}
|
|
|
|
fn open_readonly(path: &str) -> Connection {
|
|
let conn = Connection::open_with_flags(
|
|
path,
|
|
OpenFlags::SQLITE_OPEN_READ_ONLY
|
|
| OpenFlags::SQLITE_OPEN_NO_MUTEX
|
|
| OpenFlags::SQLITE_OPEN_URI,
|
|
)
|
|
.expect("failed to open read connection");
|
|
apply_pragmas(&conn);
|
|
conn
|
|
}
|
|
|
|
fn open_readwrite(path: &str) -> Connection {
|
|
let conn = Connection::open_with_flags(
|
|
path,
|
|
OpenFlags::SQLITE_OPEN_READ_WRITE
|
|
| OpenFlags::SQLITE_OPEN_CREATE
|
|
| OpenFlags::SQLITE_OPEN_NO_MUTEX
|
|
| OpenFlags::SQLITE_OPEN_URI,
|
|
)
|
|
.expect("failed to open write connection");
|
|
apply_pragmas(&conn);
|
|
conn
|
|
}
|
|
|
|
/// Create the `kv` schema if it does not already exist.
///
/// Columns:
/// - `key`        — TEXT primary key
/// - `volumes`    — JSON-encoded array of volume ids (see `encode_volumes`)
/// - `size`       — optional byte count (nullable INTEGER)
/// - `created_at` — insertion unix timestamp; `unixepoch()` requires
///   SQLite >= 3.38 — NOTE(review): confirm the minimum supported SQLite
///   version for deployments.
///
/// # Panics
/// Panics if the DDL fails; the schema is static, so failure indicates a
/// corrupt database file or an incompatible SQLite version.
fn create_tables(conn: &Connection) {
    conn.execute_batch(
        "
        CREATE TABLE IF NOT EXISTS kv (
            key TEXT PRIMARY KEY,
            volumes TEXT NOT NULL,
            size INTEGER,
            created_at INTEGER DEFAULT (unixepoch())
        );
        ",
    )
    .expect("failed to create tables");
}
|
|
|
|
fn parse_volumes(volumes_json: &str) -> Vec<String> {
|
|
serde_json::from_str(volumes_json).unwrap_or_default()
|
|
}
|
|
|
|
fn encode_volumes(volumes: &[String]) -> String {
|
|
serde_json::to_string(volumes).unwrap()
|
|
}
|
|
|
|
// --- ReadPool ---
|
|
|
|
/// A fixed-size pool of read-only SQLite connections shared across tasks.
///
/// Cloning is cheap: clones share both the connection set and the
/// round-robin counter.
#[derive(Clone)]
pub struct ReadPool {
    // Each connection has its own mutex so distinct readers can run on
    // different connections concurrently.
    conns: Vec<Arc<Mutex<Connection>>>,
    // Monotonic counter; `query` uses it modulo pool size for round-robin
    // connection selection.
    next: Arc<AtomicUsize>,
}
|
|
|
|
impl ReadPool {
|
|
pub fn new(path: &str, size: usize) -> Self {
|
|
let conns = (0..size)
|
|
.map(|_| Arc::new(Mutex::new(open_readonly(path))))
|
|
.collect();
|
|
Self {
|
|
conns,
|
|
next: Arc::new(AtomicUsize::new(0)),
|
|
}
|
|
}
|
|
|
|
pub async fn query<T, F>(&self, f: F) -> Result<T, AppError>
|
|
where
|
|
T: Send + 'static,
|
|
F: FnOnce(&Connection) -> Result<T, AppError> + Send + 'static,
|
|
{
|
|
let idx = self.next.fetch_add(1, Ordering::Relaxed) % self.conns.len();
|
|
let conn = self.conns[idx].clone();
|
|
tokio::task::spawn_blocking(move || {
|
|
let conn = conn.lock().unwrap();
|
|
f(&conn)
|
|
})
|
|
.await
|
|
.unwrap()
|
|
}
|
|
}
|
|
|
|
// --- Read query functions ---
|
|
|
|
pub fn get(conn: &Connection, key: &str) -> Result<Record, AppError> {
|
|
let mut stmt = conn.prepare_cached("SELECT key, volumes, size FROM kv WHERE key = ?1")?;
|
|
Ok(stmt.query_row(params![key], |row| {
|
|
let volumes_json: String = row.get(1)?;
|
|
Ok(Record {
|
|
key: row.get(0)?,
|
|
volumes: parse_volumes(&volumes_json),
|
|
size: row.get(2)?,
|
|
})
|
|
})?)
|
|
}
|
|
|
|
pub fn list_keys(conn: &Connection, prefix: &str) -> Result<Vec<String>, AppError> {
|
|
let mut stmt = conn.prepare_cached("SELECT key FROM kv WHERE key LIKE ?1 ORDER BY key")?;
|
|
let pattern = format!("{prefix}%");
|
|
let keys = stmt
|
|
.query_map(params![pattern], |row| row.get(0))?
|
|
.collect::<Result<Vec<String>, _>>()?;
|
|
Ok(keys)
|
|
}
|
|
|
|
pub fn all_records(conn: &Connection) -> Result<Vec<Record>, AppError> {
|
|
let mut stmt = conn.prepare_cached("SELECT key, volumes, size FROM kv")?;
|
|
let records = stmt
|
|
.query_map([], |row| {
|
|
let volumes_json: String = row.get(1)?;
|
|
Ok(Record {
|
|
key: row.get(0)?,
|
|
volumes: parse_volumes(&volumes_json),
|
|
size: row.get(2)?,
|
|
})
|
|
})?
|
|
.collect::<Result<Vec<_>, _>>()?;
|
|
Ok(records)
|
|
}
|
|
|
|
// --- WriterHandle ---
|
|
|
|
/// Handle to the single read-write connection.
///
/// SQLite permits only one writer at a time, so all writes are serialized
/// through one mutex-guarded connection. Cloning is cheap and shares the
/// same underlying connection.
#[derive(Clone)]
pub struct WriterHandle {
    // The sole read-write connection, shared by all clones of the handle.
    conn: Arc<Mutex<Connection>>,
}
|
|
|
|
impl WriterHandle {
|
|
pub fn new(path: &str) -> Self {
|
|
let conn = open_readwrite(path);
|
|
create_tables(&conn);
|
|
Self {
|
|
conn: Arc::new(Mutex::new(conn)),
|
|
}
|
|
}
|
|
|
|
pub async fn put(
|
|
&self,
|
|
key: String,
|
|
volumes: Vec<String>,
|
|
size: Option<i64>,
|
|
) -> Result<(), AppError> {
|
|
let conn = self.conn.clone();
|
|
tokio::task::spawn_blocking(move || {
|
|
let conn = conn.lock().unwrap();
|
|
let volumes_json = encode_volumes(&volumes);
|
|
conn.prepare_cached(
|
|
"INSERT INTO kv (key, volumes, size) VALUES (?1, ?2, ?3)
|
|
ON CONFLICT(key) DO UPDATE SET volumes = ?2, size = ?3",
|
|
)?
|
|
.execute(params![key, volumes_json, size])?;
|
|
Ok(())
|
|
})
|
|
.await
|
|
.unwrap()
|
|
}
|
|
|
|
pub async fn delete(&self, key: String) -> Result<(), AppError> {
|
|
let conn = self.conn.clone();
|
|
tokio::task::spawn_blocking(move || {
|
|
let conn = conn.lock().unwrap();
|
|
conn.prepare_cached("DELETE FROM kv WHERE key = ?1")?
|
|
.execute(params![key])?;
|
|
Ok(())
|
|
})
|
|
.await
|
|
.unwrap()
|
|
}
|
|
|
|
pub async fn bulk_put(
|
|
&self,
|
|
records: Vec<(String, Vec<String>, Option<i64>)>,
|
|
) -> Result<(), AppError> {
|
|
let conn = self.conn.clone();
|
|
tokio::task::spawn_blocking(move || {
|
|
let conn = conn.lock().unwrap();
|
|
let mut stmt = conn.prepare_cached(
|
|
"INSERT INTO kv (key, volumes, size) VALUES (?1, ?2, ?3)
|
|
ON CONFLICT(key) DO UPDATE SET volumes = ?2, size = ?3",
|
|
)?;
|
|
for (key, volumes, size) in &records {
|
|
let volumes_json = encode_volumes(volumes);
|
|
stmt.execute(params![key, volumes_json, size])?;
|
|
}
|
|
Ok(())
|
|
})
|
|
.await
|
|
.unwrap()
|
|
}
|
|
}
|