Idk broq
This commit is contained in:
parent
dc1f4bd19d
commit
2c66fa50d8
9 changed files with 1125 additions and 960 deletions
382
src/db.rs
382
src/db.rs
|
|
@ -1,182 +1,200 @@
|
|||
use rusqlite::{params, Connection, OpenFlags};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use crate::error::AppError;
|
||||
|
||||
/// One row of the `kv` table: a key plus the metadata stored for it.
#[derive(Debug, Clone)]
pub struct Record {
    /// Primary key (`kv.key`).
    pub key: String,
    /// Volume identifiers for this key; stored JSON-encoded in `kv.volumes`.
    pub volumes: Vec<String>,
    /// Optional size value from `kv.size` (NULL in the DB maps to `None`).
    pub size: Option<i64>,
}
|
||||
|
||||
fn apply_pragmas(conn: &Connection) {
|
||||
conn.execute_batch(
|
||||
"PRAGMA journal_mode = WAL;
|
||||
PRAGMA synchronous = NORMAL;
|
||||
PRAGMA busy_timeout = 5000;
|
||||
PRAGMA temp_store = memory;
|
||||
PRAGMA cache_size = -64000;
|
||||
PRAGMA mmap_size = 268435456;",
|
||||
)
|
||||
.expect("failed to set pragmas");
|
||||
}
|
||||
|
||||
fn parse_volumes(s: &str) -> Vec<String> {
|
||||
serde_json::from_str(s).unwrap_or_default()
|
||||
}
|
||||
|
||||
fn encode_volumes(v: &[String]) -> String {
|
||||
serde_json::to_string(v).unwrap()
|
||||
}
|
||||
|
||||
/// A single SQLite connection behind a mutex, used for both reads and writes.
///
/// `Clone` is cheap: every clone shares the same underlying connection
/// through the `Arc`, and the `Mutex` serializes all access to it.
#[derive(Clone)]
pub struct Db {
    // Shared connection; lock held only inside blocking sections.
    conn: Arc<Mutex<Connection>>,
}
|
||||
|
||||
impl Db {
|
||||
pub fn new(path: &str) -> Self {
|
||||
let conn = Connection::open_with_flags(
|
||||
path,
|
||||
OpenFlags::SQLITE_OPEN_READ_WRITE
|
||||
| OpenFlags::SQLITE_OPEN_CREATE
|
||||
| OpenFlags::SQLITE_OPEN_NO_MUTEX
|
||||
| OpenFlags::SQLITE_OPEN_URI,
|
||||
)
|
||||
.expect("failed to open database");
|
||||
apply_pragmas(&conn);
|
||||
conn.execute_batch(
|
||||
"CREATE TABLE IF NOT EXISTS kv (
|
||||
key TEXT PRIMARY KEY,
|
||||
volumes TEXT NOT NULL,
|
||||
size INTEGER,
|
||||
created_at INTEGER DEFAULT (unixepoch())
|
||||
);",
|
||||
)
|
||||
.expect("failed to create tables");
|
||||
Self { conn: Arc::new(Mutex::new(conn)) }
|
||||
}
|
||||
|
||||
pub async fn get(&self, key: &str) -> Result<Record, AppError> {
|
||||
let conn = self.conn.clone();
|
||||
let key = key.to_string();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare_cached("SELECT key, volumes, size FROM kv WHERE key = ?1")?;
|
||||
Ok(stmt.query_row(params![key], |row| {
|
||||
let vj: String = row.get(1)?;
|
||||
Ok(Record { key: row.get(0)?, volumes: parse_volumes(&vj), size: row.get(2)? })
|
||||
})?)
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn list_keys(&self, prefix: &str) -> Result<Vec<String>, AppError> {
|
||||
let conn = self.conn.clone();
|
||||
let prefix = prefix.to_string();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
if prefix.is_empty() {
|
||||
let mut stmt = conn.prepare_cached("SELECT key FROM kv ORDER BY key")?;
|
||||
let keys = stmt
|
||||
.query_map([], |row| row.get(0))?
|
||||
.collect::<Result<Vec<String>, _>>()?;
|
||||
return Ok(keys);
|
||||
}
|
||||
// Compute exclusive upper bound: increment last non-0xFF byte
|
||||
let upper = {
|
||||
let mut bytes = prefix.as_bytes().to_vec();
|
||||
let mut result = None;
|
||||
while let Some(last) = bytes.pop() {
|
||||
if last < 0xFF {
|
||||
bytes.push(last + 1);
|
||||
result = Some(String::from_utf8_lossy(&bytes).into_owned());
|
||||
break;
|
||||
}
|
||||
}
|
||||
result
|
||||
};
|
||||
let keys = match &upper {
|
||||
Some(end) => {
|
||||
let mut stmt = conn.prepare_cached(
|
||||
"SELECT key FROM kv WHERE key >= ?1 AND key < ?2 ORDER BY key",
|
||||
)?;
|
||||
stmt.query_map(params![prefix, end], |row| row.get(0))?
|
||||
.collect::<Result<Vec<String>, _>>()?
|
||||
}
|
||||
None => {
|
||||
let mut stmt = conn.prepare_cached(
|
||||
"SELECT key FROM kv WHERE key >= ?1 ORDER BY key",
|
||||
)?;
|
||||
stmt.query_map(params![prefix], |row| row.get(0))?
|
||||
.collect::<Result<Vec<String>, _>>()?
|
||||
}
|
||||
};
|
||||
Ok(keys)
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn put(&self, key: String, volumes: Vec<String>, size: Option<i64>) -> Result<(), AppError> {
|
||||
let conn = self.conn.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
conn.prepare_cached(
|
||||
"INSERT INTO kv (key, volumes, size) VALUES (?1, ?2, ?3)
|
||||
ON CONFLICT(key) DO UPDATE SET volumes = ?2, size = ?3",
|
||||
)?
|
||||
.execute(params![key, encode_volumes(&volumes), size])?;
|
||||
Ok(())
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn delete(&self, key: String) -> Result<(), AppError> {
|
||||
let conn = self.conn.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
conn.prepare_cached("DELETE FROM kv WHERE key = ?1")?
|
||||
.execute(params![key])?;
|
||||
Ok(())
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn bulk_put(&self, records: Vec<(String, Vec<String>, Option<i64>)>) -> Result<(), AppError> {
|
||||
let conn = self.conn.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
conn.execute_batch("BEGIN")?;
|
||||
let mut stmt = conn.prepare_cached(
|
||||
"INSERT INTO kv (key, volumes, size) VALUES (?1, ?2, ?3)
|
||||
ON CONFLICT(key) DO UPDATE SET volumes = ?2, size = ?3",
|
||||
)?;
|
||||
for (key, volumes, size) in &records {
|
||||
stmt.execute(params![key, encode_volumes(volumes), size])?;
|
||||
}
|
||||
drop(stmt);
|
||||
conn.execute_batch("COMMIT")?;
|
||||
Ok(())
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn all_records_sync(&self) -> Result<Vec<Record>, AppError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare_cached("SELECT key, volumes, size FROM kv")?;
|
||||
let records = stmt
|
||||
.query_map([], |row| {
|
||||
let vj: String = row.get(1)?;
|
||||
Ok(Record { key: row.get(0)?, volumes: parse_volumes(&vj), size: row.get(2)? })
|
||||
})?
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
Ok(records)
|
||||
}
|
||||
}
|
||||
use rusqlite::{Connection, OpenFlags, params};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use crate::error::AppError;
|
||||
|
||||
/// One row of the `kv` table: a key plus the metadata stored for it.
#[derive(Debug, Clone)]
pub struct Record {
    /// Primary key (`kv.key`).
    pub key: String,
    /// Volume identifiers for this key; stored JSON-encoded in `kv.volumes`.
    pub volumes: Vec<String>,
    /// Optional size value from `kv.size` (NULL in the DB maps to `None`).
    pub size: Option<i64>,
}
|
||||
|
||||
fn apply_pragmas(conn: &Connection) {
|
||||
conn.execute_batch(
|
||||
"PRAGMA journal_mode = WAL;
|
||||
PRAGMA synchronous = NORMAL;
|
||||
PRAGMA busy_timeout = 5000;
|
||||
PRAGMA temp_store = memory;
|
||||
PRAGMA cache_size = -64000;
|
||||
PRAGMA mmap_size = 268435456;",
|
||||
)
|
||||
.expect("failed to set pragmas");
|
||||
}
|
||||
|
||||
fn parse_volumes(s: &str) -> Vec<String> {
|
||||
serde_json::from_str(s).unwrap_or_default()
|
||||
}
|
||||
|
||||
fn encode_volumes(v: &[String]) -> String {
|
||||
serde_json::to_string(v).unwrap()
|
||||
}
|
||||
|
||||
/// A single SQLite connection behind a mutex, used for both reads and writes.
///
/// `Clone` is cheap: every clone shares the same underlying connection
/// through the `Arc`, and the `Mutex` serializes all access to it.
#[derive(Clone)]
pub struct Db {
    // Shared connection; lock held only inside blocking sections.
    conn: Arc<Mutex<Connection>>,
}
|
||||
|
||||
impl Db {
|
||||
pub fn new(path: &str) -> Self {
|
||||
let conn = Connection::open_with_flags(
|
||||
path,
|
||||
OpenFlags::SQLITE_OPEN_READ_WRITE
|
||||
| OpenFlags::SQLITE_OPEN_CREATE
|
||||
| OpenFlags::SQLITE_OPEN_NO_MUTEX
|
||||
| OpenFlags::SQLITE_OPEN_URI,
|
||||
)
|
||||
.expect("failed to open database");
|
||||
apply_pragmas(&conn);
|
||||
conn.execute_batch(
|
||||
"CREATE TABLE IF NOT EXISTS kv (
|
||||
key TEXT PRIMARY KEY,
|
||||
volumes TEXT NOT NULL,
|
||||
size INTEGER,
|
||||
created_at INTEGER DEFAULT (unixepoch())
|
||||
);",
|
||||
)
|
||||
.expect("failed to create tables");
|
||||
Self {
|
||||
conn: Arc::new(Mutex::new(conn)),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get(&self, key: &str) -> Result<Record, AppError> {
|
||||
let conn = self.conn.clone();
|
||||
let key = key.to_string();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
let mut stmt =
|
||||
conn.prepare_cached("SELECT key, volumes, size FROM kv WHERE key = ?1")?;
|
||||
Ok(stmt.query_row(params![key], |row| {
|
||||
let vj: String = row.get(1)?;
|
||||
Ok(Record {
|
||||
key: row.get(0)?,
|
||||
volumes: parse_volumes(&vj),
|
||||
size: row.get(2)?,
|
||||
})
|
||||
})?)
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn list_keys(&self, prefix: &str) -> Result<Vec<String>, AppError> {
|
||||
let conn = self.conn.clone();
|
||||
let prefix = prefix.to_string();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
if prefix.is_empty() {
|
||||
let mut stmt = conn.prepare_cached("SELECT key FROM kv ORDER BY key")?;
|
||||
let keys = stmt
|
||||
.query_map([], |row| row.get(0))?
|
||||
.collect::<Result<Vec<String>, _>>()?;
|
||||
return Ok(keys);
|
||||
}
|
||||
// Compute exclusive upper bound: increment last non-0xFF byte
|
||||
let upper = {
|
||||
let mut bytes = prefix.as_bytes().to_vec();
|
||||
let mut result = None;
|
||||
while let Some(last) = bytes.pop() {
|
||||
if last < 0xFF {
|
||||
bytes.push(last + 1);
|
||||
result = Some(String::from_utf8_lossy(&bytes).into_owned());
|
||||
break;
|
||||
}
|
||||
}
|
||||
result
|
||||
};
|
||||
let keys = match &upper {
|
||||
Some(end) => {
|
||||
let mut stmt = conn.prepare_cached(
|
||||
"SELECT key FROM kv WHERE key >= ?1 AND key < ?2 ORDER BY key",
|
||||
)?;
|
||||
stmt.query_map(params![prefix, end], |row| row.get(0))?
|
||||
.collect::<Result<Vec<String>, _>>()?
|
||||
}
|
||||
None => {
|
||||
let mut stmt =
|
||||
conn.prepare_cached("SELECT key FROM kv WHERE key >= ?1 ORDER BY key")?;
|
||||
stmt.query_map(params![prefix], |row| row.get(0))?
|
||||
.collect::<Result<Vec<String>, _>>()?
|
||||
}
|
||||
};
|
||||
Ok(keys)
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn put(
|
||||
&self,
|
||||
key: String,
|
||||
volumes: Vec<String>,
|
||||
size: Option<i64>,
|
||||
) -> Result<(), AppError> {
|
||||
let conn = self.conn.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
conn.prepare_cached(
|
||||
"INSERT INTO kv (key, volumes, size) VALUES (?1, ?2, ?3)
|
||||
ON CONFLICT(key) DO UPDATE SET volumes = ?2, size = ?3",
|
||||
)?
|
||||
.execute(params![key, encode_volumes(&volumes), size])?;
|
||||
Ok(())
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn delete(&self, key: String) -> Result<(), AppError> {
|
||||
let conn = self.conn.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
conn.prepare_cached("DELETE FROM kv WHERE key = ?1")?
|
||||
.execute(params![key])?;
|
||||
Ok(())
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn bulk_put(
|
||||
&self,
|
||||
records: Vec<(String, Vec<String>, Option<i64>)>,
|
||||
) -> Result<(), AppError> {
|
||||
let conn = self.conn.clone();
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let conn = conn.lock().unwrap();
|
||||
conn.execute_batch("BEGIN")?;
|
||||
let mut stmt = conn.prepare_cached(
|
||||
"INSERT INTO kv (key, volumes, size) VALUES (?1, ?2, ?3)
|
||||
ON CONFLICT(key) DO UPDATE SET volumes = ?2, size = ?3",
|
||||
)?;
|
||||
for (key, volumes, size) in &records {
|
||||
stmt.execute(params![key, encode_volumes(volumes), size])?;
|
||||
}
|
||||
drop(stmt);
|
||||
conn.execute_batch("COMMIT")?;
|
||||
Ok(())
|
||||
})
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn all_records_sync(&self) -> Result<Vec<Record>, AppError> {
|
||||
let conn = self.conn.lock().unwrap();
|
||||
let mut stmt = conn.prepare_cached("SELECT key, volumes, size FROM kv")?;
|
||||
let records = stmt
|
||||
.query_map([], |row| {
|
||||
let vj: String = row.get(1)?;
|
||||
Ok(Record {
|
||||
key: row.get(0)?,
|
||||
volumes: parse_volumes(&vj),
|
||||
size: row.get(2)?,
|
||||
})
|
||||
})?
|
||||
.collect::<Result<Vec<_>, _>>()?;
|
||||
Ok(records)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue