Simplify moar

This commit is contained in:
Silas Brack 2026-03-07 13:23:38 +01:00
parent 7f3ec69cf6
commit 07490efc28
14 changed files with 261 additions and 1061 deletions

View file

@@ -1,7 +1,7 @@
use std::collections::HashMap;
use crate::config::Config;
use crate::db;
use crate::Args;
#[derive(serde::Deserialize)]
struct NginxEntry {
@@ -12,36 +12,20 @@ struct NginxEntry {
size: Option<i64>,
}
/// List all keys on a volume by recursively walking nginx autoindex.
async fn list_volume_keys(volume_url: &str) -> Result<Vec<(String, i64)>, String> {
let http = reqwest::Client::new();
let mut keys = Vec::new();
let mut dirs = vec![String::new()]; // start at root
let mut dirs = vec![String::new()];
while let Some(prefix) = dirs.pop() {
let url = format!("{volume_url}/{prefix}");
let resp = http
.get(&url)
.send()
.await
.map_err(|e| format!("GET {url}: {e}"))?;
let resp = http.get(&url).send().await.map_err(|e| format!("GET {url}: {e}"))?;
if !resp.status().is_success() {
return Err(format!("GET {url}: status {}", resp.status()));
}
let entries: Vec<NginxEntry> = resp
.json()
.await
.map_err(|e| format!("parse {url}: {e}"))?;
let entries: Vec<NginxEntry> = resp.json().await.map_err(|e| format!("parse {url}: {e}"))?;
for entry in entries {
let full_path = if prefix.is_empty() {
entry.name.clone()
} else {
format!("{prefix}{}", entry.name)
};
let full_path = if prefix.is_empty() { entry.name.clone() } else { format!("{prefix}{}", entry.name) };
match entry.entry_type.as_str() {
"directory" => dirs.push(format!("{full_path}/")),
"file" => keys.push((full_path, entry.size.unwrap_or(0))),
@@ -49,31 +33,24 @@ async fn list_volume_keys(volume_url: &str) -> Result<Vec<(String, i64)>, String
}
}
}
Ok(keys)
}
pub async fn run(config: &Config) {
let db_path = &config.database.path;
pub async fn run(args: &Args) {
let db_path = &args.db_path;
// Ensure parent directory exists
if let Some(parent) = std::path::Path::new(db_path).parent() {
let _ = std::fs::create_dir_all(parent);
}
// Delete old database
let _ = std::fs::remove_file(db_path);
let _ = std::fs::remove_file(format!("{db_path}-wal"));
let _ = std::fs::remove_file(format!("{db_path}-shm"));
let writer = db::WriterHandle::new(db_path);
let volume_urls = config.volume_urls();
// key -> (volumes, size)
let db = db::Db::new(db_path);
let mut index: HashMap<String, (Vec<String>, i64)> = HashMap::new();
for vol_url in &volume_urls {
for vol_url in &args.volumes {
eprintln!("Scanning {vol_url}...");
match list_volume_keys(vol_url).await {
Ok(keys) => {
@@ -81,26 +58,15 @@ pub async fn run(config: &Config) {
for (key, size) in keys {
let entry = index.entry(key).or_insert_with(|| (Vec::new(), size));
entry.0.push(vol_url.clone());
// Use the largest size seen (they should all match)
if size > entry.1 {
entry.1 = size;
}
if size > entry.1 { entry.1 = size; }
}
}
Err(e) => {
eprintln!(" Error scanning {vol_url}: {e}");
}
Err(e) => eprintln!(" Error scanning {vol_url}: {e}"),
}
}
// Batch insert into SQLite
let records: Vec<(String, Vec<String>, Option<i64>)> = index
.into_iter()
.map(|(key, (volumes, size))| (key, volumes, Some(size)))
.collect();
let records: Vec<_> = index.into_iter().map(|(k, (v, s))| (k, v, Some(s))).collect();
let count = records.len();
writer.bulk_put(records).await.expect("bulk_put failed");
db.bulk_put(records).await.expect("bulk_put failed");
eprintln!("Rebuilt index with {count} keys");
}