Clean up comments

This commit is contained in:
Silas Brack 2026-03-08 13:31:44 +01:00
parent 5cdaeddc0e
commit 5daa983034
3 changed files with 5 additions and 31 deletions

View file

@@ -30,8 +30,6 @@ fn encode_volumes(v: &[String]) -> String {
serde_json::to_string(v).unwrap()
}
/// Pure: compute exclusive upper bound for prefix range queries.
/// Increments the last byte that isn't 0xFF.
/// Examples: "abc" -> Some("abd"), "ab\xff" -> Some("ac"), "\xff\xff" -> None
pub fn prefix_upper_bound(prefix: &str) -> Option<String> {
let mut bytes = prefix.as_bytes().to_vec();
@@ -44,7 +42,6 @@ pub fn prefix_upper_bound(prefix: &str) -> Option<String> {
None
}
/// A single SQLite connection behind a mutex, used for both reads and writes.
#[derive(Clone)]
pub struct Db {
conn: Arc<Mutex<Connection>>,
@@ -207,19 +204,17 @@ mod tests {
#[test]
fn test_prefix_upper_bound_range_correctness() {
// The only test that matters: does the bound actually work for range queries?
// All strings starting with "foo" should be >= "foo" and < upper_bound("foo")
let prefix = "foo";
let upper = prefix_upper_bound(prefix).unwrap();
let upper = upper.as_str();
// These should be in range [prefix, upper)
// in range [prefix, upper)
assert!("foo" >= prefix && "foo" < upper);
assert!("foo/bar" >= prefix && "foo/bar" < upper);
assert!("foobar" >= prefix && "foobar" < upper);
assert!("foo\x7f" >= prefix && "foo\x7f" < upper); // high ASCII
assert!("foo\x7f" >= prefix && "foo\x7f" < upper);
// These should be out of range
// out of range
assert!("fop" >= upper);
assert!("fon" < prefix);
}

View file

@@ -12,9 +12,7 @@ struct NginxEntry {
size: Option<i64>,
}
/// Pure: merge volume scan results into a unified index.
/// Each scan is (volume_url, list of (key, size) pairs).
/// Returns a map of key -> (volumes containing it, max size seen).
/// If a key has different sizes across volumes, takes the max.
pub fn merge_volume_scans(
scans: &[(String, Vec<(String, i64)>)],
) -> HashMap<String, (Vec<String>, i64)> {
@@ -77,7 +75,6 @@ pub async fn run(args: &Args) {
let db = db::Db::new(db_path);
// I/O: scan each volume
let mut scans = Vec::new();
for vol_url in &args.volumes {
eprintln!("Scanning {vol_url}...");
@@ -90,7 +87,6 @@ pub async fn run(args: &Args) {
}
}
// Pure: merge scan results
let index = merge_volume_scans(&scans);
let records: Vec<_> = index
@@ -108,9 +104,7 @@ mod tests {
#[test]
fn test_merge_takes_max_size() {
// Edge case: same key with different sizes across volumes
// (can happen due to incomplete writes or corruption)
// We take the max size as the authoritative value
// Can happen due to incomplete writes or corruption
let scans = vec![
("http://vol1".to_string(), vec![("key".to_string(), 50)]),
("http://vol2".to_string(), vec![("key".to_string(), 200)]),

View file

@@ -15,17 +15,12 @@ pub struct AppState {
pub http: reqwest::Client,
}
/// Result of probing volumes for a key.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ProbeResult {
/// Found a healthy volume at this URL
Found(String),
/// All volumes were probed, none healthy
AllFailed,
}
/// Pure function: given volumes and their probe results (true = healthy),
/// returns the first healthy volume URL for the key, or None.
pub fn first_healthy_volume(key: &str, volumes: &[String], results: &[bool]) -> ProbeResult {
for (vol, &healthy) in volumes.iter().zip(results) {
if healthy {
@@ -35,8 +30,6 @@ pub fn first_healthy_volume(key: &str, volumes: &[String], results: &[bool]) ->
ProbeResult::AllFailed
}
/// Pure function: shuffle volumes for load balancing.
/// Takes a seed for deterministic testing.
pub fn shuffle_volumes(volumes: Vec<String>, seed: u64) -> Vec<String> {
use rand::seq::SliceRandom;
use rand::SeedableRng;
@@ -55,14 +48,12 @@ pub async fn get_key(
return Err(AppError::CorruptRecord { key });
}
// Shuffle for load balancing (random seed in production)
let seed = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.map(|d| d.as_nanos() as u64)
.unwrap_or(0);
let volumes = shuffle_volumes(record.volumes, seed);
// Probe volumes and collect results
let mut results = Vec::with_capacity(volumes.len());
for vol in &volumes {
let url = format!("{vol}/{key}");
@@ -78,7 +69,6 @@ pub async fn get_key(
}
};
results.push(healthy);
// Early exit on first healthy volume
if healthy {
break;
}
@@ -107,7 +97,6 @@ pub async fn put_key(
});
}
// Fan out PUTs to all target volumes concurrently
let mut handles = Vec::with_capacity(target_volumes.len());
for vol in &target_volumes {
let url = format!("{vol}/{key}");
@@ -243,7 +232,3 @@ pub async fn list_keys(
Ok((StatusCode::OK, keys.join("\n")).into_response())
}
// Note: first_healthy_volume and shuffle_volumes are trivial functions
// (essentially .find() and .shuffle()). Testing them would just test
// that standard library functions work. The real test is integration:
// does failover work with actual down volumes?