diff --git a/Cargo.toml b/Cargo.toml index f91ae5fe..7afb0a0b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,40 +12,49 @@ authors.workspace = true rust-version.workspace = true license.workspace = true -[workspace] -members = ["proc_macros"] - [workspace.package] -version = "0.4.1" +version = "0.5.0" edition = "2021" authors = ["NLnet Labs "] license = "BSD-3-Clause" -rust-version = "1.80" +rust-version = "1.82" [dependencies] crossbeam-epoch = "^0.9" crossbeam-utils = "^0.8" +parking_lot_core = "0.9.10" inetnum = "0.1" log = "^0.4" roaring = "0.10.3" -rotonda-macros = { path = "proc_macros", version = "0.4.0" } -routecore = { version = "0.5", features = ["bgp", "bmp", "fsm", "serde"] } - +routecore = { git = "https://github.com/nlnetlabs/routecore", branch = "dev", version = "0.5.2-dev", features = ["bgp", "bmp", "fsm", "serde", "mrt"] } ansi_term = { version = "0.12", optional = true } csv = { version = "1", optional = true } rustyline = { version = "13", optional = true } -parking_lot_core = "0.9.10" +clap = { version = "4.4", optional = true, features = ["derive"] } +rayon = { version = "1.10", optional = true } +memmap2 = { version = "0.9", optional = true } +rand = { version = "0.9" } +lsm-tree = { version = "2.6.6" } +serde = "1.0.216" +serde_derive = "1.0.216" +serde_json = "1.0.133" +num-traits = "0.2.19" +zerocopy = { version = "0.8.17", features = ["derive"] } [dev-dependencies] csv = { version = "1" } env_logger = { version = "0.10" } -rand = "^0.8" [features] cli = ["ansi_term", "rustyline", "csv"] +mrt = ["clap", "rayon"] default = [] [[bin]] name = "cli" required-features = ["cli"] + +[[bin]] +name = "load_mrt" +required-features = ["mrt"] diff --git a/examples/exact_matches.rs b/examples/exact_matches.rs index df65d45e..39ea717b 100644 --- a/examples/exact_matches.rs +++ b/examples/exact_matches.rs @@ -1,10 +1,19 @@ -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; -use rotonda_store::meta_examples::NoMeta; +use 
inetnum::addr::Prefix; +use rotonda_store::match_options::IncludeHistory; +use rotonda_store::prefix_record::{Record, RouteStatus}; +// use rotonda_store::prelude::multi::*; +use rotonda_store::{ + epoch, + match_options::{MatchOptions, MatchType}, + rib::config::MemoryOnlyConfig, + rib::StarCastRib, + test_types::NoMeta, + IntoIpAddr, +}; fn main() -> Result<(), Box> { let guard = &epoch::pin(); - let tree_bitmap = MultiThreadedStore::::new()?; + let tree_bitmap = StarCastRib::::try_default()?; let pfxs = vec![ Prefix::new_relaxed( 0b0000_0000_0000_0000_0000_0000_0000_0000_u32.into_ipaddr(), @@ -258,7 +267,11 @@ fn main() -> Result<(), Box> { for pfx in pfxs.into_iter() { println!("insert {}", pfx?); // let p : rotonda_store::Prefix = pfx.into(); - tree_bitmap.insert(&pfx.unwrap(), Record::new(0, 0, RouteStatus::Active, NoMeta::Empty), None)?; + tree_bitmap.insert( + &pfx.unwrap(), + Record::new(0, 0, RouteStatus::Active, NoMeta::Empty), + None, + )?; } println!("------ end of inserts\n"); // println!( @@ -343,9 +356,10 @@ fn main() -> Result<(), Box> { include_withdrawn: false, include_less_specifics: false, include_more_specifics: false, - mui: None + mui: None, + include_history: IncludeHistory::None, }, - guard + guard, ); println!("exact match: {:?}", s_spfx); println!("-----------"); diff --git a/examples/exact_matches_single.rs b/examples/exact_matches_single.rs index c0b91e67..ed15c3a6 100644 --- a/examples/exact_matches_single.rs +++ b/examples/exact_matches_single.rs @@ -1,11 +1,16 @@ -use rotonda_store::prelude::*; -use rotonda_store::SingleThreadedStore; -use rotonda_store::meta_examples::NoMeta; +use inetnum::addr::Prefix; +use rotonda_store::{ + match_options::{IncludeHistory, MatchOptions, MatchType}, + prefix_record::{Record, RouteStatus}, + IntoIpAddr, +}; +use rotonda_store::{ + rib::{config::MemoryOnlyConfig, StarCastRib}, + test_types::NoMeta, +}; fn main() -> Result<(), Box> { - let v4 = vec![8]; - let v6 = vec![8]; - let mut tree_bitmap = 
SingleThreadedStore::::new(v4, v6); + let tree_bitmap = StarCastRib::::try_default()?; let pfxs = vec![ Prefix::new_relaxed( @@ -260,7 +265,11 @@ fn main() -> Result<(), Box> { for pfx in pfxs.into_iter() { println!("insert {}", pfx?); // let p : rotonda_store::Prefix = pfx.into(); - tree_bitmap.insert(&pfx.unwrap(), NoMeta::Empty)?; + tree_bitmap.insert( + &pfx.unwrap(), + Record::new(1, 0, RouteStatus::Active, NoMeta::Empty), + None, + )?; } println!("------ end of inserts\n"); // println!( @@ -336,7 +345,7 @@ fn main() -> Result<(), Box> { // Prefix::new_relaxed(std::net::Ipv4Addr::new(1, 0, 128, 0).into(), 24), ] { println!("search for: {:?}", spfx); - // let locks = tree_bitmap.acquire_prefixes_rwlock_read(); + let guard = &rotonda_store::epoch::pin(); let s_spfx = tree_bitmap.match_prefix( // (&locks.0, &locks.1), &spfx.unwrap(), @@ -345,8 +354,10 @@ fn main() -> Result<(), Box> { include_withdrawn: false, include_less_specifics: false, include_more_specifics: false, - mui: None + mui: None, + include_history: IncludeHistory::None, }, + guard, ); println!("exact match: {:?}", s_spfx); println!("-----------"); diff --git a/examples/full_table_multiple_trees_json.rs b/examples/full_table_multiple_trees_json.rs index 4b57927b..3c52ff0c 100644 --- a/examples/full_table_multiple_trees_json.rs +++ b/examples/full_table_multiple_trees_json.rs @@ -1,17 +1,20 @@ -// extern crate self as roto; -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; -use rotonda_store::meta_examples::PrefixAs; +use inetnum::addr::Prefix; +use rotonda_store::epoch; +use rotonda_store::match_options::{IncludeHistory, MatchOptions, MatchType}; +use rotonda_store::prefix_record::{PrefixRecord, Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::test_types::PrefixAs; use std::error::Error; use std::fs::File; use std::process; -#[create_store(( - [4, 4, 4, 4, 4, 4, 4, 4], - [3,4,5,4] -))] -struct 
MyStore; +// #[create_store(( +// ([4, 4, 4, 4, 4, 4, 4, 4], 5, 17), +// ([3, 4, 5, 4], 17, 29) +// ))] +// struct MyStore; fn main() -> Result<(), Box> { const CSV_FILE_PATH: &str = "./data/uniq_pfx_asn_dfz_rnd.csv"; @@ -32,7 +35,12 @@ fn main() -> Result<(), Box> { let asn: u32 = record[2].parse().unwrap(); let pfx = PrefixRecord::::new( Prefix::new(net.into(), len)?, - vec![Record::new(0, 0, RouteStatus::Active, PrefixAs(asn))], + vec![Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new(asn.into()), + )], ); pfxs.push(pfx); } @@ -46,7 +54,9 @@ fn main() -> Result<(), Box> { println!("["); for n in 1..6 { let mut rec_vec: Vec> = vec![]; - let tree_bitmap = MyStore::::new()?; + let config = MemoryOnlyConfig; + let tree_bitmap = + StarCastRib::::new_with_config(config)?; if let Err(err) = load_prefixes(&mut rec_vec) { println!("error running example: {}", err); @@ -85,10 +95,11 @@ fn main() -> Result<(), Box> { include_withdrawn: false, include_less_specifics: false, include_more_specifics: false, - mui: None + mui: None, + include_history: IncludeHistory::None, }, - guard - ); + guard, + )?; } } } @@ -101,31 +112,29 @@ fn main() -> Result<(), Box> { println!("{{"); println!("\"type\": \"treebitmap_univec\","); - println!( - "\"strides v4 \": {:?},", - &tree_bitmap - .v4 - .store - .get_stride_sizes() - .iter() - .map_while(|s| if s > &0 { Some(*s) } else { None }) - .collect::>() - ); - println!( - "\"strides v6 \": {:?},", - &tree_bitmap - .v6 - .store - .get_stride_sizes() - .iter() - .map_while(|s| if s > &0 { Some(*s) } else { None }) - .collect::>() - ); + // println!( + // "\"strides v4 \": {:?},", + // &tree_bitmap + // .v4 + // .get_stride_sizes() + // .iter() + // .map_while(|s| if s > &0 { Some(*s) } else { None }) + // .collect::>() + // ); + // println!( + // "\"strides v6 \": {:?},", + // &tree_bitmap + // .v6 + // .get_stride_sizes() + // .iter() + // .map_while(|s| if s > &0 { Some(*s) } else { None }) + // .collect::>() + // ); 
println!("\"run_no\": {},", n); println!("\"inserts_num\": {},", inserts_num); println!("\"insert_duration_nanos\": {},", dur_insert_nanos); println!( - "\"global_prefix_vec_size\": {},", + "\"global_prefix_vec_size\": {:?},", tree_bitmap.prefixes_count() ); println!( diff --git a/examples/more_specifics.rs b/examples/more_specifics.rs index 78941768..9ef16f80 100644 --- a/examples/more_specifics.rs +++ b/examples/more_specifics.rs @@ -1,14 +1,14 @@ -use rotonda_store::meta_examples::PrefixAs; -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; - -use rotonda_store::AddressFamily; use inetnum::addr::Prefix; +use rotonda_store::{ + epoch, + match_options::{IncludeHistory, MatchOptions, MatchType}, + prefix_record::{Record, RouteStatus}, + rib::{config::MemoryOnlyConfig, StarCastRib}, + test_types::PrefixAs, + IntoIpAddr, +}; fn main() -> Result<(), Box> { - // type StoreType = InMemStorage; - let tree_bitmap = - MultiThreadedStore::::new()?; let pfxs = vec![ Prefix::new_relaxed( 0b0000_0000_0000_0000_0000_0000_0000_0000_u32.into_ipaddr(), @@ -215,7 +215,11 @@ fn main() -> Result<(), Box> { for pfx in pfxs.into_iter() { // println!("insert {:?}", pfx); let p: Prefix = pfx.unwrap(); - tree_bitmap.insert(&p, Record::new(0,0, RouteStatus::Active, PrefixAs(666)), None)?; + StarCastRib::::try_default()?.insert( + &p, + Record::new(0, 0, RouteStatus::Active, PrefixAs::new(666.into())), + None, + )?; } println!("------ end of inserts\n"); // println!( @@ -277,17 +281,20 @@ fn main() -> Result<(), Box> { ] { println!("search for: {:?}", spfx); let guard = &epoch::pin(); - let s_spfx = tree_bitmap.match_prefix( - &spfx.unwrap(), - &MatchOptions { - match_type: MatchType::ExactMatch, - include_withdrawn: false, - include_less_specifics: true, - include_more_specifics: true, - mui: None - }, - guard - ); + let s_spfx = + StarCastRib::::try_default()? 
+ .match_prefix( + &spfx.unwrap(), + &MatchOptions { + match_type: MatchType::ExactMatch, + include_withdrawn: false, + include_less_specifics: true, + include_more_specifics: true, + mui: None, + include_history: IncludeHistory::None, + }, + guard, + ); println!("em/m-s: {:#?}", s_spfx); println!("-----------"); } diff --git a/examples/multi_no_thread.rs b/examples/multi_no_thread.rs index d3fe3aff..d2a8317f 100644 --- a/examples/multi_no_thread.rs +++ b/examples/multi_no_thread.rs @@ -1,15 +1,21 @@ +use inetnum::addr::Prefix; use log::trace; -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; -use rotonda_store::meta_examples::PrefixAs; +use rotonda_store::epoch; +use rotonda_store::match_options::{IncludeHistory, MatchOptions, MatchType}; +use rotonda_store::prefix_record::{Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::test_types::PrefixAs; +use rotonda_store::IntoIpAddr; fn main() -> Result<(), Box> { #[cfg(feature = "cli")] env_logger::init(); trace!("Starting multi-threaded yolo testing...."); - let tree_bitmap = MultiThreadedStore::::new()?; + let tree_bitmap = + StarCastRib::::try_default()?; // let f = Arc::new(std::sync::atomic::AtomicBool::new(false)); let pfx = Prefix::new_relaxed( @@ -22,14 +28,27 @@ fn main() -> Result<(), Box> { loop { x += 1; // print!("{}-", i); - match tree_bitmap.insert(&pfx.unwrap(), Record::new(0, 0, RouteStatus::Active, PrefixAs(x % 1000)), None) { + match tree_bitmap.insert( + &pfx.unwrap(), + Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new((x % 1000).into()), + ), + None, + ) { Ok(_) => {} Err(e) => { println!("{}", e); } }; - if (x % 1_000_000) == 0 { println!("inserts: {}", x); } - if x == 100_000_000 { break; } + if (x % 1_000_000) == 0 { + println!("inserts: {}", x); + } + if x == 100_000_000 { + break; + } } println!("--thread {} done.", 1); @@ -40,14 +59,15 @@ fn main() -> Result<(), Box> { let s_spfx = 
tree_bitmap.match_prefix( &pfx.unwrap(), &MatchOptions { - match_type: rotonda_store::MatchType::ExactMatch, + match_type: MatchType::ExactMatch, include_withdrawn: true, include_less_specifics: true, include_more_specifics: true, - mui: None + mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("query result"); println!("{}", s_spfx); println!("{}", s_spfx.more_specifics.unwrap()); diff --git a/examples/multi_single_thread.rs b/examples/multi_single_thread.rs index 4e9f4765..2d82cc1e 100644 --- a/examples/multi_single_thread.rs +++ b/examples/multi_single_thread.rs @@ -1,20 +1,23 @@ +use inetnum::addr::Prefix; use log::trace; -use std::time::Duration; +use rotonda_store::prefix_record::{Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::IntoIpAddr; use std::thread; +use std::time::Duration; use rand::Rng; -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; - -use rotonda_store::meta_examples::PrefixAs; +use rotonda_store::test_types::PrefixAs; fn main() -> Result<(), Box> { #[cfg(feature = "cli")] env_logger::init(); trace!("Starting multi-threaded yolo testing...."); - let tree_bitmap = MultiThreadedStore::::new()?; + let tree_bitmap = + StarCastRib::::try_default()?; // let f = Arc::new(std::sync::atomic::AtomicBool::new(false)); let pfx = Prefix::new_relaxed( @@ -31,7 +34,7 @@ fn main() -> Result<(), Box> { .name(1_u8.to_string()) .spawn(move || -> Result<(), Box> { // while !start_flag.load(std::sync::atomic::Ordering::Acquire) { - let mut rng= rand::thread_rng(); + let mut rng = rand::rng(); println!("park thread {}", 1); thread::park(); @@ -42,11 +45,16 @@ fn main() -> Result<(), Box> { loop { // x += 1; // print!("{}-", i); - let asn: u32 = rng.gen(); + let asn: u32 = rng.random(); match tree_bitmap.insert( &pfx.unwrap(), - Record::new(0, 0, RouteStatus::Active, PrefixAs(asn)), - None + Record::new( + 0, + 0, + 
RouteStatus::Active, + PrefixAs::new(asn.into()), + ), + None, ) { Ok(_) => {} Err(e) => { diff --git a/examples/multi_thread_1.rs b/examples/multi_thread_1.rs index 515aaeb5..9331def9 100644 --- a/examples/multi_thread_1.rs +++ b/examples/multi_thread_1.rs @@ -1,35 +1,49 @@ use std::{sync::Arc, thread}; -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; -use rotonda_store::meta_examples::NoMeta; +use inetnum::addr::Prefix; +use rotonda_store::{ + epoch, + match_options::{IncludeHistory, MatchOptions, MatchType}, + prefix_record::{Record, RouteStatus}, + rib::{config::MemoryOnlyConfig, StarCastRib}, + test_types::NoMeta, + IntoIpAddr, +}; fn main() -> Result<(), Box> { - let tree_bitmap = Arc::new(MultiThreadedStore::::new()?); + let tree_bitmap = + Arc::new(StarCastRib::::try_default()?); let _: Vec<_> = (0..16) .map(|i: i32| { let tree_bitmap = tree_bitmap.clone(); - thread::Builder::new().name(i.to_string()).spawn(move || { - let pfxs = get_pfx(); + thread::Builder::new() + .name(i.to_string()) + .spawn(move || { + let pfxs = get_pfx(); - for pfx in pfxs.into_iter() { - println!("insert {}", pfx.unwrap()); + for pfx in pfxs.into_iter() { + println!("insert {}", pfx.unwrap()); - match tree_bitmap - .insert( - &pfx.unwrap(), - Record::new(0, 0, RouteStatus::Active, NoMeta::Empty), - None - ) { - Ok(_) => {} - Err(e) => { - println!("{}", e); - } - }; - } - }).unwrap() + match tree_bitmap.insert( + &pfx.unwrap(), + Record::new( + 0, + 0, + RouteStatus::Active, + NoMeta::Empty, + ), + None, + ) { + Ok(_) => {} + Err(e) => { + println!("{}", e); + } + }; + } + }) + .unwrap() }) .map(|t| t.join()) .collect(); @@ -43,14 +57,15 @@ fn main() -> Result<(), Box> { let s_spfx = tree_bitmap.match_prefix( &spfx.unwrap(), &MatchOptions { - match_type: rotonda_store::MatchType::ExactMatch, + match_type: MatchType::ExactMatch, include_withdrawn: false, include_less_specifics: true, include_more_specifics: true, - mui: None + mui: None, + include_history: 
IncludeHistory::None, }, guard, - ); + )?; println!("query result"); println!("{}", s_spfx); println!("{}", s_spfx.more_specifics.unwrap()); diff --git a/examples/multi_thread_2.rs b/examples/multi_thread_2.rs index 4fe85aba..e18d7d44 100644 --- a/examples/multi_thread_2.rs +++ b/examples/multi_thread_2.rs @@ -1,18 +1,21 @@ +use inetnum::addr::Prefix; use log::trace; +use rotonda_store::match_options::{IncludeHistory, MatchOptions, MatchType}; +use rotonda_store::prefix_record::{Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::test_types::PrefixAs; +use rotonda_store::{epoch, IntoIpAddr}; use std::time::Duration; use std::{sync::Arc, thread}; -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; - -use rotonda_store::meta_examples::PrefixAs; - fn main() -> Result<(), Box> { #[cfg(feature = "cli")] env_logger::init(); trace!("Starting multi-threaded yolo testing...."); - let tree_bitmap = Arc::new(MultiThreadedStore::::new()?); + let tree_bitmap = + Arc::new(StarCastRib::::try_default()?); let f = Arc::new(std::sync::atomic::AtomicBool::new(false)); let pfx = Prefix::new_relaxed( @@ -32,13 +35,21 @@ fn main() -> Result<(), Box> { thread::park(); } - match tree_bitmap.insert(&pfx.unwrap(), Record::new(0, 0, RouteStatus::Active, PrefixAs(i as u32)), None) { + match tree_bitmap.insert( + &pfx.unwrap(), + Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new((i as u32).into()), + ), + None, + ) { Ok(_) => {} Err(e) => { println!("{}", e); } }; - }) .unwrap() }); @@ -57,14 +68,15 @@ fn main() -> Result<(), Box> { let s_spfx = tree_bitmap.match_prefix( &pfx.unwrap(), &MatchOptions { - match_type: rotonda_store::MatchType::ExactMatch, + match_type: MatchType::ExactMatch, include_withdrawn: true, include_less_specifics: true, include_more_specifics: true, - mui: None + mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("query result"); 
println!("{}", s_spfx); println!("{}", s_spfx.more_specifics.unwrap()); diff --git a/examples/multi_thread_3.rs b/examples/multi_thread_3.rs index 2dcc85cc..167dfb61 100644 --- a/examples/multi_thread_3.rs +++ b/examples/multi_thread_3.rs @@ -1,18 +1,22 @@ +use inetnum::addr::Prefix; use log::trace; +use rotonda_store::match_options::{IncludeHistory, MatchOptions, MatchType}; +use rotonda_store::prefix_record::{Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::{epoch, IntoIpAddr}; use std::time::Duration; use std::{sync::Arc, thread}; -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; - -use rotonda_store::meta_examples::PrefixAs; +use rotonda_store::test_types::PrefixAs; fn main() -> Result<(), Box> { #[cfg(feature = "cli")] env_logger::init(); trace!("Starting multi-threaded yolo testing...."); - let tree_bitmap = Arc::new(MultiThreadedStore::::new()?); + let tree_bitmap = + Arc::new(StarCastRib::::try_default()?); let f = Arc::new(std::sync::atomic::AtomicBool::new(false)); let pfx = Prefix::new_relaxed( @@ -20,45 +24,60 @@ fn main() -> Result<(), Box> { 32, ); - let threads = - (0..16).enumerate().map(|(i, _)| { - let tree_bitmap = tree_bitmap.clone(); - // let start_flag = Arc::clone(&f); + let threads = (0..16).enumerate().map(|(i, _)| { + let tree_bitmap = tree_bitmap.clone(); + // let start_flag = Arc::clone(&f); - std::thread::Builder::new().name(i.to_string()).spawn( - move || -> Result<(), Box> { - // while !start_flag.load(std::sync::atomic::Ordering::Acquire) { + std::thread::Builder::new() + .name(i.to_string()) + .spawn( + move || -> Result<(), Box> { + // while !start_flag.load(std::sync::atomic::Ordering::Acquire) { println!("park thread {}", i); thread::park(); - // } + // } - print!("\nstart {} ---", i); - let mut x = 0; - loop { - x += 1; - // print!("{}-", i); - match tree_bitmap - .insert( + print!("\nstart {} ---", i); + let mut x = 0; + loop 
{ + x += 1; + // print!("{}-", i); + match tree_bitmap.insert( &pfx.unwrap(), - Record::new(0,0, RouteStatus::Active, PrefixAs(i as u32)), - None - ) - { - Ok(metrics) => { - if metrics.cas_count > 0 { - println!("{} {:?} {:?} retry count {},", std::thread::current().name().unwrap(), metrics, pfx, metrics.cas_count); + Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new((i as u32).into()), + ), + None, + ) { + Ok(metrics) => { + if metrics.cas_count > 0 { + println!( + "{} {:?} {:?} retry count {},", + std::thread::current() + .name() + .unwrap(), + metrics, + pfx, + metrics.cas_count + ); + } } + Err(e) => { + println!("{}", e); + } + }; + if x % 1_000_000 == 0 { + println!("{}", x); } - Err(e) => { - println!("{}", e); - } - }; - if x % 1_000_000 == 0 { println!("{}", x); } - } - // println!("--thread {} done.", i); - }, - ).unwrap() - }); + } + // println!("--thread {} done.", i); + }, + ) + .unwrap() + }); // thread::sleep(Duration::from_secs(60)); @@ -76,18 +95,19 @@ fn main() -> Result<(), Box> { let s_spfx = tree_bitmap.match_prefix( &pfx.unwrap(), &MatchOptions { - match_type: rotonda_store::MatchType::ExactMatch, + match_type: MatchType::ExactMatch, include_withdrawn: true, include_less_specifics: true, include_more_specifics: true, - mui: None + mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("query result"); println!("{}", s_spfx); println!("{}", s_spfx.more_specifics.unwrap()); - + println!("-----------"); Ok(()) diff --git a/examples/multi_thread_4.rs b/examples/multi_thread_4.rs index 8283695c..459cc3ed 100644 --- a/examples/multi_thread_4.rs +++ b/examples/multi_thread_4.rs @@ -1,61 +1,52 @@ +use inetnum::addr::Prefix; +use inetnum::asn::Asn; use log::trace; +use rotonda_store::match_options::{IncludeHistory, MatchOptions, MatchType}; +use rotonda_store::prefix_record::{Meta, Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use 
rotonda_store::IntoIpAddr; use std::time::Duration; use std::{sync::Arc, thread}; +#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)] +pub struct BytesPrefixAs(pub [u8; 4]); -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; - -#[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd)] -pub struct ComplexPrefixAs(pub Vec); - -// impl MergeUpdate for ComplexPrefixAs { -// type UserDataIn = (); -// type UserDataOut = (); - -// fn merge_update( -// &mut self, -// update_record: ComplexPrefixAs, -// _: Option<&Self::UserDataIn>, -// ) -> Result<(), Box> { -// self.0 = update_record.0; -// Ok(()) -// } - -// fn clone_merge_update( -// &self, -// update_meta: &Self, -// _: Option<&Self::UserDataIn>, -// ) -> Result<(Self, Self::UserDataOut), Box> -// where -// Self: std::marker::Sized, -// { -// let mut new_meta = update_meta.0.clone(); -// new_meta.push(self.0[0]); -// Ok((ComplexPrefixAs(new_meta), ())) -// } -// } - -impl Meta for ComplexPrefixAs { - type Orderable<'a> = ComplexPrefixAs; +impl AsRef<[u8]> for BytesPrefixAs { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl From> for BytesPrefixAs { + fn from(value: Vec) -> Self { + Self(*value.first_chunk::<4>().unwrap()) + } +} + +impl Meta for BytesPrefixAs { + type Orderable<'a> = Asn; type TBI = (); - - fn as_orderable(&self, _tbi: Self::TBI) -> ComplexPrefixAs { - self.clone() + + fn as_orderable(&self, _tbi: Self::TBI) -> Asn { + u32::from_be_bytes(self.0).into() } } -impl std::fmt::Display for ComplexPrefixAs { +impl std::fmt::Display for BytesPrefixAs { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "AS{:?}", self.0) } } + fn main() -> Result<(), Box> { #[cfg(feature = "cli")] env_logger::init(); trace!("Starting multi-threaded yolo testing...."); - let tree_bitmap = Arc::new(MultiThreadedStore::::new()?); + let tree_bitmap = Arc::new( + StarCastRib::::try_default()?, + ); let f = Arc::new(std::sync::atomic::AtomicBool::new(false)); let pfx = 
Prefix::new_relaxed( @@ -87,13 +78,22 @@ fn main() -> Result<(), Box> { 0, 0, RouteStatus::Active, - ComplexPrefixAs([i as u32].to_vec()), + BytesPrefixAs((i as u32).to_be_bytes()), ), - None + None, ) { Ok(metrics) => { if metrics.cas_count > 0 { - eprintln!("{} {} {:?} retry count: {},", std::thread::current().name().unwrap(), metrics.prefix_new, pfx, metrics.cas_count); + eprintln!( + "{} {} {:?} + retry count: {},", + std::thread::current() + .name() + .unwrap(), + metrics.prefix_new, + pfx, + metrics.cas_count + ); } } Err(e) => { @@ -103,15 +103,14 @@ fn main() -> Result<(), Box> { if x % 1_000_000 == 0 { println!( - "{:?} {} (prefixes count: {}, nodes count: {}", + "{:?} {} (prefixes count: {:?}, + nodes count: {}", std::thread::current().name(), x, tree_bitmap.prefixes_count(), tree_bitmap.nodes_count() ); } - - } }, ) @@ -127,19 +126,20 @@ fn main() -> Result<(), Box> { println!("------ end of inserts\n"); - let guard = &epoch::pin(); + let guard = &rotonda_store::epoch::pin(); let s_spfx = tree_bitmap.match_prefix( &pfx.unwrap(), &MatchOptions { - match_type: rotonda_store::MatchType::ExactMatch, + match_type: MatchType::ExactMatch, include_withdrawn: true, include_less_specifics: true, include_more_specifics: true, - mui: None + mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("query result"); println!("{}", s_spfx); println!("{}", s_spfx.more_specifics.unwrap()); diff --git a/examples/multi_thread_multi_prefix.rs b/examples/multi_thread_multi_prefix.rs index d9385fe3..b38be9f6 100644 --- a/examples/multi_thread_multi_prefix.rs +++ b/examples/multi_thread_multi_prefix.rs @@ -1,6 +1,11 @@ +use inetnum::addr::Prefix; use log::trace; +use rotonda_store::match_options::{IncludeHistory, MatchOptions, MatchType}; +use rotonda_store::prefix_record::{Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::IntoIpAddr; -use rotonda_store::prelude::multi::*; 
use std::sync::atomic::AtomicU32; use std::sync::Arc; use std::thread; @@ -8,16 +13,15 @@ use std::time::Duration; use rand::Rng; -use rotonda_store::meta_examples::PrefixAs; -use rotonda_store::prelude::*; -use rotonda_store::MultiThreadedStore; +use rotonda_store::test_types::PrefixAs; fn main() -> Result<(), Box> { #[cfg(feature = "cli")] env_logger::init(); trace!("Starting multi-threaded yolo testing...."); - let tree_bitmap = Arc::new(MultiThreadedStore::::new()?); + let tree_bitmap = + Arc::new(StarCastRib::::try_default()?); // let pfx = Prefix::new_relaxed( // 0b1111_1111_1111_1111_1111_1111_1111_1111_u32.into_ipaddr(), // 32, @@ -33,7 +37,7 @@ fn main() -> Result<(), Box> { .name(i.to_string()) .spawn( move || -> Result<(), Box> { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); // println!("park thread {}", i); thread::park(); @@ -42,14 +46,21 @@ fn main() -> Result<(), Box> { let mut x = 0; loop { - let pfx = Prefix::new_relaxed(pfx_int.clone().load(std::sync::atomic::Ordering::Relaxed).into_ipaddr(), 32).unwrap(); + let pfx = Prefix::new_relaxed( + pfx_int + .clone() + .load(std::sync::atomic::Ordering::Relaxed) + .into_ipaddr(), + 32, + ) + .unwrap(); let guard = &crossbeam_epoch::pin(); while x < 100 { - let asn = PrefixAs(rng.gen()); + let asn = PrefixAs::new_from_u32(rng.random()); match tree_bitmap.insert( &pfx, Record::new(0, 0, RouteStatus::Active, asn), - None + None, ) { Ok(metrics) => { if metrics.prefix_new { @@ -63,17 +74,21 @@ fn main() -> Result<(), Box> { println!("{}", e); } }; - let _s_spfx = tree_bitmap.match_prefix( - &pfx, - &MatchOptions { - match_type: rotonda_store::MatchType::ExactMatch, - include_withdrawn: true, - include_less_specifics: true, - include_more_specifics: true, - mui: None - }, - guard, - ).prefix_meta; + let _s_spfx = tree_bitmap + .match_prefix( + &pfx, + &MatchOptions { + match_type: MatchType::ExactMatch, + include_withdrawn: true, + include_less_specifics: true, + include_more_specifics: 
true, + mui: None, + include_history: IncludeHistory::None, + }, + guard, + ) + .unwrap() + .records; x += 1; } @@ -83,9 +98,11 @@ fn main() -> Result<(), Box> { thread::park(); // thread::sleep(Duration::from_secs(3)); println!("wake thread {}", i); - println!("prefix count {:?}", tree_bitmap.prefixes_count()); + println!( + "prefix count {:?}", + tree_bitmap.prefixes_count() + ); x = 0; - } }, ) @@ -105,7 +122,7 @@ fn main() -> Result<(), Box> { "increased pfx to {}", pfx_arc.clone().load(std::sync::atomic::Ordering::Relaxed) ); - println!("prefix count: {}", tree_bitmap.prefixes_count()); + println!("prefix count: {:?}", tree_bitmap.prefixes_count()); threads.clone().for_each(|t| { t.thread().unpark(); diff --git a/examples/multi_thread_single_prefix.rs b/examples/multi_thread_single_prefix.rs index c727a67b..801c8aa5 100644 --- a/examples/multi_thread_single_prefix.rs +++ b/examples/multi_thread_single_prefix.rs @@ -1,36 +1,40 @@ use log::trace; +use rotonda_store::match_options::{IncludeHistory, MatchOptions, MatchType}; +use rotonda_store::prefix_record::{Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::IntoIpAddr; -use rotonda_store::prelude::multi::*; use std::sync::Arc; use std::thread; use std::time::Duration; use rand::Rng; -use rotonda_store::prelude::*; -use rotonda_store::MultiThreadedStore; -use rotonda_store::meta_examples::PrefixAs; +use rotonda_store::test_types::PrefixAs; fn main() -> Result<(), Box> { #[cfg(feature = "cli")] env_logger::init(); trace!("Starting multi-threaded yolo testing...."); - let tree_bitmap = Arc::new(MultiThreadedStore::::new()?); + let tree_bitmap = + Arc::new(StarCastRib::::try_default()?); - let pfx = Prefix::new_relaxed( + let pfx = inetnum::addr::Prefix::new_relaxed( 0b1111_1111_1111_1111_1111_1111_1111_1111_u32.into_ipaddr(), 32, ); - let threads = (0..16).enumerate().map(|(i, _)| { - let tree_bitmap = tree_bitmap.clone(); + let 
threads = + (0..16).enumerate().map(|(i, _)| { + let tree_bitmap = tree_bitmap.clone(); - std::thread::Builder::new() + std::thread::Builder::new() .name(i.to_string()) .spawn( move || -> Result<(), Box> { - let mut rng = rand::thread_rng(); + let mut rng = rand::rng(); println!("park thread {}", i); thread::park(); @@ -41,11 +45,11 @@ fn main() -> Result<(), Box> { loop { let guard = &crossbeam_epoch::pin(); while x < 10_000 { - let asn = PrefixAs(rng.gen()); + let asn = PrefixAs::new_from_u32(rng.random()); match tree_bitmap.insert( &pfx.unwrap(), Record::new(0, 0, RouteStatus::Active, asn), - None + None, ) { Ok(metrics) => { if metrics.prefix_new { @@ -62,14 +66,15 @@ fn main() -> Result<(), Box> { let _s_spfx = tree_bitmap.match_prefix( &pfx.unwrap(), &MatchOptions { - match_type: rotonda_store::MatchType::ExactMatch, + match_type: MatchType::ExactMatch, include_withdrawn: true, include_less_specifics: true, include_more_specifics: true, - mui: None + mui: None, + include_history: IncludeHistory::None, }, guard, - ).prefix_meta; + ).unwrap().records; x += 1; } @@ -78,14 +83,16 @@ fn main() -> Result<(), Box> { guard.flush(); thread::sleep(Duration::from_secs(3)); println!("wake thread {}", i); - println!("prefix count {:?}", tree_bitmap.prefixes_count()); + println!( + "prefix count {:?}", + tree_bitmap.prefixes_count() + ); x = 0; - } }, ) .unwrap() - }); + }); threads.for_each(|t| { t.thread().unpark(); diff --git a/examples/numbers_treebitmap.rs b/examples/numbers_treebitmap.rs index 5bee5fb8..ab321e6d 100644 --- a/examples/numbers_treebitmap.rs +++ b/examples/numbers_treebitmap.rs @@ -1,8 +1,9 @@ -use rotonda_store::meta_examples::PrefixAs; -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; +use inetnum::addr::Prefix; +use rotonda_store::prefix_record::{PrefixRecord, Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::test_types::PrefixAs; -use 
std::sync::atomic::Ordering; use std::env; use std::error::Error; use std::ffi::OsString; @@ -10,11 +11,11 @@ use std::fs::File; use std::net::{IpAddr, Ipv4Addr}; use std::process; -#[create_store(( - [4, 4, 4, 4, 4, 4, 4, 4], - [3, 4, 5, 4] -))] -struct MyStore; +// #[create_store(( +// ([4, 4, 4, 4, 4, 4, 4, 4], 5, 17), +// ([3, 4, 5, 4], 16, 29) +// ))] +// struct MyStore; fn get_first_arg() -> Result> { match env::args_os().nth(1) { @@ -44,7 +45,12 @@ fn load_prefixes( let asn: u32 = record[2].parse().unwrap(); let pfx = PrefixRecord::::new( Prefix::new(net, len)?, - vec![Record::new(0, 0, RouteStatus::Active, PrefixAs(asn))], + vec![Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new_from_u32(asn), + )], ); pfxs.push(pfx); // trie.insert(&pfx); @@ -58,7 +64,8 @@ fn main() -> Result<(), Box> { for _strides in strides_vec.iter() { let mut pfxs: Vec> = vec![]; - let tree_bitmap: MyStore = MyStore::::new()?; + let tree_bitmap = + StarCastRib::::try_default()?; if let Err(err) = load_prefixes(&mut pfxs) { println!("error running example: {}", err); @@ -66,11 +73,9 @@ fn main() -> Result<(), Box> { } for pfx in pfxs.into_iter() { - tree_bitmap.insert( - &pfx.prefix, pfx.meta[0].clone(), None - )?; + tree_bitmap.insert(&pfx.prefix, pfx.meta[0].clone(), None)?; } - + #[cfg(feature = "cli")] println!("{:?}", tree_bitmap.print_funky_stats()); } diff --git a/examples/real_single_thread_24.rs b/examples/real_single_thread_24.rs index de196753..5321101e 100644 --- a/examples/real_single_thread_24.rs +++ b/examples/real_single_thread_24.rs @@ -1,28 +1,30 @@ +use inetnum::addr::Prefix; use log::trace; -use std::time::Duration; +use rotonda_store::prefix_record::{Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::IntoIpAddr; use std::thread; +use std::time::Duration; use rand::Rng; -use rotonda_store::prelude::*; - -use rotonda_store::meta_examples::PrefixAs; +use 
rotonda_store::test_types::PrefixAs; fn main() -> Result<(), Box> { #[cfg(feature = "cli")] env_logger::init(); trace!("Starting one-threaded yolo testing...."); - let v4 = vec![8]; - let v6 = vec![8]; - let mut tree_bitmap = rotonda_store::SingleThreadedStore::::new(v4, v6); + let tree_bitmap = + StarCastRib::::try_default()?; let mut pfx_int = 0_u32; let thread = std::thread::Builder::new() .name(1_u8.to_string()) .spawn(move || -> Result<(), Box> { - let mut rng= rand::thread_rng(); + let mut rng = rand::rng(); println!("park thread {}", 1); thread::park(); @@ -31,14 +33,20 @@ fn main() -> Result<(), Box> { while pfx_int <= 24 { pfx_int += 1; - let pfx = Prefix::new_relaxed( - pfx_int.into_ipaddr(), - 32, - ); + let pfx = Prefix::new_relaxed(pfx_int.into_ipaddr(), 32); print!("{}-", pfx_int); - let asn: u32 = rng.gen(); - match tree_bitmap.insert(&pfx.unwrap(), PrefixAs(asn)) { + let asn: u32 = rng.random(); + match tree_bitmap.insert( + &pfx.unwrap(), + Record::new( + 1, + 0, + RouteStatus::Active, + PrefixAs::new_from_u32(asn), + ), + None, + ) { Ok(_) => {} Err(e) => { println!("{}", e); @@ -49,10 +57,9 @@ fn main() -> Result<(), Box> { println!("--thread {} done.", 1); Ok(()) - }) .unwrap(); - + thread.thread().unpark(); thread::sleep(Duration::from_secs(10)); diff --git a/examples/single_thread_24.rs b/examples/single_thread_24.rs index d0308456..de78c553 100644 --- a/examples/single_thread_24.rs +++ b/examples/single_thread_24.rs @@ -1,20 +1,24 @@ +use inetnum::addr::Prefix; use log::trace; -use std::time::Duration; +use rotonda_store::prefix_record::Record; +use rotonda_store::prefix_record::RouteStatus; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::IntoIpAddr; use std::thread; +use std::time::Duration; use rand::Rng; -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; - -use rotonda_store::meta_examples::PrefixAs; +use rotonda_store::test_types::PrefixAs; fn main() -> 
Result<(), Box> { #[cfg(feature = "cli")] env_logger::init(); trace!("Starting multi-threaded yolo testing...."); - let tree_bitmap = MultiThreadedStore::::new()?; + let tree_bitmap = + StarCastRib::::try_default()?; // let f = Arc::new(std::sync::atomic::AtomicBool::new(false)); let mut pfx_int = 0_u32; @@ -28,7 +32,7 @@ fn main() -> Result<(), Box> { .name(1_u8.to_string()) .spawn(move || -> Result<(), Box> { // while !start_flag.load(std::sync::atomic::Ordering::Acquire) { - let mut rng= rand::thread_rng(); + let mut rng = rand::rng(); println!("park thread {}", 1); thread::park(); @@ -38,17 +42,19 @@ fn main() -> Result<(), Box> { // let mut x = 0; while pfx_int <= 24 { pfx_int += 1; - let pfx = Prefix::new_relaxed( - pfx_int.into_ipaddr(), - 32, - ); + let pfx = Prefix::new_relaxed(pfx_int.into_ipaddr(), 32); // x += 1; // print!("{}-", i); - let asn: u32 = rng.gen(); + let asn: u32 = rng.random(); match tree_bitmap.insert( &pfx.unwrap(), - Record::new(0, 0, RouteStatus::Active, PrefixAs(asn)), - None + Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new_from_u32(asn), + ), + None, ) { Ok(_) => {} Err(e) => { diff --git a/examples/treebitmap.rs b/examples/treebitmap.rs index 5d483ee9..feabf3e0 100644 --- a/examples/treebitmap.rs +++ b/examples/treebitmap.rs @@ -1,12 +1,20 @@ use inetnum::addr::Prefix; -use rotonda_store::prelude::*; -use rotonda_store::prelude::multi::*; -use rotonda_store::meta_examples::NoMeta; +use rotonda_store::match_options::IncludeHistory; +use rotonda_store::match_options::MatchOptions; +use rotonda_store::match_options::MatchType; +use rotonda_store::prefix_record::Record; +use rotonda_store::prefix_record::RouteStatus; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; +use rotonda_store::test_types::NoMeta; +use rotonda_store::IntoIpAddr; type Prefix4<'a> = Prefix; -fn main() -> Result<(), Box> { - let tree_bitmap = MultiThreadedStore::new()?; +type Type = Result<(), Box>; + +fn main() 
-> Type { + let tree_bitmap = StarCastRib::<_, MemoryOnlyConfig>::try_default()?; let pfxs = vec![ Prefix::new( 0b0000_0000_0000_0000_0000_0000_0000_0000_u32.into_ipaddr(), @@ -185,9 +193,9 @@ fn main() -> Result<(), Box> { for pfx in pfxs.into_iter() { // println!("insert {:?}", pfx); tree_bitmap.insert( - &pfx.unwrap(), + &pfx.unwrap(), Record::new(0, 0, RouteStatus::Active, NoMeta::Empty), - None + None, )?; } println!("------ end of inserts\n"); @@ -289,7 +297,7 @@ fn main() -> Result<(), Box> { ] { println!("search for: {:?}", spfx); // let locks = tree_bitmap.acquire_prefixes_rwlock_read(); - let guard = &epoch::pin(); + let guard = &rotonda_store::epoch::pin(); let s_spfx = tree_bitmap.match_prefix( &spfx.unwrap(), &MatchOptions { @@ -297,9 +305,10 @@ fn main() -> Result<(), Box> { include_withdrawn: false, include_less_specifics: true, include_more_specifics: false, - mui: None + mui: None, + include_history: IncludeHistory::None, }, - guard + guard, ); println!("lmp: {:?}", s_spfx); println!("-----------"); diff --git a/proc_macros/Cargo.toml b/proc_macros/Cargo.toml deleted file mode 100644 index 4606c638..00000000 --- a/proc_macros/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "rotonda-macros" -categories = ["network-programming"] -description = "Procedural macros for the rotonda-store prefix store" -homepage = "https://nlnetlabs.nl/projects/routing/rotonda/" -repository = "https://github.com/NLnetLabs/rotonda-macros" -keywords = ["routing", "bgp"] -edition.workspace = true -version.workspace = true -authors.workspace = true -rust-version.workspace = true -license.workspace = true - -[lib] -proc-macro = true - -[dependencies] -syn = {version = "^2", features = ["proc-macro", "full", "parsing", "printing"] } -quote = "^1" diff --git a/proc_macros/Changelog.md b/proc_macros/Changelog.md deleted file mode 100644 index ad136953..00000000 --- a/proc_macros/Changelog.md +++ /dev/null @@ -1,41 +0,0 @@ -# Change Log - -## Unreleased Version - 
-Released xxxx-xx-xx. - -Breaking Changes - -New - -Other Changes - -## 0.4.0-rc0 - -Released 2024-06-12. - -Breaking Changes - -* remove MergeUpdate trait - -New - -* public API for best and backup path selection. -* public API for searching and iterating paths for multi_uniq_ids. -* public API for modifying local and global multi_uniq_ids ('mui'). - -## 0.3.1 - -Released 2021-03-25. - -Other Changes - -* Use inetnum create for Prefix, instead of routecore. - -## 0.1.1 - -Released 2021-06-29 - -First Release - -* Meant to be used by rotonda-store < 0.3 diff --git a/proc_macros/LICENSE b/proc_macros/LICENSE deleted file mode 100644 index 3f451536..00000000 --- a/proc_macros/LICENSE +++ /dev/null @@ -1,30 +0,0 @@ -Copyright (c) 2021, NLnet Labs. All rights reserved. - -This software is open source. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -Neither the name of the NLNET LABS nor the names of its contributors may -be used to endorse or promote products derived from this software without -specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/proc_macros/README.md b/proc_macros/README.md deleted file mode 100644 index fad76fb9..00000000 --- a/proc_macros/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# `rotonda-macros` – Procedural macros for Rotonda-store - -This crate provides a few procedural macros for the `rotonda-store`. - -`rotonda-store` is a part of the Rotonda project, a modular, analytical -BGP engine. - -Read more about [Rotonda]. - -## Contributing - -If you have comments, proposed changes, or would like to contribute, -please open an issue in the [GitHub repository]. In particular, if you -would like to use the crate but it is missing functionality for your use -case, we would love to hear from you! - -[GitHub repository]: (https://github.com/NLnetLabs/rotonda-macros) -[Rotonda]: (https://github.com/NLnetLabs/rotonda) - - -## License - -The _rotonda-macros_ crate is distributed under the terms of the BSD-3-clause license. -See LICENSE for details. 
diff --git a/proc_macros/src/lib.rs b/proc_macros/src/lib.rs deleted file mode 100644 index f3f07edf..00000000 --- a/proc_macros/src/lib.rs +++ /dev/null @@ -1,1558 +0,0 @@ -extern crate proc_macro; - -mod maps; - -use proc_macro::TokenStream; -use quote::{format_ident, quote}; -use std::iter::Iterator; -use syn::parse_macro_input; - -#[proc_macro_attribute] -pub fn stride_sizes(attr: TokenStream, input: TokenStream) -> TokenStream { - // The arguments for the macro invocation - let attrs = parse_macro_input!(attr as syn::ExprTuple); - - let attrs = attrs.elems.iter().collect::>(); - - let input = parse_macro_input!(input as syn::ItemStruct); - let type_name = &input.ident; - let ip_af = match attrs[0] { - syn::Expr::Path(t) => t, - _ => panic!("Expected Family Type"), - }; - let prefixes_all_len; - let all_len; - let prefixes_buckets_name: syn::Ident; - // let prefix_store_bits; - let get_root_prefix_set; - - // The name of the Struct that we're going to generate - // We'll prepend it with the name of the TreeBitMap struct - // that the user wants, so that our macro is a little bit - // more hygienic, and the user can create multiple types - // of TreeBitMap structs with different stride sizes. - let buckets_name = if ip_af.path.is_ident("IPv4") { - format_ident!("{}NodeBuckets4", type_name) - } else { - format_ident!("{}NodeBuckets6", type_name) - }; - let store_bits = if ip_af.path.is_ident("IPv4") { - all_len = (0..=32_u8).collect::>(); - prefixes_all_len = (0..=32_u8) - .map(|l| format_ident!("p{}", l)) - .collect::>(); - prefixes_buckets_name = format_ident!("PrefixBuckets4"); - // prefix_store_bits = format_ident!("prefix_store_bits_4"); - get_root_prefix_set = quote! 
{ - fn get_root_prefix_set(&self, len: u8) -> &'_ PrefixSet { - [ - &self.p0, &self.p1, &self.p2, &self.p3, &self.p4, &self.p5, &self.p6, &self.p7, &self.p8, - &self.p9, &self.p10, &self.p11, &self.p12, &self.p13, &self.p14, &self.p15, &self.p16, - &self.p17, &self.p18, &self.p19, &self.p20, &self.p21, &self.p22, &self.p23, &self.p24, - &self.p25, &self.p26, &self.p27, &self.p28, &self.p29, &self.p30, &self.p31, &self.p32 - ][len as usize] - } - }; - crate::maps::node_buckets_map_v4() - } else { - all_len = (0..=128_u8).collect::>(); - prefixes_all_len = (0..=128_u8) - .map(|l| format_ident!("p{}", l)) - .collect::>(); - - prefixes_buckets_name = format_ident!("PrefixBuckets6"); - // prefix_store_bits = format_ident!("prefix_store_bits_6"); - get_root_prefix_set = quote! { - fn get_root_prefix_set(&self, len: u8) -> &'_ PrefixSet { - [ - &self.p0, &self.p1, &self.p2, &self.p3, &self.p4, &self.p5, &self.p6, &self.p7, &self.p8, - &self.p9, &self.p10, &self.p11, &self.p12, &self.p13, &self.p14, &self.p15, &self.p16, - &self.p17, &self.p18, &self.p19, &self.p20, &self.p21, &self.p22, &self.p23, &self.p24, - &self.p25, &self.p26, &self.p27, &self.p28, &self.p29, &self.p30, &self.p31, &self.p32, - &self.p33, &self.p34, &self.p35, &self.p36, &self.p37, &self.p38, &self.p39, &self.p40, - &self.p41, &self.p42, &self.p43, &self.p44, &self.p45, &self.p46, &self.p47, &self.p48, - &self.p49, &self.p50, &self.p51, &self.p52, &self.p53, &self.p54, &self.p55, &self.p56, - &self.p57, &self.p58, &self.p59, &self.p60, &self.p61, &self.p62, &self.p63, &self.p64, - &self.p65, &self.p66, &self.p67, &self.p68, &self.p69, &self.p70, &self.p71, &self.p72, - &self.p73, &self.p74, &self.p75, &self.p76, &self.p77, &self.p78, &self.p79, &self.p80, - &self.p81, &self.p82, &self.p83, &self.p84, &self.p85, &self.p86, &self.p87, &self.p88, - &self.p89, &self.p90, &self.p91, &self.p92, &self.p93, &self.p94, &self.p95, &self.p96, - &self.p97, &self.p98, &self.p99, &self.p100, &self.p101, &self.p102, 
&self.p103, &self.p104, - &self.p105, &self.p106, &self.p107, &self.p108, &self.p109, &self.p110, &self.p111, &self.p112, - &self.p113, &self.p114, &self.p115, &self.p116, &self.p117, &self.p118, &self.p119, &self.p120, - &self.p121, &self.p122, &self.p123, &self.p124, &self.p125, &self.p126, &self.p127, &self.p128 - ][len as usize] - } - }; - crate::maps::node_buckets_map_v6() - }; - - let mut strides_num: Vec = vec![]; - let mut strides = vec![]; - let mut strides_all_len = vec![]; - let mut strides_all_len_accu: Vec = vec![]; - let mut strides_all_len_level = vec![]; - let mut strides_len3 = vec![]; - let mut strides_len3_l = vec![]; - let mut strides_len4 = vec![]; - let mut strides_len4_l = vec![]; - let mut strides_len5 = vec![]; - let mut strides_len5_l = vec![]; - - let mut s_accu = 0_u8; - - let attrs_s = match attrs[1] { - syn::Expr::Array(arr) => arr, - _ => panic!("Expected an array"), - }; - let strides_len = attrs_s.elems.len() as u8; - let first_stride_size = &attrs_s.elems[0]; - - for (len, stride) in attrs_s.elems.iter().enumerate() { - strides_all_len.push(format_ident!("l{}", len)); - - match stride { - syn::Expr::Lit(s) => { - if let syn::Lit::Int(i) = &s.lit { - let stride_len = i.base10_digits().parse::().unwrap(); - strides_num.push(stride_len); - strides_all_len_level.push(format_ident!("l{}", s_accu)); - - match stride_len { - 3 => { - strides_len3.push(s_accu as usize); - strides_len3_l.push(format_ident!("l{}", s_accu)); - } - 4 => { - strides_len4.push(s_accu as usize); - strides_len4_l.push(format_ident!("l{}", s_accu)); - } - 5 => { - strides_len5.push(s_accu as usize); - strides_len5_l.push(format_ident!("l{}", s_accu)); - } - _ => panic!("Expected a stride of 3, 4 or 5"), - }; - strides_all_len_accu.push(s_accu); - - s_accu += stride_len; - strides.push(format_ident!("Stride{}", stride_len)) - } else { - panic!("Expected an integer") - } - } - _ => { - panic!("Expected a literal") - } - } - } - - // Check if the strides division 
makes sense - let mut len_to_stride_arr = [0_u8; 128]; - strides_all_len_accu - .iter() - .zip(strides_num.iter()) - .for_each(|(acc, s)| { - len_to_stride_arr[*acc as usize] = *s; - }); - - // These are the stride sizes as an array of u8s, padded with 0s to the - // right. It's bounded to 42 u8s to avoid having to set a const generic - // on the type (which would have to be carried over to its parent). So - // if a 0 is encountered, it's the end of the strides. - let mut stride_sizes = [0; 42]; - let (left, _right) = stride_sizes.split_at_mut(strides_len as usize); - left.swap_with_slice(&mut strides_num); - - let struct_creation = quote! { - - #[derive(Debug)] - pub(crate) struct #buckets_name { - // created fields for each sub-prefix (StrideNodeId) length, - // with hard-coded field-names, like this: - // l0: NodeSet, - // l5: NodeSet, - // l10: NodeSet, - // ... - // l29: NodeSet - # ( #strides_all_len_level: NodeSet<#ip_af, #strides>, )* - _af: PhantomData, - stride_sizes: [u8; 42], - strides_len: u8 - } - - #[derive(Debug)] - pub(crate) struct #prefixes_buckets_name { - // creates a bucket for each prefix (PrefixId) length, with - // hard-coded field-names, like this: - // p0: PrefixSet, - // p1: PrefixSet, - // ... - // p32: PrefixSet, - #( #prefixes_all_len: PrefixSet<#ip_af, M>, )* - _af: PhantomData, - _m: PhantomData, - } - - }; - - let prefix_buckets_map = if ip_af.path.is_ident("IPv4") { - crate::maps::prefix_buckets_map_v4() - } else { - crate::maps::prefix_buckets_map_v6() - }; - - let prefix_buckets_impl = quote! 
{ - - impl PrefixBuckets<#ip_af, M> for #prefixes_buckets_name { - fn init() -> #prefixes_buckets_name { - #prefixes_buckets_name { - #( #prefixes_all_len: PrefixSet::init(1 << #prefixes_buckets_name::::get_bits_for_len(#all_len, 0)), )* - _af: PhantomData, - _m: PhantomData, - } - } - - fn remove(&mut self, id: PrefixId<#ip_af>) -> Option { unimplemented!() } - - #get_root_prefix_set - - #prefix_buckets_map - - } - - }; - - let struct_impl = quote! { - - impl NodeBuckets<#ip_af> for #buckets_name { - fn init() -> Self { - #buckets_name { - // creates l0, l1, ... l, but only for the - // levels at the end of each stride, so for strides - // [5,5,4,3,3,3,3,3,3] is will create l0, l5, l10, l14, - // l17, l20, l23, l26, l29 last level will be omitted, - // because that will never be used (l29 has children - // with prefixes up to prefix-length 32 in this example). - #( #strides_all_len_level: NodeSet::init(#buckets_name::::len_to_store_bits(#strides_all_len_accu, 0) ), )* - _af: PhantomData, - stride_sizes: [ #( #stride_sizes, )*], - strides_len: #strides_len - } - } - - fn get_store3(&self, id: StrideNodeId<#ip_af>) -> &NodeSet<#ip_af, Stride3> { - match id.get_id().1 as usize { - #( #strides_len3 => &self.#strides_len3_l, )* - _ => panic!( - "unexpected sub prefix length {} in stride size 3 ({})", - id.get_id().1, - id - ), - } - } - - fn get_store4(&self, id: StrideNodeId<#ip_af>) -> &NodeSet<#ip_af, Stride4> { - match id.get_id().1 as usize { - #( #strides_len4 => &self.#strides_len4_l, )* - // ex.: - // 10 => &self.l10, - _ => panic!( - "unexpected sub prefix length {} in stride size 4 ({})", - id.get_id().1, - id - ), - } - } - - fn get_store5(&self, id: StrideNodeId<#ip_af>) -> &NodeSet<#ip_af, Stride5> { - match id.get_id().1 as usize { - #( #strides_len5 => &self.#strides_len5_l, )* - // ex.: - // 0 => &self.l0, - // 5 => &self.l5, - _ => panic!( - "unexpected sub prefix length {} in stride size 5 ({})", - id.get_id().1, - id - ), - } - } - - #[inline] - fn 
get_stride_sizes(&self) -> &[u8] { - &self.stride_sizes[0..self.strides_len as usize] - } - - #[inline] - fn get_stride_for_id(&self, id: StrideNodeId<#ip_af>) -> u8 { - [ #(#len_to_stride_arr, )* ][id.get_id().1 as usize] - } - - #[inline] - #store_bits - - fn get_strides_len() -> u8 { - #strides_len - } - - fn get_first_stride_size() -> u8 { - #first_stride_size - } - } - - }; - - let type_alias = quote! { - type #type_name = TreeBitMap<#ip_af, M, #buckets_name<#ip_af>, #prefixes_buckets_name<#ip_af, M>>; - }; - - let result = quote! { - #struct_creation - #struct_impl - #prefix_buckets_impl - #type_alias - }; - - TokenStream::from(result) -} - -// ---------- Create Store struct ------------------------------------------- - -// This macro creates the struct that will be the public API for the -// PrefixStore. Therefore all methods defined in here should be public. - -/// Creates a new, user-named struct with user-defined specified stride sizes -/// that can used as a store type. -/// -/// # Usage -/// ```ignore -/// use rotonda_store::prelude::*; -/// use rotonda_store::prelude::multi::*; -/// use rotonda_store::meta_examples::PrefixAs; -/// -/// const IP4_STRIDE_ARRAY = [4; 8]; -/// const IP6_STRIDE_ARRAY = [4; 32]; -/// -/// #[create_store((IPV4_STRIDE_ARRAY, IPV6_STRIDE_ARRAY))] -/// struct NuStorage; -/// ``` -/// -/// This will create a `NuStorage` struct, that can be used as a regular -/// store. -/// -/// The stride-sizes can be any of \[3,4,5\], and they should add up -/// to the total number of bits in the address family (32 for IPv4 and -/// 128 for IPv6). Stride sizes in the array will be repeated if the sum -/// of them falls short of the total number of bits for the address -/// family. -/// -/// # Example -/// ```ignore -/// use rotonda_store::prelude::*; -/// use rotonda_store::prelude::multi::*; -/// use rotonda_store::meta_examples::PrefixAs; -/// -/// // The default stride sizes for IPv4, IPv6, resp. 
-/// #[create_store(( -/// [5, 5, 4, 3, 3, 3, 3, 3, 3, 3], -/// [4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, -/// 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4] -/// ))] -/// struct NuStore; -/// -/// let store = Arc::new(NuStore::::new().unwrap()); -/// ``` -#[proc_macro_attribute] -pub fn create_store(attr: TokenStream, item: TokenStream) -> TokenStream { - let input = parse_macro_input!(item as syn::ItemStruct); - let store_name = &input.ident; - - let attr = parse_macro_input!(attr as syn::ExprTuple); - let attrs = attr.elems.iter().collect::>(); - let strides4 = attrs[0].clone(); - let strides6 = attrs[1].clone(); - let strides4_name = format_ident!("{}IPv4", store_name); - let strides6_name = format_ident!("{}IPv6", store_name); - - let create_strides = quote! { - use ::std::marker::PhantomData; - use ::inetnum::addr::Prefix; - - #[stride_sizes((IPv4, #strides4))] - struct #strides4_name; - - #[stride_sizes((IPv6, #strides6))] - struct #strides6_name; - }; - - let store = quote! { - /// A concurrently read/writable, lock-free Prefix Store, for use in a - /// multi-threaded context. - /// - /// This store will hold records keyed on Prefix, and with values - /// consisting of a multi-map (a map that can hold multiple values per - /// key), filled with Records. - /// - /// Records in the store contain the metadata, a `multi_uniq_id`, - /// logical time (to disambiguate the order of inserts into the store) - /// and the status of the Record. - /// - /// Effectively this means that the store holds values for the set of - /// `(prefix, multi_uniq_id)` pairs, where the primary key is the - /// prefix, and the secondary key is the `multi_uniq_id`. These - /// `multi_uniq_id`s are unique across all of the store. The store - /// facilitates iterating over and changing the status for all - /// prefixes per `multi_uniq_id`. - /// - /// The store has the concept of a global status for a - /// `multi_uniq_id`, e.g. 
to set all prefixes for a `multi_uniq_id` in - /// one atomic transaction to withdrawn. It also has local statuses - /// per `(prefix, multi_uniq_id)` pairs, e.g. to withdraw one value - /// for a `multi_uniq_id`. - /// - /// This way the store can hold RIBs for multiple peers in one - /// data-structure. - pub struct #store_name< - M: Meta - > { - v4: #strides4_name, - v6: #strides6_name, - } - - impl< - M: Meta - > Default for #store_name - { - fn default() -> Self { - Self::new().expect("failed to create store") - } - } - - impl< - M: Meta - > #store_name - { - /// Creates a new empty store with a tree for IPv4 and on for IPv6. - /// - /// The store will be created with the default stride sizes. After - /// creation you can wrap the store in an Arc<_> and `clone()` that - /// for every thread that needs read access and/or write acces to - /// it. As a convenience both read and write methods take a `&self` - /// instead of `&mut self`. - /// - /// If you need custom stride sizes you can use the - /// [`#[create_store]`](rotonda_macros::create_store) macro to - /// create a struct with custom stride sizes. 
- /// - /// # Example - /// ``` - /// use std::{sync::Arc, thread}; - /// use std::net::Ipv4Addr; - /// - /// use rotonda_store::prelude::*; - /// use rotonda_store::prelude::multi::*; - /// use rotonda_store::meta_examples::{NoMeta, PrefixAs}; - /// - /// let tree_bitmap = Arc::new(MultiThreadedStore::::new().unwrap()); - /// - /// let _: Vec<_> = (0..16) - /// .map(|_| { - /// let tree_bitmap = tree_bitmap.clone(); - /// - /// thread::spawn(move || { - /// let pfxs = [ - /// Prefix::new_relaxed( - /// Ipv4Addr::new(130, 55, 241, 0).into(), - /// 24, - /// ), - /// Prefix::new_relaxed( - /// Ipv4Addr::new(130, 55, 240, 0).into(), - /// 24, - /// ) - /// ]; - /// - /// for pfx in pfxs.into_iter() { - /// println!("insert {}", pfx.unwrap()); - /// tree_bitmap.insert( - /// &pfx.unwrap(), - /// Record::new(0, 0, RouteStatus::Active, NoMeta::Empty), - /// None - /// ).unwrap(); - /// } - /// }) - /// }).map(|t| t.join()).collect(); - /// ``` - pub fn new() -> Result> { - Ok(Self { - v4: #strides4_name::new()?, - v6: #strides6_name::new()?, - }) - } - } - - impl<'a, M: Meta, - > #store_name - { - /// Search for and return one or more prefixes that match the given - /// `search_pfx` argument. - /// - /// The search will return a [QueryResult] with the matching prefix, - /// if any, the type of match for the found prefix and the more and - /// less specifics for the requested prefix. The inclusion of more- - /// or less-specifics and the requested `match_type` is configurable - /// through the [MatchOptions] argument. - /// - /// The `match_type` in the `MatchOptions` indicates what match - /// types can appear in the [QueryResult] result. - /// - /// `ExactMatch` is the most strict, and will only allow exactly - /// matching prefixes in the result. Failing an exacly matching - /// prefix, it will return an `EmptyMatch`. 
- /// - /// `LongestMatch` is less strict, and either an exactly matching - /// prefix or - in case there is no exact match - a longest matching - /// prefix will be allowed in the result. Failing both an EmptyMatch - /// will be returned. - /// - /// For both `ExactMatch` and `LongestMatch` the - /// `include_less_specifics` and `include_more_specifics` options - /// will be respected and the result will contain the more and less - /// specifics according to the options for the requested prefix, - /// even if the result returns a `match_type` of `EmptyMatch`. - /// - /// `EmptyMatch` is the least strict, and will *always* return the - /// requested prefix, be it exactly matching, longest matching or not - /// matching at all (empty match), again, together with its less|more - /// specifics (if requested). Note that the last option, the empty - /// match in the result will never return less-specifics, but can - /// return more-specifics for a prefix that itself is not present - /// in the store. - /// - /// - /// This table sums it up: - /// - /// | query match_type | possible result types | less-specifics? | more-specifics? | - /// | ---------------- | ------------------------------------------ | --------------- | --------------- | - /// | `ExactMatch` | `ExactMatch`, `EmptyMatch` | maybe | maybe | - /// | `LongestMatch` | `ExactMatch`, `LongestMatch`, `EmptyMatch` | maybe | maybe | - /// | `EmptyMatch` | `ExactMatch`, `LongestMatch`, `EmptyMatch` | no for EmptyM res, maybe for others | yes for EmptyM for res, maybe for others | - /// - /// - /// Note that the behavior of the CLI command `show route exact` on - /// most router platforms can be modeled by setting the `match_type` - /// to `ExactMatch` and `include_less_specifics` to `true`. 
- /// - /// # Example - /// ``` - /// use std::net::Ipv4Addr; - /// - /// use rotonda_store::prelude::*; - /// use rotonda_store::meta_examples::PrefixAs; - /// use rotonda_store::prelude::multi::*; - /// - /// let store = MultiThreadedStore::::new().unwrap(); - /// let guard = &epoch::pin(); - /// - /// let pfx_addr = "185.49.140.0".parse::() - /// .unwrap() - /// .into(); - /// - /// store.insert( - /// &Prefix::new(pfx_addr, 22).unwrap(), - /// Record::new(0, 0, RouteStatus::Active, PrefixAs(211321)), - /// None - /// ); - /// - /// let res = store.match_prefix( - /// &Prefix::new(pfx_addr, 24).unwrap(), - /// &MatchOptions { - /// match_type: MatchType::LongestMatch, - /// include_withdrawn: false, - /// include_less_specifics: false, - /// include_more_specifics: false, - /// mui: None - /// }, - /// guard - /// ); - /// - /// assert_eq!(res.prefix_meta[0].meta.0, 211321); - /// - /// let res = store.match_prefix( - /// &Prefix::new(pfx_addr, 24).unwrap(), - /// &MatchOptions { - /// match_type: MatchType::ExactMatch, - /// include_withdrawn: false, - /// include_less_specifics: false, - /// include_more_specifics: false, - /// mui: None - /// }, - /// guard - /// ); - /// - /// assert!(res.match_type.is_empty()); - /// - /// ``` - pub fn match_prefix( - &'a self, - search_pfx: &Prefix, - options: &MatchOptions, - guard: &'a Guard, - ) -> QueryResult { - - match search_pfx.addr() { - std::net::IpAddr::V4(addr) => { - self.v4.match_prefix_by_store_direct( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - options, - options.mui, - guard - ) - }, - std::net::IpAddr::V6(addr) => self.v6.match_prefix_by_store_direct( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - options, - options.mui, - guard - ), - } - } - - /// Return the record that belongs to the pre-calculated and - /// stored best path for a given prefix. - /// - /// If the Prefix does not exist in the store `None` is returned. 
- /// If the prefix does exist, but no best path was calculated - /// (yet), a `PrefixStoreError::BestPathNotFound` error will be - /// returned. A returned result of - /// `PrefixError::StoreNotReadyError` should never happen: it - /// would indicate an internal inconsistency in the store. - pub fn best_path(&'a self, - search_pfx: &Prefix, - guard: &Guard - ) -> Option, PrefixStoreError>> { - - match search_pfx.addr() { - std::net::IpAddr::V4(addr) => self.v4.store - .non_recursive_retrieve_prefix( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - ) - .0 - .map(|p_rec| unsafe { p_rec - .get_path_selections(guard).best() - .map_or_else( - || Err(PrefixStoreError::BestPathNotFound), - |mui| p_rec.record_map - .get_record_for_active_mui(mui) - .ok_or(PrefixStoreError::StoreNotReadyError) - ) - }), - std::net::IpAddr::V6(addr) => self.v6.store - .non_recursive_retrieve_prefix( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - ) - .0 - .map(|p_rec| unsafe { p_rec - .get_path_selections(guard).best() - .map_or_else( - || Err(PrefixStoreError::BestPathNotFound), - |mui| p_rec.record_map - .get_record_for_active_mui(mui) - .ok_or(PrefixStoreError::StoreNotReadyError) - ) - }) - } - } - - /// Calculate and store the best path for the specified Prefix. - /// - /// If the result of the calculation is successful it will be - /// stored for the prefix. If they were set, it will return the - /// multi_uniq_id of the best path and the one for the backup - /// path, respectively. If the prefix does not exist in the store, - /// `None` will be returned. If the best path cannot be - /// calculated, a `Ok(None, None)` will be returned. - /// - /// Failing to calculate a best path, may be caused by - /// unavailability of any active paths, or by a lack of data (in - /// either the paths, or the supplied `TiebreakerInfo`). - /// - /// An Error result indicates an inconsistency in the store. 
- pub fn calculate_and_store_best_and_backup_path( - &self, - search_pfx: &Prefix, - tbi: &::TBI, - guard: &Guard - ) -> Result<(Option, Option), PrefixStoreError> { - match search_pfx.addr() { - std::net::IpAddr::V4(addr) => self.v4.store - .non_recursive_retrieve_prefix( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - // guard - ).0.map_or( - Err(PrefixStoreError::StoreNotReadyError), - |p_rec| p_rec.calculate_and_store_best_backup( - tbi, guard), - ), - std::net::IpAddr::V6(addr) => self.v6.store - .non_recursive_retrieve_prefix( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - // guard - ).0.map_or( - Err(PrefixStoreError::StoreNotReadyError), - |p_rec| p_rec.calculate_and_store_best_backup( - tbi, guard), - ), - } - } - - pub fn is_ps_outdated( - &self, - search_pfx: &Prefix, - guard: &Guard - ) -> Result { - match search_pfx.addr() { - std::net::IpAddr::V4(addr) => self.v4.store - .non_recursive_retrieve_prefix( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - // guard - ).0.map_or( - Err(PrefixStoreError::StoreNotReadyError), - |p| Ok(p.is_ps_outdated(guard)) - ), - std::net::IpAddr::V6(addr) => self.v6.store - .non_recursive_retrieve_prefix( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - // guard - ).0.map_or( - Err(PrefixStoreError::StoreNotReadyError), - |p| Ok(p.is_ps_outdated(guard)) - ) - } - } - - /// Return a [QueryResult] that contains all the more-specific - /// prefixes of the `search_pfx` in the store, including the - /// meta-data of these prefixes. - /// - /// The `search_pfx` argument can be either a IPv4 or an IPv6 - /// prefix. The `search_pfx` itself doesn't have to be present - /// in the store for an iterator to be non-empty, i.e. if - /// more-specific prefixes exist for a non-existent - /// `search_pfx` the iterator will yield these more-specific - /// prefixes. - /// - /// The `guard` should be a `&epoch::pin()`. 
It allows the - /// QuerySet to contain references to the meta-data objects, - /// instead of cloning them into it. - pub fn more_specifics_from(&'a self, - search_pfx: &Prefix, - mui: Option, - include_withdrawn: bool, - guard: &'a Guard, - ) -> QueryResult { - - match search_pfx.addr() { - std::net::IpAddr::V4(addr) => self.v4.more_specifics_from( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - mui, - include_withdrawn, - guard - ), - std::net::IpAddr::V6(addr) => self.v6.more_specifics_from( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - mui, - include_withdrawn, - guard - ), - } - } - - /// Return a `QuerySet` that contains all the less-specific - /// prefixes of the `search_pfx` in the store, including the - /// meta-data of these prefixes. - /// - /// The `search_pfx` argument can be either a IPv4 or an IPv6 - /// prefix. The `search_pfx` itself doesn't have to be present - /// in the store for an iterator to be non-empty, i.e. if - /// less-specific prefixes exist for a non-existent - /// `search_pfx` the iterator will yield these less-specific - /// prefixes. - /// - /// The `guard` should be a `&epoch::pin()`. It allows the - /// QuerySet to contain references to the meta-data objects, - /// instead of cloning them into it. - pub fn less_specifics_from(&'a self, - search_pfx: &Prefix, - mui: Option, - include_withdrawn: bool, - guard: &'a Guard, - ) -> QueryResult { - - match search_pfx.addr() { - std::net::IpAddr::V4(addr) => self.v4.less_specifics_from( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - mui, - include_withdrawn, - guard - ), - std::net::IpAddr::V6(addr) => self.v6.less_specifics_from( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - mui, - include_withdrawn, - guard - ), - } - } - - /// Returns an iterator over all the less-specific prefixes - /// of the `search_prefix`, if present in the store, including - /// the meta-data of these prefixes. 
- /// - /// The `search_pfx` argument can be either a IPv4 or an IPv6 - /// prefix. The `search_pfx` itself doesn't have to be present - /// in the store for an iterator to be non-empty, i.e. if - /// less-specific prefixes exist for a non-existent - /// `search_pfx` the iterator will yield these less-specific - /// prefixes. - /// - /// The `guard` should be a `&epoch::pin()`. It allows the - /// iterator to create and return references to the meta-data - /// objects to the caller (instead of cloning them). - /// - /// # Example - /// ``` - /// use std::net::Ipv4Addr; - /// - /// use rotonda_store::prelude::*; - /// use rotonda_store::meta_examples::PrefixAs; - /// use rotonda_store::prelude::multi::*; - /// - /// - /// let store = MultiThreadedStore::::new().unwrap(); - /// let guard = epoch::pin(); - /// - /// let pfx_addr = "185.49.140.0".parse::() - /// .unwrap() - /// .into(); - /// - /// store.insert( - /// &Prefix::new(pfx_addr, 22).unwrap(), - /// Record::new(0, 0, RouteStatus::Active, PrefixAs(211321)), - /// None - /// ); - /// - /// for prefix_record in store.less_specifics_iter_from( - /// &Prefix::new(pfx_addr, 24).unwrap(), - /// None, - /// false, - /// &guard - /// ) { - /// assert_eq!(prefix_record.meta[0].meta.0, 211321); - /// } - /// ``` - pub fn less_specifics_iter_from(&'a self, - search_pfx: &Prefix, - mui: Option, - include_withdrawn: bool, - guard: &'a Guard, - ) -> impl Iterator> + 'a { - let (left, right) = match search_pfx.addr() { - std::net::IpAddr::V4(addr) => { - ( - Some(self.v4.store.less_specific_prefix_iter( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - mui, - include_withdrawn, - guard - ) - .map(|p| PrefixRecord::from(p)) - ), - None - ) - } - std::net::IpAddr::V6(addr) => { - ( - None, - Some(self.v6.store.less_specific_prefix_iter( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - mui, - include_withdrawn, - guard - ) - .map(|p| PrefixRecord::from(p)) - ) - ) - } - }; - 
left.into_iter().flatten().chain(right.into_iter().flatten()) - } - - /// Returns an iterator over all the more-specifics prefixes - /// of the `search_prefix`, if present in the store, including - /// the meta-data of these prefixes. - /// - /// The `search_pfx` argument can be either a IPv4 or an IPv6 - /// prefix. The `search_pfx` itself doesn't have to be present - /// in the store for an iterator to be non-empty, i.e. if - /// more-specific prefixes exist for a non-existent - /// `search_pfx` the iterator will yield these more-specific - /// prefixes. - /// - /// The `guard` should be a `&epoch::pin()`. It allows the - /// iterator to create and return references to the meta-data - /// objects to the caller (instead of cloning them). - /// - /// # Example - /// ``` - /// use std::net::Ipv4Addr; - /// - /// use rotonda_store::prelude::*; - /// use rotonda_store::prelude::multi::*; - /// use rotonda_store::meta_examples::PrefixAs; - /// - /// let store = MultiThreadedStore::::new().unwrap(); - /// let guard = epoch::pin(); - /// - /// let pfx_addr = "185.49.140.0".parse::() - /// .unwrap() - /// .into(); - /// - /// store.insert( - /// &Prefix::new(pfx_addr, 24).unwrap(), - /// Record::new(0, 0, RouteStatus::Active, PrefixAs(211321)), - /// None - /// ); - /// - /// for prefix_record in store.more_specifics_iter_from( - /// &Prefix::new(pfx_addr, 22).unwrap(), - /// None, - /// false, - /// &guard - /// ) { - /// assert_eq!(prefix_record.meta[0].meta.0, 211321); - /// } - /// ``` - pub fn more_specifics_iter_from(&'a self, - search_pfx: &Prefix, - mui: Option, - include_withdrawn: bool, - guard: &'a Guard, - ) -> impl Iterator> + 'a { - - let (left, right) = match search_pfx.addr() { - std::net::IpAddr::V4(addr) => { - let bmin = unsafe { - self.v4.store.withdrawn_muis_bmin.load( - Ordering::Acquire, guard - ).deref() - }; - if mui.is_some() && bmin.contains(mui.unwrap()) { - (None, None) - } else { - ( - Some(self.v4.store.more_specific_prefix_iter_from( - 
PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - mui, - include_withdrawn, - guard - ).map(|p| PrefixRecord::from(p)) - ), - None - ) - } - } - std::net::IpAddr::V6(addr) => { - let bmin = unsafe { - self.v6.store.withdrawn_muis_bmin.load( - Ordering::Acquire, guard - ).deref() - }; - if mui.is_some() && bmin.contains(mui.unwrap()) { - (None, None) - } else { - ( - None, - Some(self.v6.store.more_specific_prefix_iter_from( - PrefixId::::new( - addr.into(), - search_pfx.len(), - ), - mui, - include_withdrawn, - guard - ).map(|p| PrefixRecord::from(p)) - ) - ) - } - } - }; - left.into_iter().flatten().chain(right.into_iter().flatten()) - } - - pub fn iter_records_for_mui_v4( - &'a self, - mui: u32, - include_withdrawn: bool, - guard: &'a Guard - ) -> impl Iterator> +'a { - - let bmin = unsafe { - self.v4.store.withdrawn_muis_bmin.load( - Ordering::Acquire, guard - ).deref() - }; - - if bmin.contains(mui) && !include_withdrawn { - None - } else { - Some( - self.v4.store.more_specific_prefix_iter_from( - PrefixId::::new( - 0, - 0, - ), - Some(mui), - include_withdrawn, - guard - ).map(|p| PrefixRecord::from(p)) - ) - }.into_iter().flatten() - } - - pub fn iter_records_for_mui_v6( - &'a self, - mui: u32, - include_withdrawn: bool, - guard: &'a Guard - ) -> impl Iterator> +'a { - - let bmin = unsafe { - self.v4.store.withdrawn_muis_bmin.load( - Ordering::Acquire, guard - ).deref() - }; - - if bmin.contains(mui) && !include_withdrawn { - None - } else { - Some( - self.v6.store.more_specific_prefix_iter_from( - PrefixId::::new( - 0, - 0, - ), - Some(mui), - include_withdrawn, - guard - ).map(|p| PrefixRecord::from(p)) - ) - }.into_iter().flatten() - } - - /// Insert or replace a Record into the Store - /// - /// The specified Record will replace an existing record in the - /// store if the multi-map for the specified prefix already has an - /// entry for the `multi_uniq_id`, otherwise it will be added to - /// the multi-map. 
- /// - /// If the `update_path_sections` argument is used the best path - /// selection will be run on the resulting multi-map after insert - /// and stored for the specified prefix. - /// - /// Returns some metrics about the resulting insert. - pub fn insert( - &self, - prefix: &Prefix, - record: Record, - update_path_selections: Option - ) -> Result { - match prefix.addr() { - std::net::IpAddr::V4(addr) => { - self.v4.insert( - PrefixId::::from(*prefix), - record, - update_path_selections, - ) - } - std::net::IpAddr::V6(addr) => { - self.v6.insert( - PrefixId::::from(*prefix), - record, - update_path_selections, - ) - } - } - } - - /// Returns an unordered iterator over all prefixes, with any - /// status (including Withdrawn), for both IPv4 and IPv6, - /// currently in the store, including meta-data. - /// - /// Although the iterator is unordered within an address-family, - /// it first iterates over all IPv4 addresses and then over all - /// IPv6 addresses. - /// - /// The `guard` should be a `&epoch::pin()`. It allows the - /// iterator to create and return references to the meta-data - /// objects to the caller (instead of cloning them). 
- /// - /// # Example - /// ``` - /// use std::net::Ipv4Addr; - /// - /// use rotonda_store::prelude::*; - /// use rotonda_store::prelude::multi::*; - /// use rotonda_store::meta_examples::PrefixAs; - /// - /// let store = MultiThreadedStore::::new().unwrap(); - /// let guard = epoch::pin(); - /// - /// let pfx_addr = "185.49.140.0".parse::() - /// .unwrap() - /// .into(); - /// let our_asn = Record::new(0, 0, RouteStatus::Active, PrefixAs(211321)); - /// - /// store.insert(&Prefix::new(pfx_addr, 22).unwrap(), our_asn.clone(), None); - /// store.insert(&Prefix::new(pfx_addr, 23).unwrap(), our_asn.clone(), None); - /// store.insert(&Prefix::new(pfx_addr, 24).unwrap(), our_asn.clone(), None); - /// store.insert(&Prefix::new(pfx_addr, 25).unwrap(), our_asn, None); - /// - /// let mut iter = store.prefixes_iter(); - /// - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 22).unwrap()); - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 23).unwrap()); - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 24).unwrap()); - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 25).unwrap()); - /// ``` - pub fn prefixes_iter( - &'a self, - ) -> impl Iterator> + 'a { - self.v4.store.prefixes_iter() - .map(|p| PrefixRecord::from(p)) - .chain( - self.v6.store.prefixes_iter() - .map(|p| PrefixRecord::from(p)) - ) - } - - /// Returns an unordered iterator over all IPv4 prefixes in the - /// currently in the store, with any status (including Withdrawn), - /// including meta-data. - /// - /// The `guard` should be a `&epoch::pin()`. It allows the - /// iterator to create and return references to the meta-data - /// objects to the caller (instead of cloning them). 
- /// - /// # Example - /// ``` - /// use std::net::Ipv4Addr; - /// - /// use rotonda_store::prelude::*; - /// use rotonda_store::prelude::multi::*; - /// use rotonda_store::meta_examples::PrefixAs; - /// - /// let store = MultiThreadedStore::::new().unwrap(); - /// let guard = epoch::pin(); - /// - /// let pfx_addr = "185.49.140.0".parse::() - /// .unwrap() - /// .into(); - /// let our_asn = Record::new(0, 0, RouteStatus::Active, PrefixAs(211321)); - /// - /// store.insert(&Prefix::new(pfx_addr, 22).unwrap(), our_asn.clone(), None); - /// store.insert(&Prefix::new(pfx_addr, 23).unwrap(), our_asn.clone(), None); - /// store.insert(&Prefix::new(pfx_addr, 24).unwrap(), our_asn.clone(), None); - /// store.insert(&Prefix::new(pfx_addr, 25).unwrap(), our_asn, None); - /// - /// let mut iter = store.prefixes_iter(); - /// - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 22).unwrap()); - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 23).unwrap()); - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 24).unwrap()); - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 25).unwrap()); - /// ``` - pub fn prefixes_iter_v4( - &'a self, - ) -> impl Iterator> + 'a { - self.v4.store.prefixes_iter() - .map(|p| PrefixRecord::from(p)) - } - - /// Returns an unordered iterator over all IPv6 prefixes in the - /// currently in the store, with any status (including Withdrawn), - /// including meta-data. - /// - /// The `guard` should be a `&epoch::pin()`. It allows the - /// iterator to create and return references to the meta-data - /// objects to the caller (instead of cloning them). 
- /// - /// # Example - /// ``` - /// use std::net::Ipv6Addr; - /// - /// use rotonda_store::prelude::*; - /// use rotonda_store::prelude::multi::*; - /// use rotonda_store::meta_examples::PrefixAs; - /// - /// let store = MultiThreadedStore::::new().unwrap(); - /// let guard = epoch::pin(); - /// - /// let pfx_addr = "2a04:b900::".parse::() - /// .unwrap() - /// .into(); - /// let our_asn = Record::new(0, 0, RouteStatus::Active, PrefixAs(211321)); - /// - /// store.insert(&Prefix::new(pfx_addr, 29).unwrap(), our_asn.clone(), None); - /// store.insert(&Prefix::new(pfx_addr, 48).unwrap(), our_asn.clone(), None); - /// store.insert(&Prefix::new(pfx_addr, 56).unwrap(), our_asn.clone(), None); - /// store.insert(&Prefix::new(pfx_addr, 64).unwrap(), our_asn, None); - /// - /// let mut iter = store.prefixes_iter(); - /// - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 29).unwrap()); - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 48).unwrap()); - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 56).unwrap()); - /// assert_eq!(iter.next().unwrap().prefix, - /// Prefix::new(pfx_addr, 64).unwrap()); - /// ``` - pub fn prefixes_iter_v6( - &'a self, - ) -> impl Iterator> + 'a { - self.v6.store.prefixes_iter() - .map(|p| PrefixRecord::from(p)) - } - - /// Change the local status of the record for the combination of - /// (prefix, multi_uniq_id) to Withdrawn. Note that by default the - /// global `Withdrawn` status for a mui overrides the local status - /// of a record. 
- pub fn mark_mui_as_withdrawn_for_prefix( - &self, - prefix: &Prefix, - mui: u32 - ) -> Result<(), PrefixStoreError> { - let guard = &epoch::pin(); - match prefix.addr() { - std::net::IpAddr::V4(addr) => { - self.v4.store.mark_mui_as_withdrawn_for_prefix( - PrefixId::::from(*prefix), - mui, - // &guard - ) - } - std::net::IpAddr::V6(addr) => { - self.v6.store.mark_mui_as_withdrawn_for_prefix( - PrefixId::::from(*prefix), - mui, - // &guard - ) - } - } - } - - /// Change the local status of the record for the combination of - /// (prefix, multi_uniq_id) to Active. Note that by default the - /// global `Withdrawn` status for a mui overrides the local status - /// of a record. - pub fn mark_mui_as_active_for_prefix( - &self, - prefix: &Prefix, - mui: u32 - ) -> Result<(), PrefixStoreError> { - let guard = &epoch::pin(); - match prefix.addr() { - std::net::IpAddr::V4(addr) => { - self.v4.store.mark_mui_as_active_for_prefix( - PrefixId::::from(*prefix), - mui, - // &guard - ) - } - std::net::IpAddr::V6(addr) => { - self.v6.store.mark_mui_as_active_for_prefix( - PrefixId::::from(*prefix), - mui, - // &guard - ) - } - } - } - - /// Change the status of all records for IPv4 prefixes for this - /// `multi_uniq_id` globally to Active. Note that the global - /// `Active` status will be overridden by the local status of the - /// record. - pub fn mark_mui_as_active_v4( - &self, - mui: u32 - ) -> Result<(), PrefixStoreError> { - let guard = &epoch::pin(); - - self.v4.store.mark_mui_as_active( - mui, - &guard - ) - } - - /// Change the status of all records for IPv4 prefixes for this - /// `multi_uniq_id` globally to Withdrawn. A global `Withdrawn` - /// status for a `multi_uniq_id` overrides the local status of - /// prefixes for this mui. However the local status can still be - /// modified. This modification will take effect if the global - /// status is changed to `Active`. 
- pub fn mark_mui_as_withdrawn_v4( - &self, - mui: u32 - ) -> Result<(), PrefixStoreError> { - let guard = &epoch::pin(); - - self.v4.store.mark_mui_as_withdrawn( - mui, - &guard - ) - } - - /// Change the status of all records for IPv6 prefixes for this - /// `multi_uniq_id` globally to Active. Note that the global - /// `Active` status will be overridden by the local status of the - /// record. - pub fn mark_mui_as_active_v6( - &self, - mui: u32 - ) -> Result<(), PrefixStoreError> { - let guard = &epoch::pin(); - - self.v6.store.mark_mui_as_active( - mui, - &guard - ) - } - - /// Change the status of all records for IPv6 prefixes for this - /// `multi_uniq_id` globally to Withdrawn. A global `Withdrawn` - /// status for a `multi_uniq_id` overrides the local status of - /// prefixes for this mui. However the local status can still be - /// modified. This modification will take effect if the global - /// status is changed to `Active`. - pub fn mark_mui_as_withdrawn_v6( - &self, - mui: u32 - ) -> Result<(), PrefixStoreError> { - let guard = &epoch::pin(); - - self.v6.store.mark_mui_as_withdrawn( - mui, - &guard - ) - } - - - /// Change the status of all records for this `multi_uniq_id` to - /// Withdrawn. - /// - /// This method tries to mark all records: first the IPv4 records, - /// then the IPv6 records. If marking of the IPv4 records fails, - /// the method continues and tries to mark the IPv6 records. If - /// either or both fail, an error is returned. - pub fn mark_mui_as_withdrawn( - &self, - mui: u32 - ) -> Result<(), PrefixStoreError> { - let guard = &epoch::pin(); - - let res_v4 = self.v4.store.mark_mui_as_withdrawn( - mui, - &guard - ); - let res_v6 = self.v6.store.mark_mui_as_withdrawn( - mui, - &guard - ); - - res_v4.and(res_v6) - } - - - - // Whether the global status for IPv4 prefixes and the specified - // `multi_uniq_id` is set to `Withdrawn`. 
- pub fn mui_is_withdrawn_v4( - &self, - mui: u32 - ) -> bool { - let guard = &epoch::pin(); - - self.v4.store.mui_is_withdrawn(mui, guard) - } - - // Whether the global status for IPv6 prefixes and the specified - // `multi_uniq_id` is set to `Active`. - pub fn mui_is_withdrawn_v6( - &self, - mui: u32 - ) -> bool { - let guard = &epoch::pin(); - - self.v6.store.mui_is_withdrawn(mui, guard) - } - - /// Returns the number of all prefixes in the store. - /// - /// Note that this method will actually traverse the complete - /// tree. - pub fn prefixes_count(&self) -> usize { - self.v4.store.get_prefixes_count() - + self.v6.store.get_prefixes_count() - } - - /// Returns the number of all IPv4 prefixes in the store. - /// - /// Note that this counter may be lower than the actual - /// number in the store, due to contention at the time of - /// reading the value. - pub fn prefixes_v4_count(&self) -> usize { - self.v4.store.get_prefixes_count() - } - - /// Returns the number of all IPv4 prefixes with the - /// supplied prefix length in the store. - /// - /// Note that this counter may be lower than the actual - /// number in the store, due to contention at the time of - /// reading the value. - pub fn prefixes_v4_count_for_len(&self, len: u8) -> usize { - self.v4.store.get_prefixes_count_for_len(len) - } - - /// Returns the number of all IPv6 prefixes in the store. - /// - /// Note that this counter may be lower than the actual - /// number in the store, due to contention at the time of - /// reading the value. - pub fn prefixes_v6_count(&self) -> usize { - self.v6.store.get_prefixes_count() - } - - /// Returns the number of all IPv6 prefixes with the - /// supplied prefix length in the store. - /// - /// Note that this counter may be lower than the actual - /// number in the store, due to contention at the time of - /// reading the value. 
- pub fn prefixes_v6_count_for_len(&self, len: u8) -> usize { - self.v6.store.get_prefixes_count_for_len(len) - } - - /// Returns the number of nodes in the store. - /// - /// Note that this counter may be lower than the actual - /// number in the store, due to contention at the time of - /// reading the value. - pub fn nodes_count(&self) -> usize { - self.v4.store.get_nodes_count() - + self.v6.store.get_nodes_count() - } - - /// Returns the number of IPv4 nodes in the store. - /// - /// Note that this counter may be lower than the actual - /// number in the store, due to contention at the time of - /// reading the value. - pub fn nodes_v4_count(&self) -> usize { - self.v4.store.get_nodes_count() - } - - /// Returns the number of IPv6 nodes in the store. - /// - /// Note that this counter may be lower than the actual - /// number in the store, due to contention at the time of - /// reading the value. - pub fn nodes_v6_count(&self) -> usize { - self.v6.store.get_nodes_count() - } - - /// Print the store statistics to the standard output. - #[cfg(feature = "cli")] - pub fn print_funky_stats(&self) { - println!(""); - println!("Stats for IPv4 multi-threaded store\n"); - println!("{}", self.v4); - println!("Stats for IPv6 multi-threaded store\n"); - println!("{}", self.v6); - } - - // The Store statistics. - pub fn stats(&self) -> StoreStats { - StoreStats { - v4: self.v4.store.counters.get_prefix_stats(), - v6: self.v6.store.counters.get_prefix_stats(), - } - } - } - }; - - let result = quote! 
{ - #create_strides - #store - }; - - TokenStream::from(result) -} diff --git a/proc_macros/src/maps.rs b/proc_macros/src/maps.rs deleted file mode 100644 index 7696a4f0..00000000 --- a/proc_macros/src/maps.rs +++ /dev/null @@ -1,547 +0,0 @@ -// Mappings between the Prefix lengths and storage level -// -// Each field in the CustomAllocStorage stores prefixes of the same length -// and consists of a start bucket, which in turn contains StoredPrefixes -// Each StoredPrefix contains a reference to another bucket, the next storage -// level. Each length field in the CustomAllocStorage can have its own -// arrangement for the sequence of bucket sizes. This file describes these -// arrangements in the form of a mapping between prefix-length and bucket -// sizes for each storage level. -// -// The mapping is a two-dimensional array, where the first dimension is the -// prefix-length and the second dimension is the index of the end bit that is -// stored in the storage level. Hence, an prefix-length array in this mapping -// should only have increasing numbers as elements, and the values of the -// elements summed MUST be the same as the prefix-length. A last requirement -// is that THE ARAY SHOULD END IN AT LEAST ONE ZERO. This is because a zero -// is used by the callers as a sentinel value to indicate that the last bucket -// has been reached. -// -// So an array for a length might look like this: -// [12, 24, 0, 0, 0, 0, 0, 0, 0, 0]. -// This is an array for a prefix-length of 24 and stores all prefixes in two -// levels maximum. - -use quote::quote; - -pub fn node_buckets_map_v4() -> quote::__private::TokenStream { - quote! 
{ - - fn len_to_store_bits(len: u8, lvl: u8) -> u8 { - let res = 4 * (lvl + 1); - if res < len { - res - } else { - if res >= len + 4 { - 0 - } else { - len - } - } - // match len { - // l if l <= 12 => { - // if lvl > 0 { - // 0 - // } else { - // len - // } - // } - // l if l < 16 => { - // if lvl > 1 { - // 0 - // } else { - // 12 * if lvl == 0 { 1 } else { lvl } + lvl * (len - 12) - // } - // } - // _ => { - // let res = 4 * (lvl + 1); - // if res < len { - // res - // } else { - // if res >= len + 4 { - // 0 - // } else { - // len - // } - // } - // } - // } - } - - // fn len_to_store_bits_old(len: u8, level: u8) -> u8 { - // // (vert x hor) = len x level -> number of bits - // [ - // [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 0 - // [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 1 - never exists - // [2, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 2 - never exists - // [3, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 3 - // [4, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 4 - // [5, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 5 - // [6, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 6 - // [7, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 7 - // [8, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 8 - // [9, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 9 - // [10, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 10 - // [11, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 11 - // [12, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 12 - // [12, 13, 0, 0, 0, 0, 0, 0, 0, 0], // 13 - // [12, 14, 0, 0, 0, 0, 0, 0, 0, 0], // 14 - // [12, 15, 0, 0, 0, 0, 0, 0, 0, 0], // 15 - // [12, 16, 0, 0, 0, 0, 0, 0, 0, 0], // 16 - // [12, 17, 0, 0, 0, 0, 0, 0, 0, 0], // 17 - // [12, 18, 0, 0, 0, 0, 0, 0, 0, 0], // 18 - // [12, 19, 0, 0, 0, 0, 0, 0, 0, 0], // 19 - // [12, 20, 0, 0, 0, 0, 0, 0, 0, 0], // 20 - // [12, 21, 0, 0, 0, 0, 0, 0, 0, 0], // 21 - // [12, 22, 0, 0, 0, 0, 0, 0, 0, 0], // 22 - // [12, 23, 0, 0, 0, 0, 0, 0, 0, 0], // 23 - // [12, 24, 0, 0, 0, 0, 0, 0, 0, 0], // 24 - // [12, 24, 25, 0, 0, 0, 0, 0, 0, 0], // 25 - // [4, 8, 12, 16, 20, 24, 26, 0, 0, 0], // 26 - // [4, 8, 12, 16, 20, 24, 27, 0, 0, 0], // 27 - // [4, 8, 12, 16, 20, 
24, 28, 0, 0, 0], // 28 - // [4, 8, 12, 16, 20, 24, 28, 29, 0, 0], // 29 - // [4, 8, 12, 16, 20, 24, 28, 30, 0, 0], // 30 - // [4, 8, 12, 16, 20, 24, 28, 31, 0, 0], // 31 - // [4, 8 , 12, 16, 20, 24, 28, 32, 0, 0], // 32 - // ][len as usize][level as usize] - // } - - } -} - -pub fn prefix_buckets_map_v4() -> quote::__private::TokenStream { - quote! { - - fn get_bits_for_len(len: u8, lvl: u8) -> u8 { - let res = 4 * (lvl + 1); - if res < len { - res - } else { - if res >= len + 4 { - 0 - } else { - len - } - } - // match len { - // l if l <= 12 => { - // if lvl > 0 { - // 0 - // } else { - // len - // } - // } - // l if l < 16 => { - // if lvl > 1 { - // 0 - // } else { - // 12 * if lvl == 0 { 1 } else { lvl } + lvl * (len - 12) - // } - // } - // _ => { - // let res = 4 * (lvl + 1); - // if res < len { - // res - // } else { - // if res >= len + 4 { - // 0 - // } else { - // len - // } - // } - // } - // } - } - - // fn get_bits_for_len_old(len: u8, level: u8) -> u8 { - // // (vert x hor) = len x level -> number of bits - // [ - // [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 0 - // [1, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 1 - never exists - // [2, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 2 - never exists - // [3, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 3 - // [4, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 4 - // [5, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 5 - // [6, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 6 - // [7, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 7 - // [8, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 8 - // [9, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 9 - // [10, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 10 - // [11, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 11 - // [12, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 12 - // [12, 13, 0, 0, 0, 0, 0, 0, 0, 0], // 13 - // [12, 14, 0, 0, 0, 0, 0, 0, 0, 0], // 14 - // [12, 15, 0, 0, 0, 0, 0, 0, 0, 0], // 15 - // [4, 8, 12, 16, 0, 0, 0, 0, 0, 0], // 16 - // [4, 8, 12, 16, 17, 0, 0, 0, 0, 0], // 17 - // [4, 8, 12, 16, 18, 0, 0, 0, 0, 0], // 18 - // [4, 8, 12, 16, 19, 0, 0, 0, 0, 0], // 19 - // [4, 8, 12, 16, 20, 0, 0, 0, 0, 0], 
// 20 - // [4, 8, 12, 16, 20, 21, 0, 0, 0, 0], // 21 - // [4, 8, 12, 16, 20, 22, 0, 0, 0, 0], // 22 - // [4, 8, 12, 16, 20, 23, 0, 0, 0, 0], // 23 - // [4, 8, 12, 16, 20, 24, 0, 0, 0, 0], // 24 - // [4, 8, 12, 16, 20, 24, 25, 0, 0, 0], // 25 - // [4, 8, 12, 16, 20, 24, 26, 0, 0, 0], // 26 - // [4, 8, 12, 16, 20, 24, 27, 0, 0, 0], // 27 - // [4, 8, 12, 16, 20, 24, 28, 0, 0, 0], // 28 - // [4, 8, 12, 16, 20, 24, 28, 29, 0, 0], // 29 - // [4, 8, 12, 16, 20, 24, 28, 30, 0, 0], // 30 - // [4, 8, 12, 16, 20, 24, 28, 31, 0, 0], // 31 - // [4, 8, 12, 16, 20, 24, 28, 32, 0, 0], // 32 - // ][len as usize][level as usize] - // } - - } -} - -pub fn node_buckets_map_v6() -> quote::__private::TokenStream { - quote! { - - fn len_to_store_bits(len: u8, lvl: u8) -> u8 { - let res = 4 * (lvl + 1); - if res < len { - res - } else { - if res >= len + 4 { 0 } else { len } - } - - // match len { - // l if l <= 12 => if lvl > 0 { 0 } else { len }, - // l if l <= 24 => if lvl > 1 { 0 } else { - // 12 * if lvl == 0 { 1 } else { lvl } + lvl * (len - 12) - // }, - // l if l <= 36 => { if lvl > 2 { 0 } else { - // 12 * if lvl <= 1 { lvl + 1 } else { 2 } - // + if lvl == 0 { 0 } else { lvl - 1 } * (len - 24) - // } - // } - // l if l <= 48 => { if lvl > 3 { 0 } else { - // 12 * if lvl <= 2 { lvl + 1 } else { 3 } - // + if lvl < 2 { 0 } else { lvl - 2 } * (len - 36) - // } - // } - // _ => { - // let res = 8 * (lvl + 1); - // if res < len { - // res - // } else { - // if res >= len + 8 { 0 } else { len } - // } - // } - // } - } - - // fn len_to_store_bits_old(len: u8, level: u8) -> u8 { - // // (vert x hor) = len x level -> number of bits - // [ - // [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 0 - // [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 1 - never exists - // [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 2 - // [3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 3 - // [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 4 - // [5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 5 - // [6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 6 - // [7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 7 - // [8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 8 - // [9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 9 - // [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 10 - // [11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 11 - // [12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 12 - // [12, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 13 - // [12, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 14 - // [12, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 15 - // [12, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 16 - // [12, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 17 - // [12, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 18 - // [12, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 19 - // [12, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 20 - // [12, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 21 - // [12, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 22 - // [12, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 23 - // [12, 24, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 24 - // [12, 24, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 25 - // [12, 24, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 26 - // [12, 24, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 27 - // [12, 24, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 28 - // [12, 24, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 29 - // [12, 24, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 30 - // [12, 24, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 31 - // [12, 24, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 32 - // [12, 24, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 33 - // [12, 24, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 34 - // [12, 24, 35, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 35 - // [12, 24, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 36 - // [12, 24, 36, 37, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 37 - // [12, 24, 36, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 38 - // [12, 24, 36, 39, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 39 - // [12, 24, 36, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 40 - // [12, 24, 36, 41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 41 - // [12, 24, 36, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 42 - // [12, 24, 36, 43, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 43 - // [12, 24, 36, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0], // 44 - // [12, 24, 36, 45, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 45 - // [12, 24, 36, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 46 - // [12, 24, 36, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 47 - // [12, 24, 36, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 48 - // [4, 8, 12, 24, 28, 48, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 49 - // [4, 8, 12, 24, 28, 48, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 50 - // [4, 8, 12, 24, 28, 48, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 51 - // [4, 8, 12, 24, 28, 48, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 52 - // [4, 8, 12, 24, 28, 48, 52, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 53 - // [4, 8, 12, 24, 28, 48, 52, 54, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 54 - // [4, 8, 12, 24, 28, 48, 52, 55, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 55 - // [4, 8, 12, 24, 28, 48, 52, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 56 - // [4, 8, 12, 24, 28, 48, 52, 56, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 57 - // [4, 8, 12, 24, 28, 48, 52, 56, 58, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 58 - // [4, 8, 12, 24, 28, 48, 52, 56, 59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 59 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 60 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 61 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 62 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 63 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0], // 64 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 65 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 66 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 67, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 67 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 68 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 69, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 69 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 70 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 71, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 71 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 72 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 73, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 73 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 74 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 75 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 76 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 77 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 78 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 79 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 80 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 81 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 82 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 83, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0], // 83 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 84 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 85 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 86, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 86 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 87, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 87 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 88 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 89, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 89 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 90 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 91, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 91 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 92 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 93, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 93 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 94, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 94 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 95 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 96 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 97, 0, 0, 0, 0, 0, 0, 0, 0], // 97 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 98, 0, 0, 0, 0, 0, 0, 0, 0], // 98 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 99, 0, 0, 0, 0, 0, 0, 0, 0], // 99 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 0, 0, 0, 0, 0, 0, 0, 0], // 100 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 101, 0, 0, 0, 0, 0, 0, 0], // 101 - // 
[4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 102, 0, 0, 0, 0, 0, 0, 0], // 102 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 103, 0, 0, 0, 0, 0, 0, 0], // 103 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 0, 0, 0, 0, 0, 0, 0], // 104 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 105, 0, 0, 0, 0, 0, 0], // 105 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 106, 0, 0, 0, 0, 0, 0], // 106 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 107, 0, 0, 0, 0, 0, 0], // 107 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 0, 0, 0, 0, 0, 0], // 108 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 109, 0, 0, 0, 0, 0], // 109 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 110, 0, 0, 0, 0, 0], // 110 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 111, 0, 0, 0, 0, 0], // 111 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 0, 0, 0, 0, 0], // 112 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 113, 0, 0, 0, 0], // 113 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 114, 0, 0, 0, 0], // 114 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 115, 0, 0, 0, 0], // 115 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 0, 0, 0, 0], // 116 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 117, 0, 0, 0], // 117 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 118, 0, 
0, 0], // 118 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 119, 0, 0, 0], // 119 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 0, 0, 0], // 120 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 121, 0, 0], // 121 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 122, 0, 0], // 122 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 123, 0, 0], // 123 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 0, 0], // 124 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 125, 0], // 125 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 126, 0], // 126 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 127, 0], // 127 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 0], // 128 - // ][len as usize][level as usize] - // } - - } -} - -pub fn prefix_buckets_map_v6() -> quote::__private::TokenStream { - quote! 
{ - - fn get_bits_for_len(len: u8, lvl: u8) -> u8 { - let res = 4 * (lvl + 1); - if res <= len { - res - } else { - if res >= len + 4 { 0 } else { len } - } - - // match len { - // l if l <= 12 => if lvl > 0 { 0 } else { len }, - // l if l <= 24 => if lvl > 1 { 0 } else { - // 12 * if lvl == 0 { 1 } else { lvl } + lvl * (len - 12) - // }, - // l if l <= 36 => { if lvl > 2 { 0 } else { - // 12 * if lvl <= 1 { lvl + 1 } else { 2 } - // + if lvl == 0 { 0 } else { lvl - 1 } * (len - 24) - // } - // } - // l if l <= 48 => { if lvl > 3 { 0 } else { - // 12 * if lvl <= 2 { lvl + 1 } else { 3 } - // + if lvl < 2 { 0 } else { lvl - 2 } * (len - 36) - // } - // } - // _ => { - // let res = 8 * (lvl + 1); - // if res < len { - // res - // } else { - // if res >= len + 8 { 0 } else { len } - // } - // } - // } - } - - // fn get_bits_for_len_old(len: u8, level: u8) -> u8 { - // // (vert x hor) = len x level -> number of bits - // [ - // [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 0 - // [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 1 - never exists - // [2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 2 - // [3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 3 - // [4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 4 - // [5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 5 - // [6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 6 - // [7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 7 - // [8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 8 - // [9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 9 - // [10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0], // len 10 - // [11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 11 - // [12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // len 12 - // [12, 13, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 13 - // [12, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 14 - // [12, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 15 - // [12, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 16 - // [12, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 17 - // [12, 18, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 18 - // [12, 19, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 19 - // [12, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 20 - // [12, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 21 - // [12, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 22 - // [12, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 23 - // [12, 24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 24 - // [12, 24, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 25 - // [12, 24, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 26 - // [12, 24, 27, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 27 - // [12, 24, 28, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 28 - // [12, 24, 29, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 29 - // [12, 24, 30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 30 - // [12, 24, 31, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 31 - // [12, 24, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 32 - // [12, 24, 33, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 33 - // [12, 24, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 34 - // [12, 24, 35, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 35 - // [12, 24, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 36 - // [12, 24, 36, 37, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 37 - // [12, 24, 36, 38, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 38 - // [12, 24, 36, 39, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 39 - // [12, 24, 36, 40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 40 - // [12, 24, 36, 41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 41 - // [12, 24, 36, 42, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 42 - // [12, 24, 36, 43, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 43 - // [12, 24, 36, 44, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 44 - // [12, 24, 36, 45, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 45 - // [12, 24, 36, 46, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 46 - // [12, 24, 36, 47, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 47 - // [12, 24, 36, 48, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 48 - // [4, 8, 12, 24, 28, 48, 49, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 49 - // [4, 8, 12, 24, 28, 48, 50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 50 - // [4, 8, 12, 24, 28, 48, 51, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 
// 51 - // [4, 8, 12, 24, 28, 48, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 52 - // [4, 8, 12, 24, 28, 48, 52, 53, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 53 - // [4, 8, 12, 24, 28, 48, 52, 54, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 54 - // [4, 8, 12, 24, 28, 48, 52, 55, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 55 - // [4, 8, 12, 24, 28, 48, 52, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 56 - // [4, 8, 12, 24, 28, 48, 52, 56, 57, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 57 - // [4, 8, 12, 24, 28, 48, 52, 56, 58, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 58 - // [4, 8, 12, 24, 28, 48, 52, 56, 59, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 59 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 60 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 61, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 61 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 62, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 62 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 63, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 63 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 64 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 65, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 65 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 66 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 67, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 67 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 68 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 69, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 69 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 70 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 71, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 71 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 72, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 72 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 73, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 73 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 74 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 75, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 75 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 76 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 77, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 77 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 78 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 79 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 80 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 81, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 81 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 82 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 83, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 83 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 84 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 85 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 86, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 86 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 87, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 87 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 88 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 89, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 89 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 
68, 74, 78, 82, 84, 88, 90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 90 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 91, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 91 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 92 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 93, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 93 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 94, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 94 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 95, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 95 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0], // 96 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 97, 0, 0, 0, 0, 0, 0, 0, 0], // 97 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 98, 0, 0, 0, 0, 0, 0, 0, 0], // 98 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 99, 0, 0, 0, 0, 0, 0, 0, 0], // 99 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 0, 0, 0, 0, 0, 0, 0, 0], // 100 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 101, 0, 0, 0, 0, 0, 0, 0], // 101 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 102, 0, 0, 0, 0, 0, 0, 0], // 102 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 103, 0, 0, 0, 0, 0, 0, 0], // 103 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 0, 0, 0, 0, 0, 0, 0], // 104 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 105, 0, 0, 0, 0, 0, 0], // 105 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 106, 0, 0, 0, 0, 0, 0], // 106 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 107, 0, 0, 0, 0, 0, 0], // 107 - // [4, 8, 12, 24, 
28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 0, 0, 0, 0, 0, 0], // 108 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 109, 0, 0, 0, 0, 0], // 109 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 110, 0, 0, 0, 0, 0], // 110 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 111, 0, 0, 0, 0, 0], // 111 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 0, 0, 0, 0, 0], // 112 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 113, 0, 0, 0, 0], // 113 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 114, 0, 0, 0, 0], // 114 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 115, 0, 0, 0, 0], // 115 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 0, 0, 0, 0], // 116 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 117, 0, 0, 0], // 117 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 118, 0, 0, 0], // 118 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 119, 0, 0, 0], // 119 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 0, 0, 0], // 120 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 121, 0, 0], // 121 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 122, 0, 0], // 122 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 123, 0, 0], // 123 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 
100, 104, 108, 112, 116, 120, 124, 0, 0], // 124 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 125, 0], // 125 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 126, 0], // 126 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 127, 0], // 127 - // [4, 8, 12, 24, 28, 48, 52, 56, 60, 64, 68, 74, 78, 82, 84, 88, 92, 96, 100, 104, 108, 112, 116, 120, 124, 128, 0], // 128 - // ][len as usize][level as usize] - // } - - } -} diff --git a/src/af.rs b/src/af.rs deleted file mode 100644 index 5510825c..00000000 --- a/src/af.rs +++ /dev/null @@ -1,326 +0,0 @@ -//------------ AddressFamily (trait) ---------------------------------------- -/// The address family of an IP address as a Trait. -/// -/// The idea of this trait is that each family will have a separate type to -/// be able to only take the amount of memory needs. Useful when building -/// trees with large amounts of addresses/prefixes. Used by rotonda-store for -/// this purpose. -pub trait AddressFamily: - std::fmt::Binary - + std::fmt::Debug - + std::hash::Hash - + std::fmt::Display - + From - + From - + Eq - + std::ops::BitAnd - + std::ops::BitOr - + std::ops::Shr - + std::ops::Shl - + std::ops::Shl - + std::ops::Sub - + Zero - + Copy - + Ord -{ - /// The byte representation of the family filled with 1s. - const BITMASK: Self; - /// The number of bits in the byte representation of the family. - const BITS: u8; - fn fmt_net(net: Self) -> String; - // returns the specified nibble from `start_bit` to (and including) - // `start_bit + len` and shifted to the right. - fn get_nibble(net: Self, start_bit: u8, len: u8) -> u32; - - /// Treat self as a prefix and append the given nibble to it. 
- fn add_nibble(self, len: u8, nibble: u32, nibble_len: u8) -> (Self, u8); - - fn truncate_to_len(self, len: u8) -> Self; - - fn from_ipaddr(net: std::net::IpAddr) -> Self; - - fn into_ipaddr(self) -> std::net::IpAddr; - - // temporary function, this will botch IPv6 completely. - fn dangerously_truncate_to_u32(self) -> u32; - - // temporary function, this will botch IPv6 completely. - fn dangerously_truncate_to_usize(self) -> usize; - - // For the sake of searching for 0/0, check the the right shift, since - // since shifting with MAXLEN (32 in Ipv4, or 128 in IPv6) will panic - // in debug mode. A failed check will simply retutrn zero. Used in - // finding node_ids (always zero for 0/0). - fn checked_shr_or_zero(self, rhs: u32) -> Self; -} - -//-------------- Ipv4 Type -------------------------------------------------- - -/// Exactly fitting IPv4 bytes (4 octets). -pub type IPv4 = u32; - -impl AddressFamily for IPv4 { - const BITMASK: u32 = 0x1u32.rotate_right(1); - const BITS: u8 = 32; - - fn fmt_net(net: Self) -> String { - std::net::Ipv4Addr::from(net).to_string() - } - - fn get_nibble(net: Self, start_bit: u8, len: u8) -> u32 { - (net << start_bit) >> ((32 - len) % 32) - } - - // You can't shift with the number of bits of self, so we'll just return - // zero for that case. - // - // Panics if len is greater than 32 (the number of bits of self). - fn truncate_to_len(self, len: u8) -> Self { - match len { - 0 => 0, - 1..=31 => (self >> ((32 - len) as usize)) << (32 - len) as usize, - 32 => self, - _ => panic!("Can't truncate to more than 32 bits"), - } - } - - /// Treat self as a prefix and append the given nibble to it. - /// - /// Shifts the rightmost `nibble_len` bits of `nibble` to the left to a - /// position `len` bits from the left, then ORs the result into self. 
- /// - /// For example: - /// - /// ``` - /// # use rotonda_store::IPv4; - /// # use rotonda_store::AddressFamily; - /// let prefix = 0b10101010_00000000_00000000_00000000_u32; // 8-bit prefix - /// let nibble = 0b1100110_u32; // 7-bit nibble - /// let (new_prefix, new_len) = prefix.add_nibble(8, nibble, 7); - /// assert_eq!(new_len, 8 + 7); - /// assert_eq!(new_prefix, 0b10101010_11001100_00000000_00000000); - /// // ^^^^^^^^ ^^^^^^^ - /// // prefix nibble - /// ``` - /// - /// # Panics in debug mode! - /// - /// Will panic if there is insufficient space to add the given nibble, - /// i.e. if `len + nibble_len >= 32`. - /// - /// ``` - /// # use rotonda_store::IPv4; - /// # use rotonda_store::AddressFamily; - /// let prefix = 0b10101010_00000000_00000000_00000100_u32; // 30-bit prefix - /// let nibble = 0b1100110_u32; // 7-bit nibble - /// let (new_prefix, new_len) = prefix.add_nibble(30, nibble, 7); - /// ``` - fn add_nibble(self, len: u8, nibble: u32, nibble_len: u8) -> (u32, u8) { - let res = - self | (nibble << (32 - len - nibble_len) as usize); - (res, len + nibble_len) - } - - fn from_ipaddr(addr: std::net::IpAddr) -> u32 { - // Well, this is awkward. - if let std::net::IpAddr::V4(addr) = addr { - (addr.octets()[0] as u32) << 24 - | (addr.octets()[1] as u32) << 16 - | (addr.octets()[2] as u32) << 8 - | (addr.octets()[3] as u32) - } else { - panic!("Can't convert IPv6 to IPv4"); - } - } - - fn into_ipaddr(self) -> std::net::IpAddr { - std::net::IpAddr::V4(std::net::Ipv4Addr::from(self)) - } - - fn dangerously_truncate_to_u32(self) -> u32 { - // not dangerous at all. - self - } - - fn dangerously_truncate_to_usize(self) -> usize { - // not dangerous at all. - self as usize - } - - fn checked_shr_or_zero(self, rhs: u32) -> Self { - self.checked_shr(rhs).unwrap_or(0) - } -} - -//-------------- Ipv6 Type -------------------------------------------------- - -/// Exactly fitting IPv6 bytes (16 octets). 
-pub type IPv6 = u128; - -impl AddressFamily for IPv6 { - const BITMASK: u128 = 0x1u128.rotate_right(1); - const BITS: u8 = 128; - fn fmt_net(net: Self) -> String { - std::net::Ipv6Addr::from(net).to_string() - } - - fn get_nibble(net: Self, start_bit: u8, len: u8) -> u32 { - ((net << start_bit) >> ((128 - len) % 128)) as u32 - } - - /// Treat self as a prefix and append the given nibble to it. - /// - /// Shifts the rightmost `nibble_len` bits of `nibble` to the left to a - /// position `len` bits from the left, then ORs the result into self. - /// - /// For example: - /// - /// ``` - /// # use rotonda_store::IPv6; - /// # use rotonda_store::AddressFamily; - /// let prefix = 0xF0F0F0F0_F0000000_00000000_00000000u128; // 36-bit prefix - /// let nibble = 0xA8A8_u32; // 16-bit nibble - /// let (new_prefix, new_len) = prefix.add_nibble(36, nibble, 16); - /// assert_eq!(new_len, 36 + 16); - /// assert_eq!(new_prefix, 0xF0F0F0F0F_A8A8000_00000000_00000000u128); - /// // ^^^^^^^^^ ^^^^ - /// // prefix nibble - /// ``` - /// - /// # Panics only in debug mode! - /// - /// In release mode this will be UB (Undefined Behaviour)! - /// - /// Will panic if there is insufficient space to add the given nibble, - /// i.e. if `len + nibble_len >= 128`. 
- /// - /// ``` - /// # use rotonda_store::IPv6; - /// # use rotonda_store::AddressFamily; - /// let prefix = 0xFFFFFFFF_FFFFFFFF_FFFFFFFF_FFFF0000u128; // 112-bit prefix - /// let nibble = 0xF00FF00F_u32; // 32-bit nibble - /// let (new_prefix, new_len) = prefix.add_nibble(112, nibble, 32); - /// ``` - fn add_nibble(self, len: u8, nibble: u32, nibble_len: u8) -> (Self, u8) { - let res = self - | ((nibble as u128) << (128 - len - nibble_len) as usize); - (res, len + nibble_len) - } - - fn truncate_to_len(self, len: u8) -> Self { - match len { - 0 => 0, - 1..=127 => { - (self >> ((128 - len) as usize)) << (128 - len) as usize - } - 128 => self, - _ => panic!("Can't truncate to more than 128 bits"), - } - } - - // fn truncate_to_len(self, len: u8) -> Self { - // if (128 - len) == 0 { - // 0 - // } else { - // (self >> (128 - len)) << (128 - len) - // } - // } - - fn from_ipaddr(net: std::net::IpAddr) -> u128 { - if let std::net::IpAddr::V6(addr) = net { - addr.octets()[15] as u128 - | (addr.octets()[14] as u128) << 8 - | (addr.octets()[13] as u128) << 16 - | (addr.octets()[12] as u128) << 24 - | (addr.octets()[11] as u128) << 32 - | (addr.octets()[10] as u128) << 40 - | (addr.octets()[9] as u128) << 48 - | (addr.octets()[8] as u128) << 56 - | (addr.octets()[7] as u128) << 64 - | (addr.octets()[6] as u128) << 72 - | (addr.octets()[5] as u128) << 80 - | (addr.octets()[4] as u128) << 88 - | (addr.octets()[3] as u128) << 96 - | (addr.octets()[2] as u128) << 104 - | (addr.octets()[1] as u128) << 112 - | (addr.octets()[0] as u128) << 120 - } else { - panic!("Can't convert IPv4 to IPv6"); - } - } - - fn into_ipaddr(self) -> std::net::IpAddr { - std::net::IpAddr::V6(std::net::Ipv6Addr::from(self)) - } - - fn dangerously_truncate_to_u32(self) -> u32 { - // this will chop off the high bits. - self as u32 - } - - fn dangerously_truncate_to_usize(self) -> usize { - // this will chop off the high bits. 
- self as usize - } - - fn checked_shr_or_zero(self, rhs: u32) -> Self { - self.checked_shr(rhs).unwrap_or(0) - } -} - -// ----------- Zero Trait --------------------------------------------------- - -pub trait Zero { - fn zero() -> Self; - fn is_zero(&self) -> bool; -} - -impl Zero for u128 { - fn zero() -> Self { - 0 - } - - fn is_zero(&self) -> bool { - *self == 0 - } -} - -impl Zero for u64 { - fn zero() -> Self { - 0 - } - - fn is_zero(&self) -> bool { - *self == 0 - } -} - -impl Zero for u32 { - fn zero() -> Self { - 0 - } - - fn is_zero(&self) -> bool { - *self == 0 - } -} - -impl Zero for u16 { - fn zero() -> Self { - 0 - } - - fn is_zero(&self) -> bool { - *self == 0 - } -} - -impl Zero for u8 { - fn zero() -> Self { - 0 - } - - fn is_zero(&self) -> bool { - *self == 0 - } -} diff --git a/src/bin/cli.rs b/src/bin/cli.rs index df2abb1d..57f7faf2 100644 --- a/src/bin/cli.rs +++ b/src/bin/cli.rs @@ -1,11 +1,14 @@ #![cfg(feature = "cli")] use ansi_term::Colour; +use rotonda_store::match_options::{IncludeHistory, MatchOptions, MatchType}; +use rotonda_store::prefix_record::{PrefixRecord, Record, RouteStatus}; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; use rustyline::error::ReadlineError; use rustyline::Editor; use inetnum::addr::Prefix; -use rotonda_store::meta_examples::PrefixAs; -use rotonda_store::prelude::{multi::*, *}; +use rotonda_store::test_types::PrefixAs; use rustyline::history::DefaultHistory; use std::env; @@ -40,7 +43,12 @@ fn load_prefixes( let asn: u32 = record[2].parse().unwrap(); let pfx = PrefixRecord::new( Prefix::new(ip, len)?, - vec![Record::new(0, 0, RouteStatus::Active, PrefixAs(asn))], + vec![Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new_from_u32(asn), + )], ); // let ip: Vec<_> = record[0] @@ -61,7 +69,8 @@ fn load_prefixes( fn main() -> Result<(), Box> { let mut pfxs: Vec> = vec![]; - let tree_bitmap = MultiThreadedStore::::new()?; + let tree_bitmap = + 
StarCastRib::::try_default()?; if let Err(err) = load_prefixes(&mut pfxs) { println!("error running example: {}", err); @@ -82,7 +91,7 @@ fn main() -> Result<(), Box> { // tree_bitmap.print_funky_stats(); // let locks = tree_bitmap.acquire_prefixes_rwlock_read(); - let guard = &epoch::pin(); + let guard = &rotonda_store::epoch::pin(); let mut rl = Editor::<(), DefaultHistory>::new()?; if rl.load_history("/tmp/rotonda-store-history.txt").is_err() { @@ -99,52 +108,55 @@ fn main() -> Result<(), Box> { match cmd.to_string().as_ref() { "p" => match line.chars().as_str() { "p4" => { - tree_bitmap.prefixes_iter_v4().for_each( - |pfx| { + tree_bitmap + .prefixes_iter_v4(guard) + .for_each(|pfx| { + let pfx = pfx.unwrap(); println!( "{} {}", pfx.prefix, pfx.meta[0] ); - }, - ); + }); println!( - "ipv4 prefixes :\t{}", + "ipv4 prefixes :\t{:?}", tree_bitmap.prefixes_v4_count() ); } "p6" => { - tree_bitmap.prefixes_iter_v6().for_each( - |pfx| { + tree_bitmap + .prefixes_iter_v6(guard) + .for_each(|pfx| { + let pfx = pfx.unwrap(); println!( "{} {}", pfx.prefix, pfx.meta[0] ); - }, - ); + }); println!( - "ipv6 prefixes :\t{}", + "ipv6 prefixes :\t{:?}", tree_bitmap.prefixes_v6_count() ); } _ => { println!( - "ipv4 prefixes :\t{}", + "ipv4 prefixes :\t{:?}", tree_bitmap.prefixes_v4_count() ); println!( - "ipv6 prefixes :\t{}", + "ipv6 prefixes :\t{:?}", tree_bitmap.prefixes_v6_count() ); - tree_bitmap.prefixes_iter().for_each( - |pfx| { + tree_bitmap + .prefixes_iter(guard) + .for_each(|pfx| { + let pfx = pfx.unwrap(); println!( "{} {}", pfx.prefix, pfx.meta[0] ); - }, - ); + }); println!( - "total prefixes :\t{}", + "total prefixes :\t{:?}", tree_bitmap.prefixes_count() ); } @@ -223,9 +235,10 @@ fn main() -> Result<(), Box> { include_less_specifics: true, include_more_specifics: true, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("start query result"); println!("{}", query_result); println!("end query result"); @@ -254,7 +267,7 @@ fn main() 
-> Result<(), Box> { None, false, guard, - ) + )? .more_specifics .map_or("None".to_string(), |x| x .to_string()) @@ -268,7 +281,7 @@ fn main() -> Result<(), Box> { None, false, guard, - ) + )? .less_specifics .map_or("None".to_string(), |x| x .to_string()) @@ -285,10 +298,12 @@ fn main() -> Result<(), Box> { include_withdrawn: true, include_less_specifics: true, include_more_specifics: true, - mui: None + mui: None, + include_history: + IncludeHistory::None }, guard - ) + )? ); println!("--- numatch"); println!("more specifics"); @@ -300,7 +315,7 @@ fn main() -> Result<(), Box> { None, false, guard, - ) + )? .more_specifics .map_or("None".to_string(), |x| x .to_string()) @@ -314,7 +329,7 @@ fn main() -> Result<(), Box> { None, false, guard - ) + )? .less_specifics .map_or("None".to_string(), |x| x .to_string()) diff --git a/src/bin/load_mrt.rs b/src/bin/load_mrt.rs new file mode 100644 index 00000000..beb285e9 --- /dev/null +++ b/src/bin/load_mrt.rs @@ -0,0 +1,742 @@ +use std::collections::BTreeSet; +use std::fmt; +use std::fs::File; +use std::path::PathBuf; +use std::time::Instant; + +use clap::Parser; +use inetnum::addr::Prefix; +use memmap2::Mmap; +use rayon::{ + iter::{ParallelBridge, ParallelIterator}, + prelude::*, +}; +use rotonda_store::{ + prefix_record::{Meta, Record, RouteStatus}, + rib::{ + config::{ + Config, MemoryOnlyConfig, PersistHistoryConfig, + PersistOnlyConfig, PersistStrategy, WriteAheadConfig, + }, + StarCastRib, + }, + stats::UpsertReport, +}; +use routecore::{ + bgp::{ + aspath::HopPath, + message::{update_builder::StandardCommunitiesList, PduParseInfo}, + path_attributes::OwnedPathAttributes, + }, + mrt::{MrtFile, RibEntryIterator, TableDumpIterator}, +}; + +use rand::seq::SliceRandom; + +#[derive(Clone, Debug)] +struct PaBytes(Vec); + +impl std::fmt::Display for PaBytes { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self.0) + } +} + +impl AsRef<[u8]> for PaBytes { + fn as_ref(&self) -> &[u8] { + 
self.0.as_ref() + } +} + +impl From> for PaBytes { + fn from(value: Vec) -> Self { + Self(value) + } +} + +impl Meta for PaBytes { + type Orderable<'a> = u32; + + type TBI = u32; + + fn as_orderable(&self, _tbi: Self::TBI) -> Self::Orderable<'_> { + todo!() + } +} + +#[derive(Copy, Clone, Default)] +struct UpsertCounters { + unique_prefixes: usize, + unique_routes: usize, + persisted_routes: usize, + total_routes: usize, +} + +impl std::ops::AddAssign for UpsertCounters { + fn add_assign(&mut self, rhs: Self) { + self.unique_prefixes += rhs.unique_prefixes; + self.unique_routes += rhs.unique_routes; + self.persisted_routes += rhs.persisted_routes; + self.total_routes += rhs.total_routes; + } +} + +impl std::ops::Add for UpsertCounters { + type Output = UpsertCounters; + + fn add(self, rhs: Self) -> Self::Output { + Self { + unique_prefixes: self.unique_prefixes + rhs.unique_prefixes, + unique_routes: self.unique_routes + rhs.unique_routes, + persisted_routes: self.persisted_routes + rhs.persisted_routes, + total_routes: self.total_routes + rhs.total_routes, + } + } +} + +impl std::fmt::Display for UpsertCounters { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + writeln!(f, "inserted unique prefixes:\t{}", self.unique_prefixes)?; + writeln!(f, "inserted unique routes:\t\t{}", self.unique_routes)?; + writeln!(f, "persisted routes:\t\t{}", self.persisted_routes)?; + writeln!(f, "total routes:\t\t\t{}", self.total_routes)?; + writeln!( + f, + "calculated persisted routes:\t{}", + self.total_routes - self.unique_routes + ) + } +} + +fn counter_update( + counters: &mut UpsertCounters, +) -> impl FnMut(UpsertReport) -> UpsertCounters + '_ { + move |r| match (r.prefix_new, r.mui_new) { + // new prefix, new mui + (true, true) => { + counters.unique_prefixes += 1; + counters.unique_routes += 1; + counters.total_routes += 1; + *counters + } + // old prefix, new mui + (false, true) => { + counters.unique_routes += 1; + counters.total_routes += 1; + *counters + } 
+ // old prefix, old mui + (false, false) => { + counters.total_routes += 1; + *counters + } + // new prefix, old mui + (true, false) => { + panic!("THIS DOESN'T MEAN ANYTHING!"); + } + } +} + +#[derive(Parser)] +#[command(version, about, long_about = None)] +struct Cli { + /// Enable concurrent route inserts + #[arg(short, long, default_value_t = false)] + mt: bool, + + /// Prime store by sequentially inserting prefixes first + #[arg(short, long, default_value_t = false)] + prime: bool, + + /// Enable concurrent priming inserts + #[arg(long, default_value_t = false)] + mt_prime: bool, + + /// Shuffle prefixes before priming the store. Enables priming. + #[arg(short, long, default_value_t = false)] + shuffle: bool, + + /// Use the same store for all MRT_FILES + #[arg(long, default_value_t = false)] + single_store: bool, + + /// MRT files to process. + #[arg(required = true)] + mrt_files: Vec, + + /// Don't insert in store, only parse MRT_FILES + #[arg(long, default_value_t = false)] + parse_only: bool, + + /// Verify the persisted entries + #[arg(long, default_value_t = false)] + verify: bool, + + /// Persistence Strategy + #[arg(long)] + persist_strategy: Option, +} + +type Type = rotonda_store::errors::PrefixStoreError; + +fn insert( + store: &StarCastRib, + prefix: &Prefix, + mui: u32, + ltime: u64, + route_status: RouteStatus, + value: T, +) -> Result { + let record = Record::new(mui, ltime, route_status, value); + store + .insert(prefix, record, None) + .inspect_err(|e| eprintln!("Error in test_store: {e}")) +} + +fn par_load_prefixes( + mrt_file: &MrtFile, + shuffle: bool, +) -> Vec<(Prefix, u16)> { + let t0 = std::time::Instant::now(); + let mut prefixes = mrt_file + .tables() + .unwrap() + .par_bridge() + .map(|(_fam, reh)| { + let iter = routecore::mrt::SingleEntryIterator::new(reh); + iter.map(|(prefix, peer_idx, _)| (prefix, peer_idx)) + }) + .flatten_iter() + .collect::>(); + + eprintln!( + "loaded file with {} prefixes in {}ms", + prefixes.len(), + 
t0.elapsed().as_millis() + ); + + if shuffle { + let t_s = Instant::now(); + eprint!("shuffling prefixes... "); + prefixes.shuffle(&mut rand::rng()); + eprintln!("done! took {}ms", t_s.elapsed().as_millis()); + } + + prefixes +} + +fn mt_parse_and_insert_table( + tables: TableDumpIterator<&[u8]>, + store: Option<&StarCastRib>, + ltime: u64, +) -> (UpsertCounters, Vec) { + let persist_strategy = + store.map_or(PersistStrategy::MemoryOnly, |p| p.persist_strategy()); + let counters = tables + .par_bridge() + .map(|(_fam, reh)| { + let mut local_counters = UpsertCounters::default(); + let iter = routecore::mrt::SingleEntryIterator::new(reh); + let persisted_prefixes = &mut vec![]; + // let mut cnt = 0; + for (prefix, peer_idx, pa_bytes) in iter { + // cnt += 1; + // let (prefix, peer_idx, pa_bytes) = e; + let mui = peer_idx.into(); + let val = PaBytes(pa_bytes); + let mut persisted_routes = 0; + + if let Some(store) = store { + let counters = insert( + store, + &prefix, + mui, + ltime, + RouteStatus::Active, + val, + ) + .map(|r| match (r.prefix_new, r.mui_new) { + // new prefix, new mui + (true, true) => { + match persist_strategy { + PersistStrategy::WriteAhead + | PersistStrategy::PersistOnly => { + persisted_prefixes.push(prefix); + persisted_routes = 1; + } + _ => {} + }; + UpsertCounters { + unique_prefixes: 1, + unique_routes: 1, + persisted_routes, + total_routes: 1, + } + } + // old prefix, new mui + (false, true) => { + match persist_strategy { + PersistStrategy::WriteAhead + | PersistStrategy::PersistOnly => { + persisted_prefixes.push(prefix); + persisted_routes = 1; + } + _ => {} + }; + + UpsertCounters { + unique_prefixes: 0, + unique_routes: 1, + persisted_routes, + total_routes: 1, + } + } + // old prefix, old mui + (false, false) => { + if persist_strategy != PersistStrategy::MemoryOnly + { + persisted_prefixes.push(prefix); + persisted_routes = 1; + } + UpsertCounters { + unique_prefixes: 0, + unique_routes: 0, + persisted_routes, + total_routes: 1, + 
} + } + // new prefix, old mui + (true, false) => { + panic!("THIS DOESN'T MEAN ANYTHING!"); + } + }) + .unwrap(); + + local_counters += counters; + } + } + (local_counters, persisted_prefixes.clone()) + }) + .fold( + || (UpsertCounters::default(), vec![]), + |mut acc, c| { + acc.1.extend(c.1); + (acc.0 + c.0, acc.1) + }, + ) + .reduce( + || (UpsertCounters::default(), vec![]), + |mut acc, c| { + acc.1.extend(c.1); + (acc.0 + c.0, acc.1) + }, + ); + + println!("{}", counters.0); + + counters +} + +fn st_parse_and_insert_table( + entries: RibEntryIterator<&[u8]>, + store: Option<&StarCastRib>, + ltime: u64, +) -> UpsertCounters { + let mut counters = UpsertCounters::default(); + let mut cnt = 0; + let t0 = std::time::Instant::now(); + + for (_, peer_idx, _, prefix, pamap) in entries { + cnt += 1; + let mui = peer_idx.into(); + let val = PaBytes(pamap); + + if let Some(store) = store { + insert(store, &prefix, mui, ltime, RouteStatus::Active, val) + .map(counter_update(&mut counters)) + .unwrap(); + } + } + + println!( + "parsed & inserted {} prefixes in {}ms", + cnt, + t0.elapsed().as_millis() + ); + println!("{}", counters); + + counters +} + +fn mt_prime_store( + prefixes: &Vec<(Prefix, u16)>, + store: &StarCastRib, +) -> UpsertCounters { + let t0 = std::time::Instant::now(); + + let counters = prefixes + .par_iter() + .fold(UpsertCounters::default, |mut acc, p| { + insert( + store, + &p.0, + p.1 as u32, + 0, + RouteStatus::InActive, + PaBytes(vec![]), + ) + .map(counter_update(&mut acc)) + .unwrap() + }) + .reduce(UpsertCounters::default, |c1, c2| c1 + c2); + + println!( + "primed {} prefixes in {}ms", + prefixes.len(), + t0.elapsed().as_millis() + ); + + // println!("{}", counters); + + counters +} + +fn st_prime_store( + prefixes: &Vec<(Prefix, u16)>, + store: &StarCastRib, +) -> UpsertCounters { + let mut counters = UpsertCounters::default(); + + for p in prefixes { + insert( + store, + &p.0, + p.1 as u32, + 0, + RouteStatus::InActive, + PaBytes(vec![]), + ) + 
.map(counter_update(&mut counters)) + .unwrap(); + } + + counters +} + +type Stores = Vec>; + +// Create all the stores necessary, and if at least one is created, create +// a reference to the first one. +fn create_stores<'a, C: Config + Sync>( + stores: &'a mut Stores, + args: &'a Cli, + store_config: C, +) -> Option<&'a StarCastRib> { + match &args { + a if a.single_store && a.parse_only => { + eprintln!( + "Can't combine --parse-only and --single-store. + Make up your mind." + ); + None + } + a if a.single_store => { + stores.push( + StarCastRib::::new_with_config( + store_config.clone(), + ) + .unwrap(), + ); + println!( + "created a single-store with strategy: {:?}\n", + store_config + ); + + exec_for_store(Some(&stores[0]), stores, args); + Some(&stores[0]) + } + a if a.parse_only => { + println!("No store created (parse only)"); + None + } + _ => { + for _ in &args.mrt_files { + stores + .push(StarCastRib::::try_default().unwrap()); + } + println!("Number of created stores: {}", stores.len()); + println!("store config: {:?}", store_config); + exec_for_store(Some(&stores[0]), stores, args); + Some(&stores[0]) + } + } +} + +fn exec_for_store<'a, C: Config + Sync>( + mut store: Option<&'a StarCastRib>, + inner_stores: &'a Stores, + args: &'a Cli, +) { + let mut global_counters = UpsertCounters::default(); + let mut mib_total: usize = 0; + let mut persisted_prefixes = BTreeSet::new(); + let t_total = Instant::now(); + + // Loop over all the mrt-files specified as arguments + for (f_index, mrtfile) in args.mrt_files.iter().enumerate() { + print!("file #{} ", f_index); + + let file = File::open(mrtfile).unwrap(); + let mmap = unsafe { Mmap::map(&file).unwrap() }; + println!("{} ({}MiB)", mrtfile.to_string_lossy(), mmap.len() >> 20); + mib_total += mmap.len() >> 20; + + let mrt_file = MrtFile::new(&mmap[..]); + + if !args.single_store && !args.parse_only { + println!("use store #{}", f_index); + store = Some(&inner_stores[f_index]); + } + // Load the mrt file, 
maybe shuffle, and maybe prime the store + match &args { + a if a.mt_prime && a.prime => { + eprintln!( + "--prime and --mt-prime can't be combined. + Make up your mind." + ); + return; + } + a if a.prime => { + let prefixes = par_load_prefixes(&mrt_file, a.shuffle); + st_prime_store(&prefixes, store.unwrap()); + } + a if a.mt_prime => { + let prefixes = par_load_prefixes(&mrt_file, a.shuffle); + mt_prime_store(&prefixes, store.unwrap()); + } + _ => {} + }; + + // Parse the prefixes in the file, and maybe insert them into the + // Store + global_counters += match &args { + a if a.mt => { + let tables = mrt_file.tables().unwrap(); + let (counters, per_pfxs) = + mt_parse_and_insert_table(tables, store, f_index as u64); + if args.verify { + persisted_prefixes.extend(&per_pfxs) + } + counters + } + _ => { + let entries = mrt_file.rib_entries().unwrap(); + st_parse_and_insert_table(entries, store, f_index as u64) + } + }; + } + + if let Some(store) = store { + let res = store.flush_to_disk(); + if res.is_err() { + eprintln!("Persistence Error: {:?}", res); + } + } + + // eprintln!( + // "processed {} routes in {} files in {:.2}s", + // routes_count, + // args.mrt_files.len(), + // t_total.elapsed().as_millis() as f64 / 1000.0 + // ); + + println!("upsert counters"); + println!("---------------"); + println!("{}", global_counters); + + if let Some(store) = store { + println!("store in-memory counters"); + println!("------------------------"); + println!("prefixes:\t\t\t{:?}\n", store.prefixes_count().in_memory()); + + println!("store persistence counters"); + println!("--------------------------"); + println!( + "approx. 
prefixes:\t\t{} + {} = {}", + store.approx_persisted_items().0, + store.approx_persisted_items().1, + store.approx_persisted_items().0 + + store.approx_persisted_items().1 + ); + println!( + "disk size of persisted store:\t{}MiB\n", + store.disk_space() / (1024 * 1024) + ); + } + + println!( + "{:.0} routes per second\n\ + {:.0} MiB per second", + global_counters.total_routes as f64 + / t_total.elapsed().as_secs() as f64, + mib_total as f64 / t_total.elapsed().as_secs() as f64 + ); + + if let Some(s) = store { + s.print_funky_stats(); + } + + if args.verify { + println!("\nverifying disk persistence..."); + let mut max_len = 0; + for pfx in persisted_prefixes { + let values = store + .unwrap() + .get_records_for_prefix(&pfx, None, false) + .unwrap() + .unwrap(); + if values.is_empty() { + eprintln!("Found empty prefix on disk"); + eprintln!("prefix: {}", pfx); + return; + } + if values.len() > max_len { + max_len = values.len(); + let recs = store + .unwrap() + .get_records_for_prefix(&pfx, None, false) + .unwrap() + .unwrap(); + println!("LEN {} prefix: {}", max_len, pfx); + for rec in recs { + let pa = OwnedPathAttributes::from(( + PduParseInfo::modern(), + rec.meta.0.to_vec(), + )); + print!( + "({})\tp[{}]", + rec.multi_uniq_id, + &pa.get::().unwrap() + ); + if let Some(comms) = &pa.get::() + { + print!(" c["); + comms + .communities() + .iter() + .for_each(|c| print!("{c} ")); + println!("]"); + } else { + println!(" no_c"); + }; + } + } + values.iter().filter(|v| v.meta.0.is_empty()).for_each(|v| { + println!("withdraw for {}, mui {}", pfx, v.multi_uniq_id) + }) + } + } +} + +fn main() { + let args = Cli::parse(); + + // let t_total = Instant::now(); + + // let mut global_counters = UpsertCounters::default(); + // let mut mib_total: usize = 0; + // let mut persisted_prefixes = BTreeSet::new(); + + match &args.persist_strategy { + Some(a) if a == &"memory_only".to_string() => { + let mut store_config = MemoryOnlyConfig; + 
store_config.set_persist_path("/tmp/rotonda/".into()); + let mut inner_stores: Stores = vec![]; + create_stores::( + &mut inner_stores, + &args, + store_config, + ); + } + Some(a) if a == &"write_ahead".to_string() => { + let mut store_config = WriteAheadConfig::default(); + store_config.set_persist_path("/tmp/rotonda/".into()); + let mut inner_stores: Stores = vec![]; + create_stores::( + &mut inner_stores, + &args, + store_config, + ); + } + Some(a) if a == &"persist_only".to_string() => { + let mut store_config = PersistOnlyConfig::default(); + store_config.set_persist_path("/tmp/rotonda/".into()); + let mut inner_stores: Stores = vec![]; + create_stores::( + &mut inner_stores, + &args, + store_config, + ); + } + Some(a) if a == &"persist_history".to_string() => { + let mut store_config = PersistHistoryConfig::default(); + store_config.set_persist_path("/tmp/rotonda/".into()); + let mut inner_stores: Stores = vec![]; + create_stores::( + &mut inner_stores, + &args, + store_config, + ); + } + None => { + let mut store_config = PersistHistoryConfig::default(); + store_config.set_persist_path("/tmp/rotonda/".into()); + let mut inner_stores: Stores = vec![]; + create_stores::( + &mut inner_stores, + &args, + store_config, + ); + } + Some(a) => { + eprintln!("Unknown persist strategy: {}", a); + } + } + + // let mut store = match &args { + // a if a.single_store && a.parse_only => { + // eprintln!( + // "Can't combine --parse-only and --single-store. + // Make up your mind." 
+ // ); + // return; + // } + // a if a.single_store => { + // inner_stores.push( + // MultiThreadedStore::::new_with_config( + // store_config.clone(), + // ) + // .unwrap(), + // ); + // println!( + // "created a single-store with strategy: {:?}\n", + // store_config + // ); + // Some(&inner_stores[0]) + // } + // a if a.parse_only => { + // println!("No store created (parse only)"); + // None + // } + // _ => { + // for _ in &args.mrt_files { + // inner_stores.push( + // MultiThreadedStore::::try_default().unwrap(), + // ); + // } + // println!("Number of created stores: {}", inner_stores.len()); + // println!("store config: {:?}", store_config); + // Some(&inner_stores[0]) + // } + // }; +} diff --git a/src/bin/truncate_len.rs b/src/bin/truncate_len.rs new file mode 100644 index 00000000..7512a69f --- /dev/null +++ b/src/bin/truncate_len.rs @@ -0,0 +1,37 @@ +use zerocopy::{NetworkEndian, U32}; + +// max len 128! + +fn truncate_to_len(bits: U32, len: u8) -> U32 { + match len { + 0 => U32::new(0), + 1..=31 => { + (bits >> U32::from(32 - len as u32)) << U32::from(32 - len as u32) + } + 32 => bits, + len => panic!("Can't truncate to more than 128 bits: {}", len), + } +} + +fn branchless_trunc(bits: U32, len: u8) -> u32 { + (bits + & ((1_u32.rotate_right(len as u32) + ^ 1_u32.saturating_sub(len as u32)) + .wrapping_sub(1) + ^ u32::MAX)) + .into() +} + +fn main() { + for b in 0..=u32::MAX { + if b % (1024 * 256) == 0 { + print!("."); + } + for l in 0..=32 { + let tl1 = truncate_to_len(U32::::new(b), l); + let tl2 = branchless_trunc(U32::::new(b), l); + // println!("{:032b} {:032b}", tl1, tl2); + assert_eq!(tl1, tl2); + } + } +} diff --git a/src/cht/mod.rs b/src/cht/mod.rs new file mode 100644 index 00000000..ea32b46f --- /dev/null +++ b/src/cht/mod.rs @@ -0,0 +1,131 @@ +mod oncebox; + +pub(crate) use oncebox::OnceBoxSlice; + +use crate::rib::STRIDE_SIZE; + +pub(crate) trait Value { + fn init_with_p2_children(size: usize) -> Self; +} + +#[derive(Debug)] +pub(crate) 
struct Cht<
+    V,
+    const ROOT_SIZE: usize,
+    const STRIDES_PER_BUCKET: usize,
+>([V; ROOT_SIZE]);
+
+impl<V: Value, const ROOT_SIZE: usize, const STRIDES_PER_BUCKET: usize>
+    Cht<V, ROOT_SIZE, STRIDES_PER_BUCKET>
+{
+    pub(crate) fn init() -> Self {
+        Self(std::array::from_fn::<_, ROOT_SIZE, _>(|_| {
+            V::init_with_p2_children(STRIDE_SIZE as usize)
+        }))
+    }
+
+    // There cannot be a root node for a prefix length that has NO slots,
+    // STRIDES_PER_BUCKET (an instance wide const) should always be bigger
+    // than 0.
+    #[allow(clippy::indexing_slicing)]
+    pub(crate) fn root_for_len(&self, len: u8) -> &V {
+        &self.0[len as usize / STRIDES_PER_BUCKET]
+    }
+}
+
+// The output of this function is exactly the same as this (for all values of
+// len and lvl we care for at least):
+//
+// let res = 4 * (lvl + 1);
+// if res < len {
+//     4
+// } else if res >= len + 4 {
+//     0
+// } else if len % 4 == 0 {
+//     4
+// } else {
+//     len % 4
+// }
+//
+// The gist of this function is that, we want exactly the number of slots in
+// our NodeSet that we can fill. This means:
+// - for any len smaller than STRIDE_SIZE (4), we only have one level, and
+//   that level takes `len` slots. There are no next levels in these lens.
+// - len of STRIDE_SIZE and bigger have as many levels as can fit full
+//   STRIDE_SIZES, so with len 4, that is still one level (lvl = 0), for
+//   len 5 that is two levels, one of size 4 (lvl = 0), and one of size 1
+//   (lvl = 1). From len 9 there's three levels and so on.
+// - The first len, level combination beyond the max. size of lvl should
+//   return a 0, so the looper knows that it has to go to the next len.
+// +// This is the output of the first values of len, lvl +// +// len, lvl : input parameters +// ts : total size of id +// ns : number of child slots for the NodeSet for this len, lvl +// +// len lvl ts ns +// 00 00 00 0 +// 01 00 01 1 +// 01 01 00 0 +// 02 00 02 2 +// 02 01 00 0 +// 03 00 03 3 +// 03 01 00 0 +// 04 00 04 4 +// 04 01 00 0 +// 05 00 04 4 +// 05 01 05 1 +// 05 02 00 0 +// 06 00 04 4 +// 06 01 06 2 +// 06 02 00 0 +// 07 00 04 4 +// 07 01 07 3 +// 07 02 00 0 +// 08 00 04 4 +// 08 01 08 4 +// 08 02 00 0 +// 09 00 04 4 +// 09 01 08 4 +// 09 02 09 1 +// 09 03 00 0 +// 10 00 04 4 +// ... +pub fn nodeset_size(len: u8, lvl: u8) -> u8 { + 4_u8.saturating_sub((4 * (lvl + 1)).saturating_sub(len)) +} + +// The value of the set of the parent of this one. used to calculate the shift +// offset in the hash for the CHT, so this is basically the `nodeset_size` +// shifted on (len, lvl) combination downwards. +// +// len lvl prev +// 00 00 00 +// 01 00 00 +// 01 01 01 +// 02 00 00 +// 02 01 02 +// 03 00 00 +// 03 01 03 +// 04 00 00 +// 04 01 04 +// 05 00 00 +// 05 01 04 +// 05 02 05 +// 06 00 00 +// 06 01 04 +// 06 02 06 +// 07 00 00 +// 07 01 04 +// 07 02 07 +// 08 00 00 +// 08 01 04 +// 08 02 08 +// 09 00 00 +// 09 01 04 +// 09 02 08 +// 09 03 09 +pub fn prev_node_size(len: u8, lvl: u8) -> u8 { + (lvl * 4) - lvl.saturating_sub(len >> 2) * ((lvl * 4) - len) +} diff --git a/src/local_array/store/oncebox.rs b/src/cht/oncebox.rs similarity index 73% rename from src/local_array/store/oncebox.rs rename to src/cht/oncebox.rs index 9752ba5f..d05973d8 100644 --- a/src/local_array/store/oncebox.rs +++ b/src/cht/oncebox.rs @@ -2,6 +2,12 @@ use std::ptr::null_mut; use std::slice; use std::sync::atomic::{AtomicPtr, Ordering}; +//------------ OnceBox ------------------------------------------------------- +// +// Create an atomic pointer once, never to be modified. 
The pointee can be +// changed, if enough considerations around atomically updating values are +// taken into account. Used by the Chained Hash Table (Cht) in `cht`. + #[derive(Debug, Default)] pub struct OnceBox { ptr: AtomicPtr, @@ -61,21 +67,25 @@ impl Drop for OnceBox { } } +//------------ OnceBoxSlice -------------------------------------------------- +// +// A slice of OnceBoxes, subject to the same constraints. Used in Cht. + #[derive(Debug, Default)] -pub struct OnceBoxSlice { +pub(crate) struct OnceBoxSlice { ptr: AtomicPtr>, - p2_size: u8, + size: usize, } impl OnceBoxSlice { - pub fn new(p2_size: u8) -> Self { + pub fn new(size: usize) -> Self { Self { ptr: AtomicPtr::new(null_mut()), - p2_size, + size, } } - pub fn is_null(&self) -> bool { + pub fn _is_null(&self) -> bool { self.ptr.load(Ordering::Relaxed).is_null() } @@ -84,18 +94,21 @@ impl OnceBoxSlice { if ptr.is_null() { None } else { - let slice = - unsafe { slice::from_raw_parts(ptr, 1 << self.p2_size) }; + let slice = unsafe { slice::from_raw_parts(ptr, self.size) }; slice.get(idx).and_then(|inner| inner.get()) } } + // This is a bit tricky: the caller of this method should make sure that + // the slice has enough elements. For performance reasons we are NOT + // checking that here. + #[allow(clippy::indexing_slicing)] pub fn get_or_init( &self, idx: usize, create: impl FnOnce() -> T, ) -> (&T, bool) { - // assert!(idx < (1 << self.p2_size)); + // assert!(idx < self.p2_size); let slice = self.get_or_make_slice(); slice[idx].get_or_init(create) } @@ -103,12 +116,12 @@ impl OnceBoxSlice { fn get_or_make_slice(&self) -> &[OnceBox] { let ptr = self.ptr.load(Ordering::Relaxed); if !ptr.is_null() { - return unsafe { slice::from_raw_parts(ptr, 1 << self.p2_size) }; + return unsafe { slice::from_raw_parts(ptr, self.size) }; } // Create a slice, set it, get again. 
- let mut vec = Vec::with_capacity(1 << self.p2_size); - for _ in 0..(1 << self.p2_size) { + let mut vec = Vec::with_capacity(self.size); + for _ in 0..(self.size) { vec.push(OnceBox::new()) } // Convert Vec<[OnceBox] -> Box<[OnceBox] -> &mut [OnceBox] @@ -130,16 +143,13 @@ impl OnceBoxSlice { // return current. assert!(!current.is_null()); let _ = unsafe { - Box::from_raw(slice::from_raw_parts_mut( - ptr, - 1 << self.p2_size, - )) + Box::from_raw(slice::from_raw_parts_mut(ptr, self.size)) }; current } }; - unsafe { slice::from_raw_parts(res, 1 << self.p2_size) } + unsafe { slice::from_raw_parts(res, self.size) } } } @@ -148,10 +158,7 @@ impl Drop for OnceBoxSlice { let ptr = self.ptr.swap(null_mut(), Ordering::Relaxed); if !ptr.is_null() { let _ = unsafe { - Box::from_raw(slice::from_raw_parts_mut( - ptr, - 1 << self.p2_size, - )) + Box::from_raw(slice::from_raw_parts_mut(ptr, self.size)) }; } } diff --git a/src/lib.rs b/src/lib.rs index 4bfdc1c0..2db27a68 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,42 +1,127 @@ -//! A treebitmap based IP Prefix Store - -//! IP prefixes storage and retrieval data structures for IPv4 and IPv6 prefixes. -//! This crate contains structures for both single and multi-threaded contexts, as -//! well as async contexts. +#![deny( + clippy::unwrap_used, + clippy::expect_used, + clippy::panic, + clippy::indexing_slicing +)] +#![allow(clippy::multiple_crate_versions)] +//! A library that provides abstractions for a BGP Routing Information Base +//! (RIB) for different AFI/SAFI types, as a database. +//! +//! The data structures provided by this crate can be used to store and query +//! routes (and route-like data) in memory and on-disk, for both current and +//! historical data. //! -//! The underlying tree structure is based on the tree bitmap as outlined in -//! [this paper](https://www.cs.cornell.edu/courses/cs419/2005sp/tree-bitmap.pdf). +//! The main data structures that this crate implements are firstly a tree +//! 
bitmap, largely as described in this paper[^1] - but with a twist. +//! There's also a blog post[^2] about the tree bitmap, and similar data +//! structures. Secondly, this repo implements a concurrent chained hash +//! table (called `cht` throughout this repo), geared towards keys that are +//! "prefix-like", i.e. variable-length bitfields, that fit within a +//! primitive integer type. //! -//! Part of the Rotonda modular BGP engine. +//! The log-structured merge tree ("lsm_tree") used in this library is +//! provided by the `lsm_tree` crate - the crate that powers `fjall`. +//! +//! [^1]: +//! [^2]: -//! Read more about the data-structure in this [blog post](https://blog.nlnetlabs.nl/donkeys-mules-horses/). -mod af; -mod local_array; -mod local_vec; -mod node_id; -mod prefix_record; -mod stride; -mod synth_int; +// ┌───────────────────┐ +// │ StarCastRib │ +// └────────┬┬─────────┘ +// ┌────────────────┘└────────────────┐ +// ┌──▼──┐ ┌──▼──┐ +// │ v4 │ │ v4 │ +// └──┬──┘ └──┬──┘ +// ┌───────────┼─────────┐ ┌───────────┼──────────┐ +// ┌─────▼────┐┌─────▼────┐┌───▼─────┐┌─────▼────┐┌─────▼────┐┌────▼───┐ +// │treebitmap││prefix_cht││lsm_tree ││treebitmap││prefix_cht││lsm_tree│ +// └─────┬────┘└──────────┘└─────────┘└─────┬────┘└──────────┘└────────┘ +// ┌──────┴─────┐ ┌──────┴─────┐ +// ┌───▼────┐┌──────▼────┐ ┌───▼────┐┌──────▼────┐ +// │node_cht││muis_bitmap│ │node_cht││muis_bitmap│ +// └────────┘└───────────┘ └────────┘└───────────┘ -#[macro_use] -mod macros; +// Rotonda-store is a fairly layered repo, it uses three different +// types of trees, that are all hidden behind one public interface. + +// `rib::starcast::StarCastRib`, holds that public API. This is the RIB +// that stores (route-like) data for IPv4/IPv6 unicast and multicast (hence +// *cast). This is a good starting point to dive into this repo. + +// `rib::starcast_af::StarCastAfRib` holds the three trees for a store, per +// Address Family. 
From there `tree_bitmap` (the mod.rs file), holds the tree
+// bitmap, `tree_bit_map::node_cht` holds the CHT that stores the nodes for
+// the tree bitmap. Next to the tree, it also holds a bitmap that indexes all
+// muis that are withdrawn for the whole tree. The tree bitmap is used for
+// all strategies.

-mod rotonda_store;
+// `prefix_cht::cht` holds the CHT that stores all the route-like data for the
+// in-memory strategies. This CHT is the same data-structure that is used for
+// the nodes, but it stores `MultiMap` typed values in its nodes (described in
+// the same file).

-// Public Interfaces
+// `lsm_tree` (again, in the mod.rs file) holds the log-structured merge tree
+// used for persistent storage on disk.

-pub mod prelude;
-/// Statistics for the two trees (IPv4 and IPv6).
-pub mod stats;
+//------------ Modules -------------------------------------------------------

-/// Some simple metadata implementations
-pub mod meta_examples;
+// the Chained Hash Table that is used by the treebitmap, and the CHT for the
+// prefixes (for in-memory storage of prefixes).
+mod cht;

-/// The publicly available devices
-pub use crate::rotonda_store::*;
+// The log-structured merge tree, used as persistent storage (on disk).
+mod lsm_tree;
+
+// The Chained Hash Table that stores the records for the prefixes in memory
+mod prefix_cht;
+
+// The Treebitmap, that stores the existence of all prefixes, and that is used
+// for all strategies.
+mod tree_bitmap;
+
+// Types, both public and private, that are used throughout the store.
+mod types; + +#[macro_use] +mod macros; + +pub(crate) use lsm_tree::LsmTree; +pub(crate) use tree_bitmap::TreeBitMap; // re-exports -pub use inetnum::addr; pub use crossbeam_epoch::{self as epoch, Guard}; +pub use inetnum::addr; + +// Public Interfaces on the root of the crate + +/// RIBs for various AFI/SAFI types +pub mod rib; + +/// Types used to create match queries on a RIB +pub use types::match_options; + +/// Record, Record Iterator and related types/traits +pub use types::prefix_record; + +/// Error types returned by a RIB +pub use types::errors; + +/// Trait that defines the AFIs 1 (IPv4) and 2 (IPv6). +pub use types::af::AddressFamily; + +/// The underlying value (u32) and trait impl for AFI 1. +pub use types::af::IPv4; +/// The underlying value (u128) and trait impl for AFI 2. +pub use types::af::IPv6; + +/// Trait that describes the conversion of a u32 or u128 in to a IPv4, or IPV6 +/// respectively. +pub use types::af::IntoIpAddr; + +/// Statistics and metrics types returned by methods on a RIB +pub use types::stats; -pub use prefix_record::{PublicRecord, RecordSet}; +// Used in tests +#[doc(hidden)] +pub use types::test_types; diff --git a/src/local_array/atomic_stride.rs b/src/local_array/atomic_stride.rs deleted file mode 100644 index a3788d76..00000000 --- a/src/local_array/atomic_stride.rs +++ /dev/null @@ -1,756 +0,0 @@ -use log::trace; -use parking_lot_core::SpinWait; -use std::fmt::{Binary, Debug}; -use std::sync::atomic::{ - fence, AtomicU16, AtomicU32, AtomicU64, AtomicU8, Ordering, -}; - -use crate::af::Zero; -use crate::synth_int::AtomicU128; -use crate::{impl_primitive_atomic_stride, AddressFamily}; - -pub type Stride3 = u16; -pub type Stride4 = u32; -pub type Stride5 = u64; - -pub struct AtomicStride2(pub AtomicU8); -pub struct AtomicStride3(pub AtomicU16); -pub struct AtomicStride4(pub AtomicU32); -pub struct AtomicStride5(pub AtomicU64); -pub struct AtomicStride6(pub AtomicU128); - -pub struct CasResult(pub Result); - -impl CasResult { 
- fn new(value: InnerType) -> Self { - CasResult(Ok(value)) - } -} - -pub trait AtomicBitmap -where - Self: From, -{ - type InnerType: Binary - + Copy - + Debug - + Zero - + PartialOrd - + std::ops::BitAnd - + std::ops::BitOr; - - fn new() -> Self; - fn inner(self) -> Self::InnerType; - fn is_set(&self, index: usize) -> bool; - fn compare_exchange( - &self, - current: Self::InnerType, - new: Self::InnerType, - ) -> CasResult; - fn load(&self) -> Self::InnerType; - fn to_u64(&self) -> u64; - fn to_u32(&self) -> u32; - fn set(&self, value: Self::InnerType); - fn merge_with(&self, node: Self::InnerType) { - let mut spinwait = SpinWait::new(); - let current = self.load(); - - fence(Ordering::Acquire); - - let mut new = current | node; - loop { - match self.compare_exchange(current, new) { - CasResult(Ok(_)) => { - return; - } - CasResult(Err(current)) => { - new = current | node; - } - } - spinwait.spin_no_yield(); - } - } -} - -impl AtomicBitmap for AtomicStride2 { - type InnerType = u8; - - fn new() -> Self { - AtomicStride2(AtomicU8::new(0)) - } - fn inner(self) -> Self::InnerType { - self.0.into_inner() - } - fn is_set(&self, bit: usize) -> bool { - self.load() & (1 << bit) != 0 - } - fn compare_exchange( - &self, - current: Self::InnerType, - new: Self::InnerType, - ) -> CasResult { - CasResult(self.0.compare_exchange( - current, - new, - Ordering::Acquire, - Ordering::Relaxed, - )) - } - fn load(&self) -> Self::InnerType { - self.0.load(Ordering::SeqCst) - } - - fn set(&self, value: Self::InnerType) { - self.0.store(value, Ordering::Relaxed); - } - - fn to_u32(&self) -> u32 { - self.0.load(Ordering::SeqCst) as u32 - } - - fn to_u64(&self) -> u64 { - self.0.load(Ordering::SeqCst) as u64 - } -} - -impl Zero for AtomicStride2 { - fn zero() -> Self { - AtomicStride2(AtomicU8::new(0)) - } - - fn is_zero(&self) -> bool { - self.0.load(Ordering::SeqCst) == 0 - } -} - -impl From for AtomicStride2 { - fn from(value: u8) -> Self { - Self(AtomicU8::new(value)) - } -} - 
-impl AtomicBitmap for AtomicStride3 { - type InnerType = u16; - - fn new() -> Self { - AtomicStride3(AtomicU16::new(0)) - } - fn inner(self) -> Self::InnerType { - self.0.into_inner() - } - fn is_set(&self, bit: usize) -> bool { - self.load() & (1 << bit) != 0 - } - fn compare_exchange( - &self, - current: Self::InnerType, - new: Self::InnerType, - ) -> CasResult { - CasResult(self.0.compare_exchange( - current, - new, - Ordering::Acquire, - Ordering::Relaxed, - )) - } - - fn load(&self) -> Self::InnerType { - self.0.load(Ordering::Relaxed) - } - - fn set(&self, value: Self::InnerType) { - self.0.store(value, Ordering::Relaxed); - } - - fn to_u32(&self) -> u32 { - self.0.load(Ordering::Relaxed) as u32 - } - - fn to_u64(&self) -> u64 { - self.0.load(Ordering::Relaxed) as u64 - } -} - -impl From for AtomicStride3 { - fn from(value: u16) -> Self { - Self(AtomicU16::new(value)) - } -} - -impl Zero for AtomicStride3 { - fn zero() -> Self { - AtomicStride3(AtomicU16::new(0)) - } - - fn is_zero(&self) -> bool { - self.0.load(Ordering::SeqCst) == 0 - } -} - -impl AtomicBitmap for AtomicStride4 { - type InnerType = u32; - - fn new() -> Self { - AtomicStride4(AtomicU32::new(0)) - } - fn inner(self) -> Self::InnerType { - self.0.into_inner() - } - fn is_set(&self, bit: usize) -> bool { - self.load() & (1 << bit) != 0 - } - fn compare_exchange( - &self, - current: Self::InnerType, - new: Self::InnerType, - ) -> CasResult { - CasResult(self.0.compare_exchange( - current, - new, - Ordering::Acquire, - Ordering::Relaxed, - )) - } - fn load(&self) -> Self::InnerType { - self.0.load(Ordering::Relaxed) - } - - fn set(&self, value: Self::InnerType) { - self.0.store(value, Ordering::Relaxed); - } - - fn to_u32(&self) -> u32 { - self.0.load(Ordering::Relaxed) - } - - fn to_u64(&self) -> u64 { - self.0.load(Ordering::Relaxed) as u64 - } -} - -impl From for AtomicStride4 { - fn from(value: u32) -> Self { - Self(AtomicU32::new(value)) - } -} -impl Zero for AtomicStride4 { - fn zero() -> 
Self { - AtomicStride4(AtomicU32::new(0)) - } - - fn is_zero(&self) -> bool { - self.0.load(Ordering::SeqCst) == 0 - } -} - -impl AtomicBitmap for AtomicStride5 { - type InnerType = u64; - - fn new() -> Self { - AtomicStride5(AtomicU64::new(0)) - } - fn inner(self) -> Self::InnerType { - self.0.into_inner() - } - fn is_set(&self, bit: usize) -> bool { - self.load() & (1 << bit) != 0 - } - fn compare_exchange( - &self, - current: Self::InnerType, - new: Self::InnerType, - ) -> CasResult { - CasResult(self.0.compare_exchange( - current, - new, - Ordering::SeqCst, - Ordering::SeqCst, - )) - } - fn load(&self) -> Self::InnerType { - self.0.load(Ordering::SeqCst) - } - - fn set(&self, value: Self::InnerType) { - self.0.store(value, Ordering::Relaxed); - } - - fn to_u32(&self) -> u32 { - self.0.load(Ordering::SeqCst) as u32 - } - - fn to_u64(&self) -> u64 { - self.0.load(Ordering::SeqCst) - } -} - -impl From for AtomicStride5 { - fn from(value: u64) -> Self { - Self(AtomicU64::new(value)) - } -} - -impl Zero for AtomicStride5 { - fn zero() -> Self { - AtomicStride5(AtomicU64::new(0)) - } - - fn is_zero(&self) -> bool { - self.0.load(Ordering::SeqCst) == 0 - } -} - -impl AtomicBitmap for AtomicStride6 { - type InnerType = u128; - - fn new() -> Self { - AtomicStride6(AtomicU128::new(0)) - } - fn inner(self) -> Self::InnerType { - let hi = self.0 .0.into_inner().to_be_bytes(); - let lo = self.0 .1.into_inner().to_be_bytes(); - - u128::from_be_bytes([ - hi[0], hi[1], hi[2], hi[3], hi[4], hi[5], hi[6], hi[7], lo[0], - lo[1], lo[2], lo[3], lo[4], lo[5], lo[6], lo[7], - ]) - } - fn is_set(&self, bit: usize) -> bool { - self.load() & (1 << bit) != 0 - } - fn compare_exchange( - &self, - current: Self::InnerType, - new: Self::InnerType, - ) -> CasResult { - // TODO TODO - // This is not actually thread-safe, it actually - // needs a memory fence, since we're writing - // to two different memory locations. 
- ( - self.0 .0.compare_exchange( - ((current << 64) >> 64) as u64, - ((new >> 64) << 64) as u64, - Ordering::SeqCst, - Ordering::SeqCst, - ), - self.0 .1.compare_exchange( - ((current << 64) >> 64) as u64, - ((new >> 64) << 64) as u64, - Ordering::SeqCst, - Ordering::SeqCst, - ), - ) - .into() - } - fn load(&self) -> Self::InnerType { - let hi = self.0 .0.load(Ordering::SeqCst).to_be_bytes(); - let lo = self.0 .1.load(Ordering::SeqCst).to_be_bytes(); - u128::from_be_bytes([ - hi[0], hi[1], hi[2], hi[3], hi[4], hi[5], hi[6], hi[7], lo[0], - lo[1], lo[2], lo[3], lo[4], lo[5], lo[6], lo[7], - ]) - } - - fn set(&self, _value: Self::InnerType) { - todo!() - } - - fn to_u32(&self) -> u32 { - unimplemented!() - } - - fn to_u64(&self) -> u64 { - unimplemented!() - } - - fn merge_with(&self, _node: Self::InnerType) { - todo!() - } -} - -impl From for AtomicStride6 { - fn from(value: u128) -> Self { - Self(AtomicU128::new(value)) - } -} - -impl Zero for AtomicStride6 { - fn zero() -> Self { - AtomicStride6(AtomicU128::new(0)) - } - - fn is_zero(&self) -> bool { - self.0 .0.load(Ordering::SeqCst) == 0 - && self.0 .1.load(Ordering::SeqCst) == 0 - } -} - -impl From<(Result, Result)> for CasResult { - fn from(r: (Result, Result)) -> Self { - match r { - (Ok(hi), Ok(lo)) => CasResult::new(u128::from_be_bytes([ - hi.to_be_bytes()[0], - hi.to_be_bytes()[1], - hi.to_be_bytes()[2], - hi.to_be_bytes()[3], - hi.to_be_bytes()[4], - hi.to_be_bytes()[5], - hi.to_be_bytes()[6], - hi.to_be_bytes()[7], - lo.to_be_bytes()[0], - lo.to_be_bytes()[1], - lo.to_be_bytes()[2], - lo.to_be_bytes()[3], - lo.to_be_bytes()[4], - lo.to_be_bytes()[5], - lo.to_be_bytes()[6], - lo.to_be_bytes()[7], - ])), - (Err(hi), Ok(lo)) => CasResult(Err(u128::from_be_bytes([ - hi.to_be_bytes()[0], - hi.to_be_bytes()[1], - hi.to_be_bytes()[2], - hi.to_be_bytes()[3], - hi.to_be_bytes()[4], - hi.to_be_bytes()[5], - hi.to_be_bytes()[6], - hi.to_be_bytes()[7], - lo.to_be_bytes()[0], - lo.to_be_bytes()[1], - 
lo.to_be_bytes()[2], - lo.to_be_bytes()[3], - lo.to_be_bytes()[4], - lo.to_be_bytes()[5], - lo.to_be_bytes()[6], - lo.to_be_bytes()[7], - ]))), - (Ok(hi), Err(lo)) => CasResult(Err(u128::from_be_bytes([ - hi.to_be_bytes()[0], - hi.to_be_bytes()[1], - hi.to_be_bytes()[2], - hi.to_be_bytes()[3], - hi.to_be_bytes()[4], - hi.to_be_bytes()[5], - hi.to_be_bytes()[6], - hi.to_be_bytes()[7], - lo.to_be_bytes()[0], - lo.to_be_bytes()[1], - lo.to_be_bytes()[2], - lo.to_be_bytes()[3], - lo.to_be_bytes()[4], - lo.to_be_bytes()[5], - lo.to_be_bytes()[6], - lo.to_be_bytes()[7], - ]))), - (Err(hi), Err(lo)) => CasResult(Err(u128::from_be_bytes([ - hi.to_be_bytes()[0], - hi.to_be_bytes()[1], - hi.to_be_bytes()[2], - hi.to_be_bytes()[3], - hi.to_be_bytes()[4], - hi.to_be_bytes()[5], - hi.to_be_bytes()[6], - hi.to_be_bytes()[7], - lo.to_be_bytes()[0], - lo.to_be_bytes()[1], - lo.to_be_bytes()[2], - lo.to_be_bytes()[3], - lo.to_be_bytes()[4], - lo.to_be_bytes()[5], - lo.to_be_bytes()[6], - lo.to_be_bytes()[7], - ]))), - } - } -} -pub trait Stride: - Sized - + Debug - + Eq - + Binary - + PartialOrd - + PartialEq - + Zero - + std::ops::BitAnd - + std::ops::BitOr -where - Self::AtomicPtrSize: AtomicBitmap, - Self::AtomicPfxSize: AtomicBitmap, - Self::PtrSize: Zero - + Binary - + Copy - + Debug - + std::ops::BitAnd - + PartialOrd - + Zero, -{ - type AtomicPfxSize; - type AtomicPtrSize; - type PtrSize; - const BITS: u8; - const STRIDE_LEN: u8; - - // Get the bit position of the start of the given nibble. - // The nibble is defined as a `len` number of bits set from the right. - // bit_pos always has only one bit set in the complete array. - // e.g.: - // len: 4 - // nibble: u16 = 0b0000 0000 0000 0111 - // bit_pos: u16 = 0b0000 0000 0000 1000 - - // `::BITS` - // is the whole length of the bitmap, since we are shifting to the left, - // we have to start at the end of the bitmap. - // `((1 << len) - 1)` - // is the offset for this nibble length in the bitmap. 
- // `nibble` - // shifts to the right position withing the bit range for this nibble - // length, this follows from the fact that the `nibble` value represents - // *both* the bitmap part, we're considering here *and* the position - // relative to the nibble length offset in the bitmap. - fn get_bit_pos( - nibble: u32, - len: u8, - ) -> <::AtomicPfxSize as AtomicBitmap>::InnerType; - - fn get_bit_pos_as_u8(nibble: u32, len: u8) -> u8; - - // Clear the bitmap to the right of the pointer and count the number of - // ones. This number represents the index to the corresponding prefix in - // the pfx_vec. - - // Clearing is performed by shifting to the right until we have the - // nibble all the way at the right. - - // `(::BITS >> 1)` - // The end of the bitmap (this bitmap is half the size of the pfx bitmap) - - // `nibble` - // The bit position relative to the offset for the nibble length, this - // index is only used at the last (relevant) stride, so the offset is - // always 0. - - // get_pfx_index only needs nibble and len for fixed-layout bitarrays, - // since the index can be deducted from them. - fn get_pfx_index(nibble: u32, len: u8) -> usize; - - // Clear the bitmap to the right of the pointer and count the number of - // ones. This number represents the index to the corresponding child node - // in the ptr_vec. - - // Clearing is performed by shifting to the right until we have the - // nibble all the way at the right. - - // For ptrbitarr the only index we want is the one for a full-length - // nibble (stride length) at the last stride, so we don't need the length - // of the nibble. - - // `(::BITS >> 1)` - // The end of the bitmap (this bitmap is half the size of the pfx bitmap) - // AF::BITS is the size of the pfx bitmap. - - // `nibble` - // The bit position relative to the offset for the nibble length, this - // index is only used at the last (relevant) stride, so the offset is - // always 0. 
- fn get_ptr_index( - bitmap: <::AtomicPtrSize as AtomicBitmap>::InnerType, - nibble: u32, - ) -> usize; - - #[allow(clippy::wrong_self_convention)] - fn into_node_id( - addr_bits: AF, - len: u8, - ) -> super::node::StrideNodeId; - - // Convert a ptrbitarr into a pfxbitarr sized bitmap, - // so we can do bitwise operations with a pfxbitarr sized - // bitmap on them. - // Since the last bit in the pfxbitarr isn't used, but the - // full ptrbitarr *is* used, the prtbitarr should be shifted - // one bit to the left. - #[allow(clippy::wrong_self_convention)] - fn into_stride_size( - bitmap: <::AtomicPtrSize as AtomicBitmap>::InnerType, - ) -> <::AtomicPfxSize as AtomicBitmap>::InnerType; - - // Convert a pfxbitarr sized bitmap into a ptrbitarr sized - // Note that bitwise operators align bits of unsigned types with - // different sizes to the right, so we don't have to do anything to pad - // the smaller sized type. We do have to shift one bit to the left, to - // accommodate the unused pfxbitarr's last bit. 
- #[allow(clippy::wrong_self_convention)] - fn into_ptrbitarr_size( - bitmap: <::AtomicPfxSize as AtomicBitmap>::InnerType, - ) -> <::AtomicPtrSize as AtomicBitmap>::InnerType; - - fn leading_zeros(self) -> u32; -} - -impl_primitive_atomic_stride![3; 16; u16; AtomicStride3; u8; AtomicStride2]; -impl_primitive_atomic_stride![4; 32; u32; AtomicStride4; u16; AtomicStride3]; -impl_primitive_atomic_stride![5; 64; u64; AtomicStride5; u32; AtomicStride4]; -// impl_primitive_stride![6; 128; u128; u64]; - -// impl Stride for Stride7 { -// type PtrSize = u128; -// const BITS: u8 = 255; -// const STRIDE_LEN: u8 = 7; - -// fn get_bit_pos(nibble: u32, len: u8) -> Self { -// match 256 - ((1 << len) - 1) as u16 - nibble as u16 - 1 { -// n if n < 128 => U256(0, 1 << n), -// n => U256(1 << (n as u16 - 128), 0), -// } -// } - -// fn get_pfx_index(bitmap: Self, nibble: u32, len: u8) -> usize { -// let n = 256 - ((1 << len) - 1) as u16 - nibble as u16 - 1; -// match n { -// // if we move less than 128 bits to the right, -// // all of bitmap.0 and a part of bitmap.1 will be used for counting zeros -// // ex. -// // ...1011_1010... >> 2 => ...0010_111010... -// // ____ ==== -- --==== -// n if n < 128 => { -// bitmap.0.count_ones() as usize + (bitmap.1 >> n).count_ones() as usize - 1 -// } -// // if we move more than 128 bits to the right, -// // all of bitmap.1 wil be shifted out of sight, -// // so we only have to count bitmap.0 zeroes than (after) shifting of course). -// n => (bitmap.0 >> (n - 128)).count_ones() as usize - 1, -// } -// } - -// fn get_ptr_index(bitmap: Self::PtrSize, nibble: u32) -> usize { -// (bitmap >> ((256 >> 1) - nibble as u16 - 1) as usize).count_ones() as usize - 1 -// } - -// fn into_stride_size(bitmap: Self::PtrSize) -> Self { -// // One bit needs to move into the self.0 u128, -// // since the last bit of the *whole* bitmap isn't used. 
-// U256(bitmap >> 127, bitmap << 1) -// } - -// fn into_ptrbitarr_size(bitmap: Self) -> Self::PtrSize { -// // TODO expand: -// // self.ptrbitarr = -// // S::into_ptrbitarr_size(bit_pos | S::into_stride_size(self.ptrbitarr)); -// (bitmap.0 << 127 | bitmap.1 >> 1) as u128 -// } - -// #[inline] -// fn leading_zeros(self) -> u32 { -// let lz = self.0.leading_zeros(); -// if lz == 128 { -// lz + self.1.leading_zeros() -// } else { -// lz -// } -// } -// } - -// impl Stride for Stride8 { -// type PtrSize = U256; -// const BITS: u8 = 255; // bogus -// const STRIDE_LEN: u8 = 8; - -// fn get_bit_pos(nibble: u32, len: u8) -> Self { -// match 512 - ((1 << len) - 1) as u16 - nibble as u16 - 1 { -// n if n < 128 => U512(0, 0, 0, 1 << n), -// n if n < 256 => U512(0, 0, 1 << (n as u16 - 128), 0), -// n if n < 384 => U512(0, 1 << (n as u16 - 256), 0, 0), -// n => U512(1 << (n as u16 - 384), 0, 0, 0), -// } -// } - -// fn get_pfx_index(bitmap: Self, nibble: u32, len: u8) -> usize { -// let n = 512 - ((1 << len) - 1) as u16 - nibble as u16 - 1; -// match n { -// // if we move less than 128 bits to the right, all of bitmap.2 -// // and a part of bitmap.3 will be used for counting zeros. -// // ex. -// // ...1011_1010... >> 2 => ...0010_111010... -// // ____ ==== -- --==== -// n if n < 128 => { -// bitmap.0.count_ones() as usize -// + bitmap.1.count_ones() as usize -// + bitmap.2.count_ones() as usize -// + (bitmap.3 >> n).count_ones() as usize -// - 1 -// } - -// n if n < 256 => { -// bitmap.0.count_ones() as usize -// + bitmap.1.count_ones() as usize -// + (bitmap.2 >> (n - 128)).count_ones() as usize -// - 1 -// } - -// n if n < 384 => { -// bitmap.0.count_ones() as usize + (bitmap.1 >> (n - 256)).count_ones() as usize - 1 -// } - -// // if we move more than 384 bits to the right, all of bitmap. -// // [1,2,3] will be shifted out of sight, so we only have to count -// // bitmap.0 zeroes then (after shifting of course). 
-// n => (bitmap.0 >> (n - 384)).count_ones() as usize - 1, -// } -// } - -// fn get_ptr_index(bitmap: Self::PtrSize, nibble: u32) -> usize { -// let n = (512 >> 1) - nibble as u16 - 1; -// match n { -// // if we move less than 256 bits to the right, all of bitmap.0 -// // and a part of bitmap.1 will be used for counting zeros -// // ex. -// // ...1011_1010... >> 2 => ...0010_111010... -// // ____ ==== -- --==== -// n if n < 128 => { -// bitmap.0.count_ones() as usize + (bitmap.1 >> n).count_ones() as usize - 1 -// } -// // if we move more than 256 bits to the right, all of bitmap.1 -// // wil be shifted out of sight, so we only have to count bitmap.0 -// // zeroes than (after) shifting of course). -// n => (bitmap.0 >> (n - 128)).count_ones() as usize - 1, -// } -// } - -// fn into_stride_size(bitmap: Self::PtrSize) -> Self { -// // One bit needs to move into the self.0 u128, -// // since the last bit of the *whole* bitmap isn't used. -// U512( -// 0, -// bitmap.0 >> 127, -// (bitmap.0 << 1) | (bitmap.1 >> 127), -// bitmap.1 << 1, -// ) -// } - -// fn into_ptrbitarr_size(bitmap: Self) -> Self::PtrSize { -// // TODO expand: -// // self.ptrbitarr = -// // S::into_ptrbitarr_size(bit_pos | S::into_stride_size(self.ptrbitarr)); -// U256( -// (bitmap.1 << 127 | bitmap.2 >> 1) as u128, -// (bitmap.2 << 127 | bitmap.3 >> 1) as u128, -// ) -// } - -// #[inline] -// fn leading_zeros(self) -> u32 { -// let mut lz = self.0.leading_zeros(); -// if lz == 128 { -// lz += self.1.leading_zeros(); -// if lz == 256 { -// lz += self.2.leading_zeros(); -// if lz == 384 { -// lz += self.3.leading_zeros(); -// } -// } -// } -// lz -// } -// } diff --git a/src/local_array/bit_span.rs b/src/local_array/bit_span.rs deleted file mode 100644 index 480281dd..00000000 --- a/src/local_array/bit_span.rs +++ /dev/null @@ -1,33 +0,0 @@ -#[derive(Copy, Clone, Debug)] -pub(crate) struct BitSpan { - pub bits: u32, - pub len: u8 -} - -impl BitSpan { - pub(crate) fn new(bits: u32, len: u8) -> Self { - 
Self { - bits, - len - } - } - - // Increment the bit span by one and calculate the new length. - #[allow(dead_code)] - pub(crate) fn inc(&mut self) { - self.bits += 1; - self.len = ::max(self.len, (32 - self.bits.leading_zeros()) as u8); - } - - #[allow(dead_code)] - pub(crate) fn set_len_to_bits(&mut self) { - self.len = ::max(self.len, (32 - self.bits.leading_zeros()) as u8); - } - -} - -impl std::fmt::Binary for BitSpan { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:032b} (len {})", self.bits, self.len) - } -} \ No newline at end of file diff --git a/src/local_array/macros.rs b/src/local_array/macros.rs deleted file mode 100644 index 8ef26c66..00000000 --- a/src/local_array/macros.rs +++ /dev/null @@ -1,198 +0,0 @@ -#[macro_export] -// This macro expands into a match node {} -// with match arms for all SizedStrideNode::Stride[3-8] -// for use in insert() -#[doc(hidden)] -macro_rules! insert_match { - ( - $self: ident; - $guard: ident; - $nibble_len: expr; - $nibble: expr; // nibble is a variable-length bitarray (1,2,4,8,etc) - $is_last_stride: expr; - $pfx: ident; // the whole search prefix - $record: ident; // the record holding the metadata - $update_path_selections: ident; // boolean indicate whether to update the path selections for this route - $truncate_len: ident; // the start of the length of this stride - $stride_len: ident; // the length of this stride - $cur_i: expr; // the id of the current node in this stride - $level: expr; - $acc_retry_count: expr; - // $enum: ident; - // The strides to generate match arms for, - // $variant is the name of the enum varian (Stride[3..8]) and - // $len is the index of the stats level, so 0..5 - $( $variant: ident; $stats_level: expr ), * - ) => { - // Look up the current node in the store. This should never fail, - // since we're starting at the root node and retrieve that. If a node - // does not exist, it is created here. 
BUT, BUT, in a multi-threaded - // context, one thread creating the node may be outpaced by a thread - // reading the same node. Because the creation of a node actually - // consists of two independent atomic operations (first setting the - // right bit in the parent bitarray, second storing the node in the - // store with the meta-data), a thread creating a new node may have - // altered the parent bitarray, but not it didn't create the node - // in the store yet. The reading thread, however, saw the bit in the - // parent and wants to read the node in the store, but that doesn't - // exist yet. In that case, the reader thread needs to try again - // until it is actually created - - // This macro counts the number of retries and adds that to the - // $acc_retry_count variable, to be used by the incorporating - // function. - { - // this counts the number of retry_count for this loop only, - // but ultimately we will return the accumulated count of all - // retry_count from this macro. - let local_retry_count = 0; - // retrieve_node_mut updates the bitmap index if necessary. - if let Some(current_node) = $self.store.retrieve_node_mut($cur_i, $record.multi_uniq_id) { - match current_node { - $( - SizedStrideRef::$variant(current_node) => { - // eval_node_or_prefix_at mutates the node to reflect changes - // in the ptrbitarr & pfxbitarr. - match current_node.eval_node_or_prefix_at( - $nibble, - $nibble_len, - // All the bits of the search prefix, but with a length set to - // the start of the current stride. - StrideNodeId::dangerously_new_with_id_as_is($pfx.get_net(), $truncate_len), - // the length of THIS stride - $stride_len, - // the length of the next stride - $self.store.get_stride_sizes().get(($level + 1) as usize), - $is_last_stride, - ) { - (NewNodeOrIndex::NewNode(n), retry_count) => { - // Stride3 logs to stats[0], Stride4 logs to stats[1], etc. - // $self.stats[$stats_level].inc($level); - - // get a new identifier for the node we're going to create. 
- let new_id = $self.store.acquire_new_node_id(($pfx.get_net(), $truncate_len + $nibble_len)); - - // store the new node in the global - // store. It returns the created id - // and the number of retries before - // success. - match $self.store.store_node(new_id, $record.multi_uniq_id, n) { - Ok((node_id, s_retry_count)) => { - Ok((node_id, $acc_retry_count + s_retry_count + retry_count)) - }, - Err(err) => { - Err(err) - } - } - } - (NewNodeOrIndex::ExistingNode(node_id), retry_count) => { - // $self.store.update_node($cur_i,SizedStrideRefMut::$variant(current_node)); - if log_enabled!(log::Level::Trace) { - if local_retry_count > 0 { - trace!("{} contention: Node already exists {}", - std::thread::current().name().unwrap_or("unnamed-thread"), node_id - ) - } - } - Ok((node_id, $acc_retry_count + local_retry_count + retry_count)) - }, - (NewNodeOrIndex::NewPrefix, retry_count) => { - return $self.store.upsert_prefix($pfx, $record, $update_path_selections, $guard) - .and_then(|mut r| { - r.cas_count += $acc_retry_count as usize + local_retry_count as usize + retry_count as usize; - Ok(r) - }) - // Log - // $self.stats[$stats_level].inc_prefix_count($level); - } - (NewNodeOrIndex::ExistingPrefix, retry_count) => { - return $self.store.upsert_prefix($pfx, $record, $update_path_selections, $guard) - .and_then(|mut r| { - r.cas_count += $acc_retry_count as usize + local_retry_count as usize + retry_count as usize; - Ok(r) - }) - } - } // end of eval_node_or_prefix_at - } - )*, - } - } else { - Err(PrefixStoreError::NodeCreationMaxRetryError) - } - } - } -} - -#[macro_export] -// This macro only works for stride with bitmaps that are <= u128, -// the ones with synthetic integers (U256, U512) don't have the trait -// implementations for left|right shift, counting ones etc. -#[doc(hidden)] -macro_rules! 
impl_primitive_atomic_stride { - ( - $( - $len: expr; - $bits: expr; - $pfxsize: ty; - $atomicpfxsize: ty; - $ptrsize: ty; - $atomicptrsize: ty - ), - *) => { - $( - impl Stride for $pfxsize { - type AtomicPfxSize = $atomicpfxsize; - type AtomicPtrSize = $atomicptrsize; - type PtrSize = $ptrsize; - const BITS: u8 = $bits; - const STRIDE_LEN: u8 = $len; - - fn get_bit_pos(nibble: u32, len: u8) -> $pfxsize { - trace!("nibble {}, len {}, BITS {}", nibble, len, ::BITS); - 1 << ( - ::BITS - ((1 << len) - 1) as u8 - - nibble as u8 - 1 - ) - } - - fn get_bit_pos_as_u8(nibble: u32, len: u8) -> u8 { - 1 << ( - ::BITS - ((1 << len) - 1) as u8 - - nibble as u8 - 1 - ) - } - - fn get_pfx_index(nibble: u32, len: u8) - -> usize { - (Self::get_bit_pos(nibble, len).leading_zeros() - 1) as usize - - } - - fn get_ptr_index(_bitmap: $ptrsize, nibble: u32) -> usize { - (nibble as u16).into() - } - - fn into_node_id( - addr_bits: AF, - len: u8 - ) -> $crate::local_array::node::StrideNodeId { - let id = $crate::local_array::node::StrideNodeId::new_with_cleaned_id(addr_bits, len); - id - } - - fn into_stride_size(bitmap: $ptrsize) -> $pfxsize { - bitmap as $pfxsize << 1 - } - - fn into_ptrbitarr_size(bitmap: $pfxsize) -> $ptrsize { - (bitmap >> 1) as $ptrsize - } - - #[inline] - fn leading_zeros(self) -> u32 { - self.leading_zeros() - } - } - )* - }; -} diff --git a/src/local_array/mod.rs b/src/local_array/mod.rs deleted file mode 100644 index 812c15c6..00000000 --- a/src/local_array/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -mod atomic_stride; -pub(crate) mod bit_span; -pub(crate) mod node; -pub(crate) mod query; -mod tests; -pub(crate) mod tree; - -pub mod store; - -#[macro_use] -mod macros; diff --git a/src/local_array/node.rs b/src/local_array/node.rs deleted file mode 100644 index 88ebfd06..00000000 --- a/src/local_array/node.rs +++ /dev/null @@ -1,1087 +0,0 @@ -use std::sync::atomic::{AtomicU16, AtomicU32, AtomicU64, AtomicU8}; -use std::{ - fmt::Debug, - marker::PhantomData, -}; - 
-use log::trace; -use parking_lot_core::SpinWait; - -// pub use super::atomic_stride::*; -use super::bit_span::BitSpan; -use super::store::iterators::SizedNodeMoreSpecificIter; -use crate::local_array::store::iterators::SizedPrefixIter; - -pub use crate::local_array::tree::*; -use crate::af::Zero; -use crate::af::AddressFamily; - -//------------ TreeBitMap Node ---------------------------------------------- - -// The treebitmap turned into a "trie-bitmap", really. A Node in the -// treebitmap now only holds a ptrbitarr bitmap and a pfxbitarr bitmap, that -// indicate whether a node or a prefix exists in that spot. The corresponding -// node Ids and prefix ids are calculated from their position in the array. -// Nodes do *NOT* have a clue where they are in the tree, so they don't know -// the node id they represent. Instead, the node id is calculated from the -// position in the tree. That's why several methods take a `base_prefix` as a -// an argument: it represents the ID of the node itself. -// -// The elision of both the collection of children nodes and the prefix nodes -// in a treebitmap node is enabled by the storage backend for the -// multi-threaded store, since holds its entries keyed on the [node|prefix] -// id. 
(in contrast with arrays or `vec`s, that have -pub struct TreeBitMapNode< - AF, - S, -> where - Self: Sized, - S: Stride, - AF: AddressFamily, -{ - pub ptrbitarr: ::AtomicPtrSize, - pub pfxbitarr: ::AtomicPfxSize, - pub _af: PhantomData, -} - -impl Debug - for TreeBitMapNode -where - AF: AddressFamily, - S: Stride, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TreeBitMapNode") - .field("ptrbitarr", &self.ptrbitarr.load()) - .field("pfxbitarr", &self.pfxbitarr.load()) - .finish() - } -} - -impl - std::fmt::Display for TreeBitMapNode -where - AF: AddressFamily, - S: Stride -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "TreeBitMapNode {{ ptrbitarr: {:?}, pfxbitarr: {:?} }}", - self.ptrbitarr.load(), - self.pfxbitarr.load(), - ) - } -} - -impl - TreeBitMapNode -where - AF: AddressFamily, - S: Stride -{ - pub(crate) fn new() -> Self { - TreeBitMapNode { - ptrbitarr: <::AtomicPtrSize as AtomicBitmap>::new(), - pfxbitarr: <::AtomicPfxSize as AtomicBitmap>::new(), - _af: PhantomData - } - } - - // ------- Iterators ---------------------------------------------------- - - // Iterate over all the child node of this node - pub(crate) fn ptr_iter(&self, base_prefix: StrideNodeId) -> - NodeChildIter { - NodeChildIter:: { - base_prefix, - ptrbitarr: self.ptrbitarr.load(), - bit_span: BitSpan::new(0, 1), - _af: PhantomData, - } - } - - // Iterate over all the prefix ids contained in this node. - // Note that this function is *not* used by the iterator that iterates - // over all prefixes. That one doesn't have to use the tree at all, but - // uses the store directly. 
- pub(crate) fn pfx_iter(&self, base_prefix: StrideNodeId) -> - NodePrefixIter { - NodePrefixIter:: { - pfxbitarr: self.pfxbitarr.load(), - base_prefix, - bit_span: BitSpan::new(0,1 ), - _af: PhantomData, - _s: PhantomData, - } - } - - // Iterate over the more specific prefixes ids contained in this node - pub(crate) fn more_specific_pfx_iter(&self, base_prefix: StrideNodeId, - start_bit_span: BitSpan, skip_self: bool) -> - NodeMoreSpecificsPrefixIter { - NodeMoreSpecificsPrefixIter:: { - pfxbitarr: self.pfxbitarr.load(), - base_prefix, - start_bit_span, - cursor: start_bit_span, - skip_self, - _s: PhantomData, - } - } - - // Iterate over the nodes that contain more specifics for the requested - // base_prefix and corresponding bit_span. - pub(crate) fn more_specific_ptr_iter(&self, base_prefix: StrideNodeId, - start_bit_span: BitSpan) -> - NodeMoreSpecificChildIter { - NodeMoreSpecificChildIter:: { - ptrbitarr: self.ptrbitarr.load(), - base_prefix, - start_bit_span, - cursor: None, - } - } - - - // ------- Search by Traversal methods ----------------------------------- - - // Inspects the stride (nibble, nibble_len) to see it there's already a - // child node (if not at the last stride) or a prefix (if it's the last - // stride). - // - // Returns a tuple of which the first element is one of: - // - A newly created child node. - // - The index of the existing child node in the global `nodes` vec - // - A newly created Prefix - // - The index of the existing prefix in the global `prefixes` vec - // and the second element is the number of accumulated retries for the - // compare_exchange of both ptrbitarr and pfxbitarr. - pub(crate) fn eval_node_or_prefix_at( - &self, - nibble: u32, - nibble_len: u8, - // all the bits of the search prefix, but with the length set to - // the length of this stride. So bits are set beyond its length. 
- base_prefix: StrideNodeId, - stride_len: u8, - next_stride: Option<&u8>, - is_last_stride: bool, - ) -> (NewNodeOrIndex, u32) { - - // THE CRITICAL SECTION - // - // UPDATING ptrbitarr & pfxbitarr - // - // This section is not as critical as creating/updating a - // a prefix. We need to set one bit only, and if somebody - // beat us to it that's fine, we'll figure that out when - // we try to write the prefix's serial number later on. - // The one thing that can go wrong here is that we are - // using an old ptrbitarr and overwrite bits set in the - // meantime elsewhere in the bitarray. - let mut retry_count = 0; - let ptrbitarr = self.ptrbitarr.load(); - let pfxbitarr = self.pfxbitarr.load(); - let bit_pos = S::get_bit_pos(nibble, nibble_len); - let new_node: SizedStrideNode; - - // Check that we're not at the last stride (pfx.len <= stride_end), - // Note that next_stride may have a value, but we still don't want to - // continue, because we've exceeded the length of the prefix to - // be inserted. - // Also note that a nibble_len < S::BITS (a smaller than full nibble) - // does indeed indicate the last stride has been reached, but the - // reverse is *not* true, i.e. a full nibble can also be the last - // stride. 
Hence the `is_last_stride` argument - if !is_last_stride { - - // We are not at the last stride - // Check it the ptr bit is already set in this position - if (S::into_stride_size(ptrbitarr) & bit_pos) == - <<::AtomicPfxSize as AtomicBitmap>::InnerType>::zero() { - // Nope, set it and create a child node - - match next_stride.unwrap() { - 3_u8 => { - new_node = SizedStrideNode::Stride3(TreeBitMapNode { - ptrbitarr: AtomicStride2(AtomicU8::new(0)), - pfxbitarr: AtomicStride3(AtomicU16::new(0)), - // pfx_vec: PrefixSet::empty(14), - _af: PhantomData, - }); - } - 4_u8 => { - new_node = SizedStrideNode::Stride4(TreeBitMapNode { - ptrbitarr: AtomicStride3(AtomicU16::new(0)), - pfxbitarr: AtomicStride4(AtomicU32::new(0)), - // pfx_vec: PrefixSet::empty(30), - _af: PhantomData, - }); - } - 5_u8 => { - new_node = SizedStrideNode::Stride5(TreeBitMapNode { - ptrbitarr: AtomicStride4(AtomicU32::new(0)), - pfxbitarr: AtomicStride5(AtomicU64::new(0)), - // pfx_vec: PrefixSet::empty(62), - _af: PhantomData, - }); - } - _ => { - panic!("can't happen"); - } - }; - - // THE CRITICAL SECTION - // - // UPDATING pfxbitarr - // - // preventing using an old ptrbitarr and overwrite bits set - // in the meantime elsewhere in the bitarray. - let mut a_ptrbitarr = self.ptrbitarr.compare_exchange(ptrbitarr, - S::into_ptrbitarr_size( - bit_pos | S::into_stride_size(ptrbitarr), - )); - let mut spinwait = SpinWait::new(); - loop { - match a_ptrbitarr { - CasResult(Ok(_)) => { - break; - } - CasResult(Err(newer_array)) => { - // Someone beat us to it, so we need to use the - // newer array. 
- retry_count += 1; - a_ptrbitarr = self.ptrbitarr.compare_exchange(newer_array, - S::into_ptrbitarr_size( - bit_pos | S::into_stride_size(newer_array), - )); - } - }; - spinwait.spin_no_yield(); - } - - return (NewNodeOrIndex::NewNode( - new_node - ), retry_count); - } - } else { - // only at the last stride do we create the bit in the prefix - // bitmap, and only if it doesn't exist already - if pfxbitarr & bit_pos - == <<::AtomicPfxSize as AtomicBitmap>::InnerType as std::ops::BitAnd>::Output::zero() - { - - // THE CRITICAL SECTION - // - // UPDATING pfxbitarr - // - // preventing using an old pfxbitarr and overwrite bits set - // in the meantime elsewhere in the bitarray. - let mut a_pfxbitarr = - self.pfxbitarr.compare_exchange( - pfxbitarr, bit_pos | pfxbitarr - ); - let mut spinwait = SpinWait::new(); - - loop { - match a_pfxbitarr { - CasResult(Ok(_)) => { - break; - } - CasResult(Err(newer_array)) => { - // Someone beat us to it, so we need to use the - // newer array. - retry_count += 1; - a_pfxbitarr = self.pfxbitarr.compare_exchange( - newer_array, bit_pos | newer_array - ); - } - }; - spinwait.spin_no_yield(); - } - - return (NewNodeOrIndex::NewPrefix, retry_count); - } - return (NewNodeOrIndex::ExistingPrefix, retry_count); - } - - // Nodes always live at the last length of a stride (i.e. the last - // nibble), so we add the stride length to the length of the - // base_prefix (which is always the start length of the stride). - (NewNodeOrIndex::ExistingNode( - base_prefix.add_to_len(stride_len).truncate_to_len() - ), retry_count) - } - - //-------- Search nibble functions -------------------------------------- - - // This function looks for the longest marching prefix in the provided - // nibble, by iterating over all the bits in it and comparing that with - // the appropriate bytes from the requested prefix. It mutates the - // `less_specifics_vec` that was passed in to hold all the prefixes found - // along the way. 
- pub(crate) fn search_stride_for_longest_match_at( - &self, - search_pfx: PrefixId, - mut nibble: u32, - nibble_len: u8, - start_bit: u8, - less_specifics_vec: &mut Option>>, - ) -> (Option>, Option>) { - let pfxbitarr = self.pfxbitarr.load(); - let ptrbitarr = self.ptrbitarr.load(); - let mut bit_pos = S::get_bit_pos(nibble, nibble_len); - let mut found_pfx = None; - - trace!("start longest_match search"); - for n_l in 1..(nibble_len + 1) { - // Move the bit in the right position. - nibble = - AddressFamily::get_nibble(search_pfx.get_net(), start_bit, n_l); - bit_pos = S::get_bit_pos(nibble, n_l); - - // Check if the prefix has been set, if so select the prefix. - // This is not necessarily the final prefix that will be - // returned. - - // Check it there's a prefix matching in this bitmap for this - // nibble. - trace!("pfxbitarr {:032b}", pfxbitarr); - - if pfxbitarr & bit_pos > <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() { - let f_pfx = PrefixId::new(search_pfx.get_net().truncate_to_len(start_bit + n_l), start_bit + n_l); - // f_pfx.set_serial(self.get_pfx_serial(f_pfx, nibble, n_l, guard).load(Ordering::Relaxed)); - - // Receiving a less_specifics_vec means that the user wants - // to have all the last-specific prefixes returned, so add - // the found prefix. - trace!("gather pfx in less_specifics {:?}", f_pfx); - trace!("ls_vec {:?}", less_specifics_vec); - if let Some(ls_vec) = less_specifics_vec { - trace!("len {}", search_pfx.get_len()); - trace!("start_bit {}", start_bit); - trace!("n_l {}", n_l); - trace!("smaller length? 
{}", search_pfx.get_len() > start_bit + n_l); - trace!("{}", (S::into_stride_size(ptrbitarr) - & bit_pos) - == <::AtomicPfxSize as AtomicBitmap>::InnerType::zero()); - if search_pfx.get_len() > start_bit + n_l - && (S::into_stride_size(ptrbitarr) - & bit_pos) - == <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() - { - ls_vec.push(f_pfx); - } - } - - found_pfx = Some(f_pfx); - } - } - - let base_prefix = - StrideNodeId::new_with_cleaned_id(search_pfx.get_net(), start_bit); - - // Check if this the last stride, or if they're no more children to - // go to, if so return what we found up until now. - if search_pfx.get_len() <= start_bit + nibble_len - || (S::into_stride_size(ptrbitarr) & bit_pos) == <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() - // No children or at the end, return the definitive LMP we found. - { - return ( - None, /* no more children */ - found_pfx, /* The definitive LMP if any */ - ); - } - - // There's another child, return it together with the preliminary LMP - // we found. - ( - // The identifier of the node that has children of the next - // stride. - Some(base_prefix.add_nibble(nibble, nibble_len)), - found_pfx, - ) - } - - // This function looks for the exactly matching prefix in the provided - // nibble. It doesn't need to iterate over anything it just compares - // the complete nibble, with the appropriate bits in the requested - // prefix. Although this is rather efficient, there's no way to collect - // less-specific prefixes from the search prefix. - pub(crate) fn search_stride_for_exact_match_at( - &'_ self, - search_pfx: PrefixId, - nibble: u32, - nibble_len: u8, - start_bit: u8, - _: &mut Option>>, - ) -> (Option>, Option>) { - let pfxbitarr = self.pfxbitarr.load(); - let ptrbitarr = self.ptrbitarr.load(); - // This is an exact match, so we're only considering the position of - // the full nibble. 
- let bit_pos = S::get_bit_pos(nibble, nibble_len); - let mut found_pfx = None; - let mut found_child = None; - - // Is this the last nibble? - // Otherwise we're not looking for a prefix (exact matching only - // lives at last nibble) - match search_pfx.get_len() <= start_bit + nibble_len { - // We're at the last nibble. - true => { - // Check for an actual prefix at the right position, i.e. - // consider the complete nibble. - if pfxbitarr & bit_pos > <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() { - let f_pfx = PrefixId::new(search_pfx.get_net().truncate_to_len(start_bit + nibble_len), start_bit + nibble_len); - found_pfx = Some(f_pfx); - } - } - // We're not at the last nibble. - false => { - // Check for a child node at the right position, i.e. - // consider the complete nibble. - if (S::into_stride_size(ptrbitarr) & bit_pos) > <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() - { - found_child = Some( - StrideNodeId::new_with_cleaned_id(search_pfx.get_net(), start_bit + nibble_len) - ); - } - } - } - - ( - found_child, /* The node that has children in the next stride, if - any */ - found_pfx, /* The exactly matching prefix, if any */ - ) - } - - // This function looks for the exactly matching prefix in the provided - // nibble, just like the one above, but this *does* iterate over all the - // bytes in the nibble to collect the less-specific prefixes of the the - // search prefix. This is of course slower, so it should only be used - // when the user explicitly requests less-specifics. 
- pub(crate) fn search_stride_for_exact_match_with_less_specifics_at( - &self, - search_pfx: PrefixId, - mut nibble: u32, - nibble_len: u8, - start_bit: u8, - less_specifics_vec: &mut Option>>, - ) -> (Option>, Option>) { - let pfxbitarr = self.pfxbitarr.load(); - let ptrbitarr = self.ptrbitarr.load(); - let mut bit_pos = S::get_bit_pos(nibble, nibble_len); - let mut found_pfx = None; - - let ls_vec = less_specifics_vec - .as_mut() - .expect(concat!("You shouldn't call this function without", - "a `less_specifics_vec` buffer. Supply one when calling this function", - "or use `search_stride_for_exact_match_at`")); - - for n_l in 1..(nibble_len + 1) { - // Move the bit in the right position. - nibble = - AddressFamily::get_nibble(search_pfx.get_net(), start_bit, n_l); - bit_pos = S::get_bit_pos(nibble, n_l); - - // Check if the prefix has been set, if so select the prefix. - // This is not necessarily the final prefix that will be - // returned. - - // Check it there's a prefix matching in this bitmap for this - // nibble. - if pfxbitarr & bit_pos > <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() { - // since we want an exact match only, we will fill the prefix - // field only if we're exactly at the last bit of the nibble - if n_l == nibble_len { - let f_pfx = - PrefixId::new( - search_pfx.get_net().truncate_to_len(start_bit + n_l), start_bit + n_l); - found_pfx = Some(f_pfx); - } - - // Receiving a less_specifics_vec means that the user wants to - // have all the last-specific prefixes returned, so add the - // found prefix. - let f_pfx = PrefixId::new(search_pfx.get_net().truncate_to_len(start_bit + n_l), start_bit + n_l); - ls_vec.push(f_pfx); - } - } - - if found_pfx.is_none() { - // no prefix here, clear out all of the prefixes we found along - // the way, since it doesn't make sense to return less-specifics - // if we don't have a exact match. 
- ls_vec.clear(); - } - - // Check if this the last stride, or if they're no more children to - // go to, if so return what we found up until now. - match search_pfx.get_len() <= start_bit + nibble_len - || (S::into_stride_size(ptrbitarr) & bit_pos) - == <<::AtomicPfxSize as AtomicBitmap>::InnerType as std::ops::BitAnd>::Output::zero() - { - // No children or at the end, return the definitive LMP we found. - true => ( - None, /* no more children */ - found_pfx, /* The definitive LMP if any */ - ), - // There's another child, we won't return the found_pfx, since - // we're not at the last nibble and we want an exact match only. - false => ( - Some(StrideNodeId::new_with_cleaned_id(search_pfx.get_net(), start_bit + nibble_len)), - None, - ), - } - } - - // Search a stride for more-specific prefixes and child nodes containing - // more specifics for `search_prefix`. - pub(crate) fn add_more_specifics_at( - &self, - nibble: u32, - nibble_len: u8, - base_prefix: StrideNodeId, - ) -> ( - Vec>, /* child nodes with more more-specifics in - this stride */ - Vec>, /* more-specific prefixes in this stride */ - ) { - trace!("start adding more specifics"); - let pfxbitarr = self.pfxbitarr.load(); - let ptrbitarr = self.ptrbitarr.load(); - trace!("ptrbitarr {:032b}", ptrbitarr); - trace!("pfxbitarr {:032b}", pfxbitarr); - let mut found_children_with_more_specifics = vec![]; - let mut found_more_specifics_vec: Vec> = vec![]; - - // This is an exact match, so we're only considering the position of - // the full nibble. - let mut bit_pos = S::get_bit_pos(nibble, nibble_len); - let mut found_child = None; - - // Is there also a child node here? - // Note that even without a child node, there may be more specifics - // further up in this pfxbitarr or children in this ptrbitarr. 
- if (S::into_stride_size(ptrbitarr) & bit_pos) - > <::AtomicPfxSize as AtomicBitmap>::InnerType::zero( - ) - { - found_child = Some(base_prefix.add_nibble(nibble, nibble_len)); - } - - if let Some(child) = found_child { - found_children_with_more_specifics.push(child); - } - - // We're expanding the search for more-specifics bit-by-bit. - // `ms_nibble_len` is the number of bits including the original - // nibble we're considering, e.g. if our prefix has a length of 25 - // and we've all strides sized 4, we would end up with a last - // nibble_len of 1. `ms_nibble_len` will expand then from 2 up and - // till 4. - // - // ex.: - // nibble: 1 , (nibble_len: 1) - // Iteration: - // ms_nibble_len=1,n_l=0: 10, n_l=1: 11 - // ms_nibble_len=2,n_l=0: 100, n_l=1: 101, n_l=2: 110, n_l=3: 111 - // ms_nibble_len=3,n_l=0: 1000, n_l=1: 1001, n_l=2: 1010, ..., - // n_l=7: 1111 - - for ms_nibble_len in nibble_len + 1..=S::STRIDE_LEN { - // iterate over all the possible values for this `ms_nibble_len`, - // e.g. two bits can have 4 different values. - for n_l in 0..(1 << (ms_nibble_len - nibble_len)) { - // move the nibble left with the amount of bits we're going - // to loop over. e.g. 
a stride of size 4 with a nibble 0000 - // 0000 0000 0011 becomes 0000 0000 0000 1100, then it will - // iterate over ...1100,...1101,...1110,...1111 - let ms_nibble = - (nibble << (ms_nibble_len - nibble_len)) + n_l as u32; - bit_pos = S::get_bit_pos(ms_nibble, ms_nibble_len); - - if (S::into_stride_size(ptrbitarr) & bit_pos) > <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() - { - found_children_with_more_specifics.push( - base_prefix.add_nibble(ms_nibble, ms_nibble_len) - ); - } - - if pfxbitarr & bit_pos > <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() { - found_more_specifics_vec.push( - base_prefix.add_nibble(ms_nibble, ms_nibble_len).into() ) - } - } - } - - trace!("found_children_with_more_specifics {:?}", found_children_with_more_specifics); - trace!("found_more_specifics_vec {:?}", found_more_specifics_vec); - - ( - // We're done here, the caller should now go over all nodes in - // found_children_with_more_specifics vec and add ALL prefixes - // found in there. - found_children_with_more_specifics, - found_more_specifics_vec, - ) - } -} - - -// ------------ Iterator methods -------------------------------------------- - -// ----------- NodeChildIter ------------------------------------------------ - -// create an iterator over all child nodes id -// -// we don't have a collection of local nodes anymore, since the id of the -// node are deterministically generated, as the prefix+len they represent -// in the treebitmap. This has both the advantage of using less memory, -// and being easier to use in a concurrently updated tree. The -// disadvantage is that we have to look up the child nodes on the fly -// when we want to iterate over all children of a node. -// -// ptr child nodes only exist at the last nibble of the stride size -// (`child_len`). Since children in the first nibbles are leaf nodes. -// leaf nodes will only be prefixes. So if we have a first stride of -// size 5, all ptr nodes wil have StrideNodeIds with len = 5. 
-// -// Ex.: -// -// Stride no. 1 2 3 4 5 6 7 -// StrideSize 5 5 4 3 3 3 3 -// child pfxs len /1-5 /5-10 /10-14 /15-17 /18-20 /21-23 /24-26 -// child Nodes len /5 /10 /14 /17 /20 /23 /26 -// -// Stride no. 8 9 -// StrideSize 3 3 -// child pfxs len /27-29 /30-32 -// child Nodes len /29 /32 - -#[derive(Debug, Copy, Clone)] -pub(crate) struct NodeChildIter { - base_prefix: StrideNodeId, - ptrbitarr: <::AtomicPtrSize as AtomicBitmap>::InnerType, - bit_span: BitSpan, // start with 0 - _af: PhantomData, -} - -impl std::iter::Iterator for - NodeChildIter -{ - type Item = StrideNodeId; - fn next(&mut self) -> Option { - // iterate over all the possible values for this stride length, e.g. - // two bits can have 4 different values. - for cursor in self.bit_span.bits..(1 << S::STRIDE_LEN) { - // move the bit_span left with the amount of bits we're going to - // loop over. - // e.g. a stride of size 4 with a nibble 0000 0000 0000 0011 - // becomes 0000 0000 0000 1100, then it will iterate over - // ...1100,...1101,...1110,...1111 - let bit_pos = S::get_bit_pos(cursor, S::STRIDE_LEN); - if (S::into_stride_size(self.ptrbitarr) & bit_pos) > - <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() - { - self.bit_span.bits = cursor + 1; - return Some(self.base_prefix.add_nibble(cursor, S::STRIDE_LEN)); - } - - } - None - } -} - -// ----------- NodeMoreSpecificChildIter ------------------------------------ - -// Create an iterator over all the child nodes that hold a more specific -// prefixes of the specified start_bit_span. This basically the same Iterator -// as the ChildNodeIter, except that it stops (potentially) earlier, to avoid -// including nodes with adjacent prefixes. Starting an iterator with a -// `start_bit_span` of { bits: 0, len: 0 } will return all child nodes of -// this node. In that case you could also use the `NodeChildIter` instead. 
-// -// inputs -// -// `base_prefix` -// This iterator take a `base_prefix` since the nodes themselves have no -// knowledge of their own prefixes, those are inferred by their position in -// the tree (therefore, it's actually a Trie). Note that `base_prefix` + -// `bit_span` define the actual starting prefix for this iterator. -// -// `ptrbitarr` -// is the bitmap that holds the slots that have child nodes. -// -// `start_bit_span` -// is the bit span that is going to be used as a starting point for the -// iterator. -// -// `cursor` -// holds the current cursor offset from the start_bit_span.bits, the sum of -// these describe the current position in the bitmap. Used for re-entry into -// the iterator. A new iterator should start with None. -// -// How this works -// -// The iterator starts at the start_bit_span.bits position in the bitmap and -// advances until it reaches either a one in the bitmap, or the maximum -// position for the particular more-specifics for this bit_span. -// -// e.x. -// The stride size is 5 and the starting bit span is {bits: 2, len: 4} (0010) -// The starting point is therefore the bit_array 0010. The iterator will go -// over 0010 0 and 0010 1. The next bits to consider would be 0011 0 which -// would not fit our starting bit_span of 0010. So we have to stop after 2 -// iterations. This means that the number of iterations is determined by the -// difference between the number of bits in the stride size (5) and the the -// number of bits in the start_bit_span (4). The number of iterations in the -// above example is therefore 1 << (5 - 4) = 2. Remember that a ptrbitarr -// holds only one stride size (the largest for its stride size), so we're -// done now. 
-#[derive(Debug, Copy, Clone)] -pub(crate) struct NodeMoreSpecificChildIter { - base_prefix: StrideNodeId, - ptrbitarr: <::AtomicPtrSize as AtomicBitmap>::InnerType, - start_bit_span: BitSpan, - cursor: Option, -} - -impl std::iter::Iterator for - NodeMoreSpecificChildIter -{ - type Item = StrideNodeId; - fn next(&mut self) -> Option { - // Early exits - - // Empty bitmap - if self.ptrbitarr == <::AtomicPtrSize as AtomicBitmap>::InnerType::zero() { - trace!("empty ptrbitrarr. this iterator is done."); - return None; - } - - // Previous iteration incremented the cursor beyond the stride size. - if let Some(cursor) = self.cursor { - if cursor >= (1 << (S::STRIDE_LEN - self.start_bit_span.len)) { - trace!("cursor.bits >= (1 << (S::STRIDE_LEN - self.start_bit_span.len))"); - trace!("cursor: {}", cursor); - trace!("start_bit_span: {} {}", self.start_bit_span.bits, self.start_bit_span.len); - return None; - } - } - - // No early exits, we're in business. - trace!("NodeMoreSpecificChildIter"); - trace!("base_prefix {}", self.base_prefix); - trace!("stride_size {}", S::STRIDE_LEN); - trace!("start_bit_span bits {} len {} ", self.start_bit_span.bits, self.start_bit_span.len); - trace!("cursor bits {:?}", self.cursor); - trace!(" x1 4 8 12 16 20 24 28 32"); - trace!("ptrbitarr {:032b}", self.ptrbitarr); - - let start = if let Some(bits) = self.cursor { bits } else { self.start_bit_span.bits }; - // We either stop if we have reached the maximum number of bits that - // we should check for this bit_span or we stop at the end of the - // stride (in case of a start_bit_span.bits == 0). - let stop = ::min((1 << (S::STRIDE_LEN - self.start_bit_span.len)) + start, (1 << S::STRIDE_LEN) - 1); - - trace!("start {:?} stop {}", start, stop); - for cursor in start..=stop { - // move the bit_span left with the amount of bits we're going to loop over. - // e.g. 
a stride of size 4 with a nibble 0000 0000 0000 0011 - // becomes 0000 0000 0000 1100, then it will iterate over - // ...1100,...1101,...1110,...1111 - let bit_pos = - S::get_bit_pos( - cursor, S::STRIDE_LEN); - trace!("cmpbitarr x{:032b} {}", bit_pos, stop - start); - if (S::into_stride_size(self.ptrbitarr) & bit_pos) > - <::AtomicPfxSize as AtomicBitmap - >::InnerType::zero() - { - trace!("bingo!"); - self.cursor = Some(cursor + 1); - - trace!("next bit_span {} {} with cursor {:?}", self.start_bit_span.bits, self.start_bit_span.len, self.cursor); - return Some(self.base_prefix.add_nibble(cursor, S::STRIDE_LEN)); - } - } - trace!("No more nodes. End of the iterator."); - None - } -} - -impl NodeMoreSpecificChildIter { - pub fn wrap(self) -> SizedNodeMoreSpecificIter { - SizedNodeMoreSpecificIter::::Stride3(self) - } -} - -impl NodeMoreSpecificChildIter { - pub fn wrap(self) -> SizedNodeMoreSpecificIter { - SizedNodeMoreSpecificIter::::Stride4(self) - } -} - -impl NodeMoreSpecificChildIter { - pub fn wrap(self) -> SizedNodeMoreSpecificIter { - SizedNodeMoreSpecificIter::::Stride5(self) - } -} - - -// ----------- NodePrefixIter ----------------------------------------------- - -// Create an iterator of all prefix ids hosted by this node. - -// Partition for stride 3 -// -// pfxbitarr (AF::BITS) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 -// bit_span (binary) * 0 1 00 01 10 11 000 001 010 011 100 101 110 111 * * -// bit_span (dec.) * 0 1 0 1 2 3 0 1 2 3 4 5 6 7 * * -// len 0 1 2 3 -// -// pfxbitarr (example) 1 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 -// pos (example) 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 -// -// Ex.: -// `pos` describes the bit that is currently under consideration. -// -// `pfxbitarr` is the bitmap that contains the prefixes. Every 1 in the -// bitmap means that the prefix is hosted by this node. Moreover, the -// position in the bitmap describes the address part of the prefix, given -// a `base prefix`. 
The described prefix is the bits of the `base_prefix` -// bitmap appended by the `bit span` bits. -// -// The length of the prefix is -// described by sum of the length of the base_prefix and the `len` -// variable. -// -// The `bit_span` variable starts counting at every new prefix length. -pub(crate) struct NodePrefixIter { - base_prefix: StrideNodeId, - pfxbitarr: <::AtomicPfxSize as AtomicBitmap>::InnerType, - bit_span: BitSpan, // start with 0 - _af: PhantomData, - _s: PhantomData, -} - -impl std::iter::Iterator for - NodePrefixIter { - type Item = PrefixId; - - fn next(&mut self) -> Option { - // iterate over all the possible values for this stride length, e.g. - // two bits can have 4 different values. - for cursor in self.bit_span.bits..(1 << S::STRIDE_LEN) { - - let bit_pos = S::get_bit_pos(cursor, S::STRIDE_LEN); - if self.pfxbitarr & bit_pos > - <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() - { - self.bit_span.bits = cursor + 1; - return Some(self.base_prefix.add_nibble(cursor, S::STRIDE_LEN).into()); - } - - } - None - } -} - -// Creates an Iterator that returns all prefixes that exist in a node that -// are a more-specific prefix of the `base_prefix` + `start_bit_span`. -// -// Inputs -// -// `base_prefix` -// This iterator take a `base_prefix` since the nodes themselves have no -// knowledge of their own prefixes, those are inferred by their position in -// the tree (therefore, it's actually a Trie). Note that `base_prefix` + -// `bit_span` define the actual starting prefix for this iterator. -// -// `pfxbitarr` -// is the bitmap that holds the slots that have prefixes. -// -// `start_bit_span` -// is the bit span that is going to be used as a starting point for the -// iterator. -// -// `cursor` -// holds the current cursor offset from the start_bit_span.bits, the sum of -// these describe the current position in the bitmap. Used for re-entry into -// the iterator. A new iterator should start with None. 
-// -// How this works -// -// The iterator starts at the start_bit_span.bits position in the bitmap and -// advances until it reaches either a one in the bitmap, or the maximum -// position for the particular more-specifics for this bit_span. When it -// reaches the maximum position it determines whether there are more stride- -// sizes available in this bitmap. If there are, it advances to the next -// stride-size in the first position. If not it terminates the iterator. -// -// e.x. -// The stride size is 5 and the starting bit span is {bits: 1, len: 3} (001) -// This means that the stride size that we have to consider are 4 and 5. 3 -// being the size of the current bit_span and 5 being the size of the total -// stride. -// The starting point is therefore the bit_array 001. The iterator will go -// over 001 00, 001 01, 001 10 and 001 11. The next bits to consider would be -// 010 00 which would not fit our starting bit_span of 0010. So we have to -// stop after 2 iterations. This means that the number of iterations is -// determined by the difference between the number of bits in the stride size -// (5) and the the number of bits in the start_bit_span (4). The number of -// iterations in the above example is therefore 1 << (5 - 3) = 4. -// Unlike the MoreSpecificPrefixIter, we will have to consider more lengths -// than just the bit_span len. We will have to jump a few pfxbitarr bits and -// move to the next stride size in the bitmap, starting at bit_array 0010, or -// the bit_span { bits: 2, len: 3 }, a.k.a. 0010 << 1. But now we will have -// to go over a different amount of 1 << (5 - 4) = 2 iterations to reap the -// next bit_spans of 0010 0 and 0010 1. 
- -pub(crate) struct NodeMoreSpecificsPrefixIter { - // immutables - base_prefix: StrideNodeId, - pfxbitarr: <::AtomicPfxSize - as crate::local_array::atomic_stride::AtomicBitmap>::InnerType, - // we need to keep around only the `bits` part of the `bit_span` - // technically, (it needs resetting the current state to it after each - // prefix-length), but we'll keep the start-length as well for clarity - // and increment it on a different field ('cur_len'). - start_bit_span: BitSpan, - cursor: BitSpan, - skip_self: bool, - _s: PhantomData, -} - -impl std::iter::Iterator for - NodeMoreSpecificsPrefixIter { - type Item = PrefixId; - - fn next(&mut self) -> Option { - - // Easy early exit conditions - - // Empty bitmap - if self.pfxbitarr == <::AtomicPfxSize as AtomicBitmap>::InnerType::zero() { - trace!("empty pfxbitarr. This iterator is done."); - return None; - } - - // No early exits, We're in business. - trace!("len_offset {}", ((1<< self.cursor.len) - 1)); - trace!("start_bit {}", self.start_bit_span.bits); - trace!("number of check bits in len {}", - (1 << (self.cursor.len - self.start_bit_span.len))); - - trace!("next more specifics prefix iter start bits {} len {}", - self.start_bit_span.bits, self.start_bit_span.len); - - let mut res = None; - - // Move to the next len if we're at the first prefix-length that matches, - // if `skip_self` is set. Typically this flag is set for the first stride. - // In the consecutive strides, we don't want to skip the base prefix, since - // that base_prefix is a more specific prefix of the one requested in the - // first stride. 
- if self.skip_self && (1 << (self.cursor.len - self.start_bit_span.len)) == 1 { - // self.cursor.len += (self.start_bit_span.bits & 1) as u8; - trace!("skipping self"); - self.start_bit_span.bits <<= 1; - self.cursor.bits = self.start_bit_span.bits; - self.cursor.len += 1; - self.skip_self = false; - trace!("new start bits {} len {}", self.start_bit_span.bits, self.start_bit_span.len); - trace!("new cursor bits {} len {}", self.cursor.bits, self.cursor.len); - } - - // Previous iteration or the skip_self routine may have - // incremented the cursor beyond the end of the stride size. - if self.cursor.len > S::STRIDE_LEN { - trace!("cursor.len > S::STRIDE_LEN. This iterator is done."); - return None; - } - - loop { - trace!(" x1 4 8 12 16 20 24 28 32 36 40 44 48 52 56 60 64"); - trace!("cmpnibble {:064b} ({} + {}) len {} stride_size {}", - S::get_bit_pos(self.cursor.bits, self.cursor.len), - (1<< self.cursor.len) - 1, - self.cursor.bits, - self.cursor.len + self.base_prefix.get_len(), - S::STRIDE_LEN - ); - - trace!("pfxbitarr {:064b}", self.pfxbitarr); - - if (S::get_bit_pos(self.cursor.bits, self.cursor.len) | self.pfxbitarr) == self.pfxbitarr { - trace!("found prefix with len {} at pos {} pfx len {}", - self.cursor.len, - self.cursor.bits, - self.base_prefix.get_len() + self.cursor.len, - ); - res = Some(self.base_prefix - .add_nibble(self.cursor.bits, self.cursor.len).into()); - trace!("found prefix {:?}", res); - } - - // Determine when we're at the end of the bits applicable to - // this combo of this start_bit_span. 
- // bitspan offset: - // self.start_bit_span.bits - // number of matches in this length: - // 1 << (self.cursor.len - self.start_bit_span.len) - let max_pos_offset = - self.start_bit_span.bits + - (1 << (self.cursor.len - self.start_bit_span.len)) - 1; - - trace!("max_pos_offset {} > cursor bit_pos {}?", max_pos_offset, self.cursor.bits); - trace!("number of check bits in len {}", (1 << (self.cursor.len - self.start_bit_span.len))); - - // case 1. At the beginning or inside a prefix-length. - if max_pos_offset > self.cursor.bits { - self.cursor.bits += 1; - } - // case 2. At the end of a prefix-lengths in this stride. - else if self.cursor.len < S::STRIDE_LEN { - trace!("move len to {}", self.cursor.len + 1); - self.start_bit_span.bits <<= 1; - self.cursor.bits = self.start_bit_span.bits; - self.cursor.len += 1; - } - // case 4. At the end of a prefix-length, but not at the end of the pfxbitarr. - else { - self.start_bit_span.bits <<= 1; - self.cursor.bits = self.start_bit_span.bits; - self.cursor.len += 1; - trace!("end of stride, next cursor bits {} len {}", self.cursor.bits, self.cursor.len); - return res; - } - - trace!("some res {:?}", res); - if res.is_some() { return res; } - } - } -} - -impl NodeMoreSpecificsPrefixIter { - pub fn wrap(self) -> SizedPrefixIter { - SizedPrefixIter::::Stride3(self) - } -} - -impl NodeMoreSpecificsPrefixIter { - pub fn wrap(self) -> SizedPrefixIter { - SizedPrefixIter::::Stride4(self) - } -} - -impl NodeMoreSpecificsPrefixIter { - pub fn wrap(self) -> SizedPrefixIter { - SizedPrefixIter::Stride5(self) - } -} diff --git a/src/local_array/query.rs b/src/local_array/query.rs deleted file mode 100644 index 8411a04f..00000000 --- a/src/local_array/query.rs +++ /dev/null @@ -1,816 +0,0 @@ -use std::sync::atomic::Ordering; - -use crossbeam_epoch::{self as epoch}; -use epoch::Guard; - -use crate::af::AddressFamily; -use crate::local_array::store::atomic_types::{NodeBuckets, PrefixBuckets}; -use crate::prefix_record::{Meta, 
PublicRecord}; -use inetnum::addr::Prefix; - -use crate::QueryResult; - -use crate::local_array::node::TreeBitMapNode; -use crate::local_array::tree::TreeBitMap; -use crate::{MatchOptions, MatchType}; - -use super::node::{PrefixId, SizedStrideRef, StrideNodeId}; -use super::store::atomic_types::{RouteStatus, StoredPrefix}; - -//------------ Prefix Matching ---------------------------------------------- - -impl<'a, AF, M, NB, PB> TreeBitMap -where - AF: AddressFamily, - M: Meta, - NB: NodeBuckets, - PB: PrefixBuckets, -{ - pub fn more_specifics_from( - &'a self, - prefix_id: PrefixId, - mui: Option, - include_withdrawn: bool, - guard: &'a Guard, - ) -> QueryResult { - let result = self.store.non_recursive_retrieve_prefix(prefix_id); - let prefix = result.0; - let more_specifics_vec = self.store.more_specific_prefix_iter_from( - prefix_id, - mui, - include_withdrawn, - guard, - ); - - QueryResult { - prefix: if let Some(pfx) = prefix { - Prefix::new( - pfx.prefix.get_net().into_ipaddr(), - pfx.prefix.get_len(), - ) - .ok() - } else { - None - }, - prefix_meta: prefix - .map(|r| self.get_filtered_records(r, mui, guard)) - .unwrap_or_default(), - match_type: MatchType::EmptyMatch, - less_specifics: None, - more_specifics: Some(more_specifics_vec.collect()), - } - } - - pub fn less_specifics_from( - &'a self, - prefix_id: PrefixId, - mui: Option, - include_withdrawn: bool, - guard: &'a Guard, - ) -> QueryResult { - let result = self.store.non_recursive_retrieve_prefix(prefix_id); - - let prefix = result.0; - let less_specifics_vec = result.1.map( - |(prefix_id, _level, _cur_set, _parents, _index)| { - self.store.less_specific_prefix_iter( - prefix_id, - mui, - include_withdrawn, - guard, - ) - }, - ); - - QueryResult { - prefix: if let Some(pfx) = prefix { - Prefix::new( - pfx.prefix.get_net().into_ipaddr(), - pfx.prefix.get_len(), - ) - .ok() - } else { - None - }, - prefix_meta: prefix - .map(|r| self.get_filtered_records(r, mui, guard)) - .unwrap_or_default(), - 
match_type: MatchType::EmptyMatch, - less_specifics: less_specifics_vec.map(|iter| iter.collect()), - more_specifics: None, - } - } - - pub fn more_specifics_iter_from( - &'a self, - prefix_id: PrefixId, - mui: Option, - include_withdrawn: bool, - guard: &'a Guard, - ) -> Result< - impl Iterator, Vec>)> + '_, - std::io::Error, - > { - Ok(self.store.more_specific_prefix_iter_from( - prefix_id, - mui, - include_withdrawn, - guard, - )) - } - - pub fn match_prefix_by_store_direct( - &'a self, - search_pfx: PrefixId, - options: &MatchOptions, - mui: Option, - guard: &'a Guard, - ) -> QueryResult { - // `non_recursive_retrieve_prefix` returns an exact match - // only, so no longest matching prefix! - let mut stored_prefix = - self.store.non_recursive_retrieve_prefix(search_pfx).0.map( - |pfx| { - ( - pfx.prefix, - if !options.include_withdrawn { - // Filter out all the withdrawn records, both with - // globally withdrawn muis, and with local statuses - // set to Withdrawn. - self.get_filtered_records(pfx, mui, guard) - .into_iter() - .collect() - } else { - // Do no filter out any records, but do rewrite the - // local statuses of the records with muis that - // appear in the specified bitmap index. - pfx.record_map.as_records_with_rewritten_status( - unsafe { - self.store - .withdrawn_muis_bmin - .load(Ordering::Acquire, guard) - .deref() - }, - RouteStatus::Withdrawn, - ) - }, - ) - }, - ); - - // Check if we have an actual exact match, if not then fetch the - // first lesser-specific with the greatest length, that's the Longest - // matching prefix, but only if the user requested a longest match or - // empty match. - let match_type = match (&options.match_type, &stored_prefix) { - // we found an exact match, we don't need to do anything. - (_, Some((_pfx, meta))) if !meta.is_empty() => { - MatchType::ExactMatch - } - // we didn't find an exact match, but the user requested it - // so we need to find the longest matching prefix. 
- (MatchType::LongestMatch | MatchType::EmptyMatch, _) => { - stored_prefix = self - .store - .less_specific_prefix_iter( - search_pfx, - mui, - options.include_withdrawn, - guard, - ) - .max_by(|p0, p1| p0.0.get_len().cmp(&p1.0.get_len())); - if stored_prefix.is_some() { - MatchType::LongestMatch - } else { - MatchType::EmptyMatch - } - } - // We got an empty match, but the user requested an exact match, - // even so, we're going to look for more and/or less specifics if - // the user asked for it. - (MatchType::ExactMatch, _) => MatchType::EmptyMatch, - }; - - QueryResult { - prefix: stored_prefix.as_ref().map(|p| p.0.into_pub()), - prefix_meta: stored_prefix - .as_ref() - .map(|pfx| pfx.1.clone()) - .unwrap_or_default(), - less_specifics: if options.include_less_specifics { - Some( - self.store - .less_specific_prefix_iter( - if let Some(ref pfx) = stored_prefix { - pfx.0 - } else { - search_pfx - }, - mui, - options.include_withdrawn, - guard, - ) - .collect(), - ) - } else { - None - }, - more_specifics: if options.include_more_specifics { - Some( - self.store - .more_specific_prefix_iter_from( - if let Some(pfx) = stored_prefix { - pfx.0 - } else { - search_pfx - }, - mui, - options.include_withdrawn, - guard, - ) - // .map(|p| (p.prefix_into_pub(), p)) - .collect(), - ) - // The user requested more specifics, but there aren't any, so we - // need to return an empty vec, not a None. - } else { - None - }, - match_type, - } - } - - // In a LMP search we have to go over all the nibble lengths in the - // stride up until the value of the actual nibble length were looking for - // (until we reach stride length for all strides that aren't the last) - // and see if the prefix bit in that position is set. Note that this does - // not search for prefixes with length 0 (which would always match). - // So for matching a nibble 1010, we have to search for 1, 10, 101 and - // 1010 on resp. 
position 1, 5, 12 and 25: - // ↓ ↓ ↓ - // nibble * 0 1 00 01 10 11 000 001 010 011 100 101 110 111 - // nibble len offset 0 1 2 3 - // - // (contd.) - // pfx bit arr (u32) 15 16 17 18 19 20 21 22 23 24 - // nibble 0000 0001 0010 0011 0100 0101 0110 0111 1000 1001 - // nibble len offset 4 - // - // (contd.) ↓ - // pfx bit arr (u32) 25 26 27 28 29 30 31 - // nibble 1010 1011 1100 1101 1110 1111 x - // nibble len offset 4(contd.) - - pub fn match_prefix_by_tree_traversal( - &'a self, - search_pfx: PrefixId, - options: &MatchOptions, - // guard: &'a Guard, - ) -> QueryResult { - // --- The Default Route Prefix ------------------------------------- - - // The Default Route Prefix unfortunately does not fit in tree as we - // have it. There's no room for it in the pfxbitarr of the root node, - // since that can only contain serial numbers for prefixes that are - // children of the root node. We, however, want the default prefix - // which lives on the root node itself! We are *not* going to return - // all of the prefixes in the tree as more-specifics. 
- if search_pfx.get_len() == 0 { - match self.store.load_default_route_prefix_serial() { - 0 => { - return QueryResult { - prefix: None, - prefix_meta: vec![], - match_type: MatchType::EmptyMatch, - less_specifics: None, - more_specifics: None, - }; - } - - _serial => { - let prefix_meta = self - .store - .retrieve_prefix(PrefixId::new(AF::zero(), 0)) - .map(|sp| sp.0.record_map.as_records()) - .unwrap_or_default(); - return QueryResult { - prefix: Prefix::new( - search_pfx.get_net().into_ipaddr(), - search_pfx.get_len(), - ) - .ok(), - prefix_meta, - match_type: MatchType::ExactMatch, - less_specifics: None, - more_specifics: None, - }; - } - } - } - - let mut stride_end = 0; - - let root_node_id = self.get_root_node_id(); - let mut node = match self.store.get_stride_for_id(root_node_id) { - 3 => self.store.retrieve_node(root_node_id).unwrap(), - 4 => self.store.retrieve_node(root_node_id).unwrap(), - _ => self.store.retrieve_node(root_node_id).unwrap(), - }; - - let mut nibble; - let mut nibble_len; - - //---- result values ------------------------------------------------ - - // These result values are kept in mutable variables, and assembled - // at the end into a QueryResult struct. This proved to result in the - // most efficient code, where we don't have to match on - // SizedStrideNode over and over. The `match_type` field in the - // QueryResult is computed at the end. - - // The final prefix - let mut match_prefix_idx: Option> = None; - - // The indexes of the less-specifics - let mut less_specifics_vec = if options.include_less_specifics { - Some(Vec::>::new()) - } else { - None - }; - - // The indexes of the more-specifics. - let mut more_specifics_vec = if options.include_more_specifics { - Some(Vec::>::new()) - } else { - None - }; - - //---- Stride Processing -------------------------------------------- - - // We're going to iterate over all the strides in the treebitmap (so - // up to the last bit in the max prefix length for that tree). 
When - // a final prefix is found or we get to the end of the strides, - // depending on the options.match_type (the type requested by the - // user). we ALWAYS break out of the loop. WE ALWAYS BREAK OUT OF THE - // LOOP. Just before breaking some processing is done inside the loop - // before the break (looking up more-specifics mainly), which looks a - // bit repetitious, but again it's been done like that to avoid - // having to match over a SizedStrideNode again in the - // `post-processing` section. - - for stride in self.store.get_stride_sizes() { - stride_end += stride; - - let last_stride = search_pfx.get_len() < stride_end; - - nibble_len = if last_stride { - stride + search_pfx.get_len() - stride_end - } else { - *stride - }; - - // Shift left and right to set the bits to zero that are not - // in the nibble we're handling here. - nibble = AddressFamily::get_nibble( - search_pfx.get_net(), - stride_end - stride, - nibble_len, - ); - - match node { - SizedStrideRef::Stride3(current_node) => { - let search_fn = match options.match_type { - MatchType::ExactMatch => { - if options.include_less_specifics { - TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at - } else { - TreeBitMapNode::search_stride_for_exact_match_at - } - } - MatchType::LongestMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - MatchType::EmptyMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - }; - - // This whole match assumes that: - // - if the first value in the return tuple of - // `search_fn` holds a value, then we need to continue - // searching by following the node contained in the - // value. - // - The second value in the tuple holds the prefix that - // was found. - // The less_specifics_vec is mutated by `search_fn` to - // hold the prefixes found along the way, in the cases - // where `include_less_specifics` was requested by the - // user. 
- match search_fn( - current_node, - search_pfx, - nibble, - nibble_len, - stride_end - stride, - &mut less_specifics_vec, - ) { - // This and the next match will handle all - // intermediary nodes, but they might also handle - // exit nodes. - (Some(n), Some(pfx_idx)) => { - match_prefix_idx = Some(pfx_idx); - node = self.store.retrieve_node(n).unwrap(); - - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - } - break; - } - } - (Some(n), None) => { - node = self.store.retrieve_node(n).unwrap(); - - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - } - break; - } - } - // This handles exact and longest matches: there are - // no more children, but there is a prefix on this - // node. - (None, Some(pfx_idx)) => { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - } - match_prefix_idx = Some(pfx_idx); - break; - } - // This handles cases where there's no prefix (and no - // child) for exact match or longest match, the empty - // match - which doesn't care about actually finding - // a prefix - just continues in search of - // more-specifics. - (None, None) => { - match options.match_type { - MatchType::EmptyMatch => { - // To make sure we don't process this - // match arm more then once, we return - // early here. 
- more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - - match_prefix_idx = None; - break; - } - MatchType::LongestMatch => {} - MatchType::ExactMatch => { - match_prefix_idx = None; - } - } - break; - } - } - } - //---- From here only repetitions for all strides ----------- - // For comments see the code above for the Stride3 arm. - SizedStrideRef::Stride4(current_node) => { - let search_fn = match options.match_type { - MatchType::ExactMatch => { - if options.include_less_specifics { - TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at - } else { - TreeBitMapNode::search_stride_for_exact_match_at - } - } - MatchType::LongestMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - MatchType::EmptyMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - }; - match search_fn( - current_node, - search_pfx, - nibble, - nibble_len, - stride_end - stride, - &mut less_specifics_vec, - ) { - (Some(n), Some(pfx_idx)) => { - match_prefix_idx = Some(pfx_idx); - node = self.store.retrieve_node(n).unwrap(); - - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - } - break; - } - } - (Some(n), None) => { - node = self.store.retrieve_node(n).unwrap(); - - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - } - break; - } - } - (None, Some(pfx_idx)) => { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - 
nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - } - match_prefix_idx = Some(pfx_idx); - break; - } - (None, None) => { - match options.match_type { - MatchType::EmptyMatch => { - // To make sure we don't process this match arm more then once, we - // return early here. - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - - match_prefix_idx = None; - break; - } - MatchType::LongestMatch => {} - MatchType::ExactMatch => { - match_prefix_idx = None; - } - } - break; - } - } - } - SizedStrideRef::Stride5(current_node) => { - let search_fn = match options.match_type { - MatchType::ExactMatch => { - if options.include_less_specifics { - TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at - } else { - TreeBitMapNode::search_stride_for_exact_match_at - } - } - MatchType::LongestMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - MatchType::EmptyMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - }; - match search_fn( - current_node, - search_pfx, - nibble, - nibble_len, - stride_end - stride, - &mut less_specifics_vec, - ) { - (Some(n), Some(pfx_idx)) => { - match_prefix_idx = Some(pfx_idx); - node = self.store.retrieve_node(n).unwrap(); - - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - } - break; - } - } - (Some(n), None) => { - node = self.store.retrieve_node(n).unwrap(); - - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - 
stride_end - stride, - ), - ); - } - break; - } - } - (None, Some(pfx_idx)) => { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - } - match_prefix_idx = Some(pfx_idx); - break; - } - (None, None) => { - match options.match_type { - MatchType::EmptyMatch => { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - StrideNodeId::new_with_cleaned_id( - search_pfx.get_net(), - stride_end - stride, - ), - ); - - match_prefix_idx = None; - break; - } - MatchType::LongestMatch => {} - MatchType::ExactMatch => { - match_prefix_idx = None; - } - } - break; - } - } - } - } - } - //------------------ end of Stride branch arm repetition ------------ - - //------------------ post-processing -------------------------------- - - // If the above loop finishes (so not hitting a break) we have - // processed all strides and have found a child node and maybe a - // prefix. Now we will look up more-specifics for longest-matching - // prefixes that were found in the last stride only. Note that still - // any of the match_types (as specified by the user, not the return - // type) may end up here. 
- - let mut match_type: MatchType = MatchType::EmptyMatch; - let prefix = None; - if let Some(pfx_idx) = match_prefix_idx { - match_type = match self.store.retrieve_prefix(pfx_idx) { - Some(prefix) => { - if prefix.0.prefix.get_len() == search_pfx.get_len() { - MatchType::ExactMatch - } else { - MatchType::LongestMatch - } - } - None => MatchType::EmptyMatch, - }; - }; - - QueryResult { - prefix: prefix.map(|pfx: (&StoredPrefix, usize)| { - pfx.0.prefix.into_pub() - }), - prefix_meta: prefix - .map(|pfx| pfx.0.record_map.as_records()) - .unwrap_or_default(), - match_type, - less_specifics: if options.include_less_specifics { - less_specifics_vec - .unwrap() - .iter() - .filter_map(move |p| { - self.store.retrieve_prefix(*p).map(|p| { - Some((p.0.prefix, p.0.record_map.as_records())) - }) - }) - .collect() - } else { - None - }, - more_specifics: if options.include_more_specifics { - more_specifics_vec.map(|vec| { - vec.into_iter() - .map(|p| { - self.store - .retrieve_prefix(p) - .unwrap_or_else(|| { - panic!( - "more specific {:?} does not exist", - p - ) - }) - .0 - }) - .map(|sp| (sp.prefix, sp.record_map.as_records())) - .collect() - }) - } else { - None - }, - } - } - - // Helper to filter out records that are not-active (Inactive or - // Withdrawn), or whose mui appears in the global withdrawn index. 
- fn get_filtered_records( - &self, - pfx: &StoredPrefix, - mui: Option, - guard: &Guard, - ) -> Vec> { - let bmin = unsafe { - self.store - .withdrawn_muis_bmin - .load(Ordering::Acquire, guard) - .as_ref() - } - .unwrap(); - - pfx.record_map.get_filtered_records(mui, bmin) - } -} diff --git a/src/local_array/store/atomic_types.rs b/src/local_array/store/atomic_types.rs deleted file mode 100644 index c9e0c307..00000000 --- a/src/local_array/store/atomic_types.rs +++ /dev/null @@ -1,663 +0,0 @@ -use std::collections::HashMap; -use std::sync::{Arc, Mutex, MutexGuard, RwLock}; -use std::{ - fmt::{Debug, Display}, - sync::atomic::Ordering, -}; - -use crossbeam_epoch::{self as epoch, Atomic}; - -use crossbeam_utils::Backoff; -use log::{debug, log_enabled, trace}; - -use epoch::{Guard, Owned}; -use roaring::RoaringBitmap; - -use crate::local_array::tree::*; -use crate::prefix_record::PublicRecord; -use crate::prelude::Meta; -use crate::AddressFamily; - -use super::errors::PrefixStoreError; -use super::oncebox::OnceBoxSlice; - -// ----------- Node related structs ----------------------------------------- - -#[derive(Debug)] -pub struct StoredNode -where - Self: Sized, - S: Stride, - AF: AddressFamily, -{ - pub(crate) node_id: StrideNodeId, - // The ptrbitarr and pfxbitarr for this node - pub(crate) node: TreeBitMapNode, - // Child nodes linked from this node - pub(crate) node_set: NodeSet, -} - -#[allow(clippy::type_complexity)] -#[derive(Debug)] -pub struct NodeSet( - pub OnceBoxSlice>, - // A Bitmap index that keeps track of the `multi_uniq_id`s (mui) that are - // present in value collections in the meta-data tree in the child nodes - pub RwLock, -); - -impl NodeSet { - pub fn init(p2_size: u8) -> Self { - if log_enabled!(log::Level::Debug) { - debug!( - "{} store: creating space for {} nodes", - std::thread::current().name().unwrap_or("unnamed-thread"), - 1 << p2_size - ); - } - - NodeSet(OnceBoxSlice::new(p2_size), RoaringBitmap::new().into()) - } - - pub fn 
update_rbm_index( - &self, - multi_uniq_id: u32, - ) -> Result - where - S: crate::local_array::atomic_stride::Stride, - AF: crate::AddressFamily, - { - let try_count = 0; - let mut rbm = self.1.write().unwrap(); - rbm.insert(multi_uniq_id); - - Ok(try_count) - } - - pub fn remove_from_rbm_index( - &self, - multi_uniq_id: u32, - _guard: &crate::epoch::Guard, - ) -> Result - where - S: crate::local_array::atomic_stride::Stride, - AF: crate::AddressFamily, - { - let try_count = 0; - - let mut rbm = self.1.write().unwrap(); - rbm.remove(multi_uniq_id); - - Ok(try_count) - } -} - -// ----------- Prefix related structs --------------------------------------- - -#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub struct PathSelections { - // serial: usize, - pub(crate) path_selection_muis: (Option, Option), -} - -impl PathSelections { - pub fn best(&self) -> Option { - self.path_selection_muis.0 - } - - pub fn backup(&self) -> Option { - self.path_selection_muis.1 - } -} - -// ----------- StoredPrefix ------------------------------------------------- -// This is the top-level struct that's linked from the slots in the buckets. -// It contains a super_agg_record that is supposed to hold counters for the -// records that are stored inside it, so that iterators over its linked lists -// don't have to go into them if there's nothing there and could stop early. -#[derive(Debug)] -pub struct StoredPrefix { - // the serial number - // pub serial: usize, - // the prefix itself, - pub prefix: PrefixId, - // the aggregated data for this prefix - pub record_map: MultiMap, - // (mui of best path entry, mui of backup path entry) from the record_map - path_selections: Atomic, - // the reference to the next set of records for this prefix, if any. - pub next_bucket: PrefixSet, -} - -impl StoredPrefix { - pub(crate) fn new>( - pfx_id: PrefixId, - level: u8, - ) -> Self { - // start calculation size of next set, it's dependent on the level - // we're in. 
- // let pfx_id = PrefixId::new(record.net, record.len); - let this_level = PB::get_bits_for_len(pfx_id.get_len(), level); - let next_level = PB::get_bits_for_len(pfx_id.get_len(), level + 1); - - trace!("this level {} next level {}", this_level, next_level); - let next_bucket: PrefixSet = if next_level > 0 { - debug!( - "{} store: INSERT with new bucket of size {} at prefix len {}", - std::thread::current().name().unwrap_or("unnamed-thread"), - 1 << (next_level - this_level), - pfx_id.get_len() - ); - PrefixSet::init(next_level - this_level) - } else { - debug!( - "{} store: INSERT at LAST LEVEL with empty bucket at prefix len {}", - std::thread::current().name().unwrap_or("unnamed-thread"), - pfx_id.get_len() - ); - PrefixSet::init(next_level - this_level) - }; - // End of calculation - - let rec_map = HashMap::new(); - - StoredPrefix { - // serial: 1, - prefix: pfx_id, - path_selections: Atomic::init(PathSelections { - path_selection_muis: (None, None), - }), - record_map: MultiMap::new(rec_map), - next_bucket, - } - } - - pub(crate) fn get_prefix_id(&self) -> PrefixId { - self.prefix - } - - pub fn get_path_selections(&self, guard: &Guard) -> PathSelections { - let path_selections = - self.path_selections.load(Ordering::Acquire, guard); - - unsafe { path_selections.as_ref() }.map_or( - PathSelections { - path_selection_muis: (None, None), - }, - |ps| *ps, - ) - } - - pub(crate) fn set_path_selections( - &self, - path_selections: PathSelections, - guard: &Guard, - ) -> Result<(), PrefixStoreError> { - let current = self.path_selections.load(Ordering::SeqCst, guard); - - if unsafe { current.as_ref() } == Some(&path_selections) { - debug!("unchanged path_selections"); - return Ok(()); - } - - self.path_selections - .compare_exchange( - current, - // Set the tag to indicate we're updated - Owned::new(path_selections).with_tag(0), - Ordering::AcqRel, - Ordering::Acquire, - guard, - ) - .map_err(|_| PrefixStoreError::PathSelectionOutdated)?; - Ok(()) - } - - pub fn 
set_ps_outdated( - &self, - guard: &Guard, - ) -> Result<(), PrefixStoreError> { - self.path_selections - .fetch_update(Ordering::Acquire, Ordering::Acquire, guard, |p| { - Some(p.with_tag(1)) - }) - .map(|_| ()) - .map_err(|_| PrefixStoreError::StoreNotReadyError) - } - - pub fn is_ps_outdated(&self, guard: &Guard) -> bool { - self.path_selections.load(Ordering::Acquire, guard).tag() == 1 - } - - pub fn calculate_and_store_best_backup<'a>( - &'a self, - tbi: &M::TBI, - guard: &'a Guard, - ) -> Result<(Option, Option), super::errors::PrefixStoreError> - { - let path_selection_muis = self.record_map.best_backup(*tbi); - - self.set_path_selections( - PathSelections { - path_selection_muis, - }, - guard, - )?; - - Ok(path_selection_muis) - } - - pub(crate) fn get_next_bucket(&self) -> Option<&PrefixSet> { - if self.next_bucket.is_empty() { - None - } else { - Some(&self.next_bucket) - } - } -} - -#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq)] -pub enum RouteStatus { - Active, - InActive, - Withdrawn, -} - -impl std::fmt::Display for RouteStatus { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - RouteStatus::Active => write!(f, "active"), - RouteStatus::InActive => write!(f, "inactive"), - RouteStatus::Withdrawn => write!(f, "withdrawn"), - } - } -} - -#[derive(Clone, Debug)] -pub(crate) struct MultiMapValue { - pub meta: M, - pub ltime: u64, - pub status: RouteStatus, -} - -impl MultiMapValue { - pub(crate) fn _new(meta: M, ltime: u64, status: RouteStatus) -> Self { - Self { - meta, - ltime, - status, - } - } -} - -impl std::fmt::Display for MultiMapValue { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{} {} {}", self.meta, self.ltime, self.status) - } -} - -impl From> for MultiMapValue { - fn from(value: PublicRecord) -> Self { - Self { - meta: value.meta, - ltime: value.ltime, - status: value.status, - } - } -} - -// ----------- MultiMap 
------------------------------------------------------ -// This is the record that holds the aggregates at the top-level for a given -// prefix. - -#[derive(Debug)] -pub struct MultiMap( - pub(crate) Arc>>>, -); - -impl MultiMap { - pub(crate) fn new(record_map: HashMap>) -> Self { - Self(Arc::new(Mutex::new(record_map))) - } - - fn guard_with_retry( - &self, - mut retry_count: usize, - ) -> (MutexGuard>>, usize) { - let backoff = Backoff::new(); - - loop { - if let Ok(guard) = self.0.try_lock() { - return (guard, retry_count); - } - - backoff.spin(); - retry_count += 1; - } - } - - pub fn len(&self) -> usize { - let c_map = Arc::clone(&self.0); - let record_map = c_map.lock().unwrap(); - record_map.len() - } - - pub fn get_record_for_active_mui( - &self, - mui: u32, - ) -> Option> { - let c_map = Arc::clone(&self.0); - let record_map = c_map.lock().unwrap(); - - record_map.get(&mui).and_then(|r| { - if r.status == RouteStatus::Active { - Some(PublicRecord::from((mui, r.clone()))) - } else { - None - } - }) - } - - pub fn best_backup(&self, tbi: M::TBI) -> (Option, Option) { - let c_map = Arc::clone(&self.0); - let record_map = c_map.lock().unwrap(); - let ord_routes = - record_map.iter().map(|r| (r.1.meta.as_orderable(tbi), r.0)); - let (best, bckup) = - routecore::bgp::path_selection::best_backup_generic(ord_routes); - (best.map(|b| *b.1), bckup.map(|b| *b.1)) - } - - pub(crate) fn get_record_for_mui_with_rewritten_status( - &self, - mui: u32, - bmin: &RoaringBitmap, - rewrite_status: RouteStatus, - ) -> Option> { - let c_map = Arc::clone(&self.0); - let record_map = c_map.lock().unwrap(); - record_map.get(&mui).map(|r| { - // We'll return a cloned record: the record in the store remains - // untouched. - let mut r = r.clone(); - if bmin.contains(mui) { - r.status = rewrite_status; - } - PublicRecord::from((mui, r)) - }) - } - - // Helper to filter out records that are not-active (Inactive or - // Withdrawn), or whose mui appears in the global withdrawn index. 
- pub fn get_filtered_records( - &self, - mui: Option, - bmin: &RoaringBitmap, - ) -> Vec> { - if let Some(mui) = mui { - self.get_record_for_active_mui(mui).into_iter().collect() - } else { - self.as_active_records_not_in_bmin(bmin) - } - } - - // return all records regardless of their local status, or any globally - // set status for the mui of the record. However, the local status for a - // record whose mui appears in the specified bitmap index, will be - // rewritten with the specified RouteStatus. - pub fn as_records_with_rewritten_status( - &self, - bmin: &RoaringBitmap, - rewrite_status: RouteStatus, - ) -> Vec> { - let c_map = Arc::clone(&self.0); - let record_map = c_map.lock().unwrap(); - record_map - .iter() - .map(move |r| { - let mut rec = r.1.clone(); - if bmin.contains(*r.0) { - rec.status = rewrite_status; - } - PublicRecord::from((*r.0, rec)) - }) - .collect::>() - } - - pub fn as_records(&self) -> Vec> { - let c_map = Arc::clone(&self.0); - let record_map = c_map.lock().unwrap(); - record_map - .iter() - .map(|r| PublicRecord::from((*r.0, r.1.clone()))) - .collect::>() - } - - // Returns a vec of records whose keys are not in the supplied bitmap - // index, and whose local Status is set to Active. Used to filter out - // withdrawn routes. - pub fn as_active_records_not_in_bmin( - &self, - bmin: &RoaringBitmap, - ) -> Vec> { - let c_map = Arc::clone(&self.0); - let record_map = c_map.lock().unwrap(); - record_map - .iter() - .filter_map(|r| { - if r.1.status == RouteStatus::Active && !bmin.contains(*r.0) { - Some(PublicRecord::from((*r.0, r.1.clone()))) - } else { - None - } - }) - .collect::>() - } - - // Change the local status of the record for this mui to Withdrawn. 
- pub fn mark_as_withdrawn_for_mui(&self, mui: u32) { - let c_map = Arc::clone(&self.0); - let mut record_map = c_map.lock().unwrap(); - if let Some(rec) = record_map.get_mut(&mui) { - rec.status = RouteStatus::Withdrawn; - // record_map.insert(mui, rec); - } - } - - // Change the local status of the record for this mui to Active. - pub fn mark_as_active_for_mui(&self, mui: u32) { - let record_map = Arc::clone(&self.0); - let mut r_map = record_map.lock().unwrap(); - if let Some(rec) = r_map.get_mut(&mui) { - rec.status = RouteStatus::Active; - // r_map.insert(mui, rec); - } - } - - // Insert or replace the PublicRecord in the HashMap for the key of - // record.multi_uniq_id. Returns the number of entries in the HashMap - // after updating it, if it's more than 1. Returns None if this is the - // first entry. - pub fn upsert_record( - &self, - record: PublicRecord, - ) -> (Option, usize) { - let c_map = self.clone(); - let (mut record_map, retry_count) = c_map.guard_with_retry(0); - - if record_map - .insert(record.multi_uniq_id, MultiMapValue::from(record)) - .is_some() - { - (Some(record_map.len()), retry_count) - } else { - (None, retry_count) - } - } -} - -impl Clone for MultiMap { - fn clone(&self) -> Self { - Self(Arc::clone(&self.0)) - } -} - -// ----------- AtomicStoredPrefix ------------------------------------------- -// Unlike StoredNode, we don't need an Empty variant, since we're using -// serial == 0 as the empty value. We're not using an Option here, to -// avoid going outside our atomic procedure. 
-// #[allow(clippy::type_complexity)] -// #[derive(Debug)] -// pub struct AtomicStoredPrefix< -// AF: AddressFamily, -// M: crate::prefix_record::Meta, -// >(pub Atomic>); - -// impl -// AtomicStoredPrefix -// { -// pub(crate) fn empty() -> Self { -// AtomicStoredPrefix(Atomic::null()) -// } - -// // pub(crate) fn is_empty(&self, guard: &Guard) -> bool { -// // let pfx = self.0.load(Ordering::SeqCst, guard); -// // pfx.is_null() -// // } - -// pub(crate) fn get_stored_prefix<'a>( -// &'a self, -// guard: &'a Guard, -// ) -> Option<&'a StoredPrefix> { -// let pfx = self.0.load(Ordering::Acquire, guard); -// match pfx.is_null() { -// true => None, -// false => Some(unsafe { pfx.deref() }), -// } -// } - -// pub(crate) fn _get_stored_prefix_with_tag<'a>( -// &'a self, -// guard: &'a Guard, -// ) -> Option<(&'a StoredPrefix, usize)> { -// let pfx = self.0.load(Ordering::Acquire, guard); -// match pfx.is_null() { -// true => None, -// false => Some((unsafe { pfx.deref() }, pfx.tag())), -// } -// } - -// pub(crate) fn get_stored_prefix_mut<'a>( -// &'a self, -// guard: &'a Guard, -// ) -> Option<&'a StoredPrefix> { -// let pfx = self.0.load(Ordering::SeqCst, guard); - -// match pfx.is_null() { -// true => None, -// false => Some(unsafe { pfx.deref() }), -// } -// } - -// #[allow(dead_code)] -// pub(crate) fn get_serial(&self) -> usize { -// let guard = &epoch::pin(); -// unsafe { self.0.load(Ordering::Acquire, guard).into_owned() }.tag() -// } - -// pub(crate) fn get_prefix_id(&self) -> PrefixId { -// let guard = &epoch::pin(); -// match self.get_stored_prefix(guard) { -// None => { -// panic!("AtomicStoredPrefix::get_prefix_id: empty prefix"); -// } -// Some(pfx) => pfx.prefix, -// } -// } - -// // PrefixSet is an Atomic that might be a null pointer, which is -// // UB! 
Therefore we keep the prefix record in an Option: If -// // that Option is None, then the PrefixSet is a null pointer and -// // we'll return None -// pub(crate) fn get_next_bucket<'a>( -// &'a self, -// guard: &'a Guard, -// ) -> Option<&PrefixSet> { -// // let guard = &epoch::pin(); -// if let Some(stored_prefix) = self.get_stored_prefix(guard) { -// // if stored_prefix.super_agg_record.is_some() { -// if !&stored_prefix -// .next_bucket -// .0 -// .load(Ordering::SeqCst, guard) -// .is_null() -// { -// Some(&stored_prefix.next_bucket) -// } else { -// None -// } -// } else { -// None -// } -// } -// } - -// ----------- FamilyBuckets Trait ------------------------------------------ -// -// Implementations of this trait are done by a proc-macro called -// `stride_sizes`from the `rotonda-macros` crate. - -pub trait NodeBuckets { - fn init() -> Self; - fn len_to_store_bits(len: u8, level: u8) -> u8; - fn get_stride_sizes(&self) -> &[u8]; - fn get_stride_for_id(&self, id: StrideNodeId) -> u8; - fn get_store3(&self, id: StrideNodeId) -> &NodeSet; - fn get_store4(&self, id: StrideNodeId) -> &NodeSet; - fn get_store5(&self, id: StrideNodeId) -> &NodeSet; - fn get_strides_len() -> u8; - fn get_first_stride_size() -> u8; -} - -pub trait PrefixBuckets -where - Self: Sized, -{ - fn init() -> Self; - fn remove(&mut self, id: PrefixId) -> Option; - fn get_root_prefix_set(&self, len: u8) -> &'_ PrefixSet; - fn get_bits_for_len(len: u8, level: u8) -> u8; -} - -//------------ PrefixSet ---------------------------------------------------- - -// The PrefixSet is the ARRAY that holds all the child prefixes in a node. -// Since we are storing these prefixes in the global store in a HashMap that -// is keyed on the tuple (addr_bits, len, serial number) we can get away with -// storing ONLY THE SERIAL NUMBER in the pfx_vec: The addr_bits and len are -// implied in the position in the array a serial number has. 
A PrefixSet -// doesn't know anything about the node it is contained in, so it needs a base -// address to be able to calculate the complete prefix of a child prefix. - -#[derive(Debug)] -#[repr(align(8))] -pub struct PrefixSet( - pub OnceBoxSlice>, -); - -impl PrefixSet { - pub fn init(p2_size: u8) -> Self { - PrefixSet(OnceBoxSlice::new(p2_size)) - } - - pub(crate) fn is_empty(&self) -> bool { - self.0.is_null() - } - - pub(crate) fn get_by_index( - &self, - index: usize, - ) -> Option<&StoredPrefix> { - self.0.get(index) - } -} diff --git a/src/local_array/store/custom_alloc.rs b/src/local_array/store/custom_alloc.rs deleted file mode 100644 index 5139e1d5..00000000 --- a/src/local_array/store/custom_alloc.rs +++ /dev/null @@ -1,1186 +0,0 @@ -// ----------- THE STORE ---------------------------------------------------- -// -// The CustomAllocStore provides in-memory storage for the BitTreeMapNodes -// and for prefixes and their meta-data. The storage for node is on the -// `buckets` field, and the prefixes are stored in, well, the `prefixes` -// field. They are both organised in the same way, as chained hash tables, -// one per (prefix|node)-length. The hashing function (that is detailed -// lower down in this file), basically takes the address part of the -// node|prefix and uses `(node|prefix)-address part % bucket size` -// as its index. -// -// Both the prefixes and the buckets field have one bucket per (prefix|node) -// -length that start out with a fixed-size array. The size of the arrays is -// set in the rotonda_macros/maps.rs file. -// -// For lower (prefix|node)-lengths the number of elements in the array is -// equal to the number of prefixes in that length, so there's exactly one -// element per (prefix|node). For greater lengths there will be collisions, -// in that case the stored (prefix|node) will have a reference to another -// bucket (also of a fixed size), that holds a (prefix|node) that collided -// with the one that was already stored. 
A (node|prefix) lookup will have to -// go over all (node|prefix) buckets until it matches the requested (node| -// prefix) or it reaches the end of the chain. -// -// The chained (node|prefixes) are occupied at a first-come, first-serve -// basis, and are not re-ordered on new insertions of (node|prefixes). This -// may change in the future, since it prevents iterators from being ordered. -// -// One of the nice things of having one table per (node|prefix)-length is that -// a search can start directly at the prefix-length table it wishes, and go -// go up and down into other tables if it needs to (e.g., because more- or -// less-specifics were asked for). In contrast if you do a lookup by -// traversing the tree of nodes, we would always have to go through the root- -// node first and then go up the tree to the requested node. The lower nodes -// of the tree (close to the root) would be a formidable bottle-neck then. -// -// Currently, the meta-data is an atomically stored value, that is required to -// implement the `Meta` and the `Clone` trait. New meta-data -// instances are stored atomically without further ado, but updates to a -// piece of meta-data are done by merging the previous meta-data with the new -// meta-data, through use of the `MergeUpdate` trait. -// -// The `upsert_prefix` methods retrieve only the most recent insert -// for a prefix (for now). -// -// Future work could have a user-configurable retention strategy that allows -// the meta-data to be stored as a linked-list of references, where each -// meta-data object has a reference to its predecessor. 
-// -// Prefix example -// -// (level 0 arrays) prefixes bucket -// /len size -// ┌──┐ -// len /0 │ 0│ 1 1 ■ -// └──┘ │ -// ┌──┬──┐ │ -// len /1 │00│01│ 2 2 │ -// └──┴──┘ perfect -// ┌──┬──┬──┬──┐ hash -// len /2 │ │ │ │ │ 4 4 │ -// └──┴──┴──┴──┘ │ -// ┌──┬──┬──┬──┬──┬──┬──┬──┐ │ -// len /3 │ │ │ │ │ │ │ │ │ 8 8 ■ -// └──┴──┴──┴──┴──┴──┴──┴──┘ -// ┌──┬──┬──┬──┬──┬──┬──┬──┐ ┌────────────┐ -// len /4 │ │ │ │ │ │ │ │ │ 8 16 ◀────────│ collision │ -// └──┴──┴──┴┬─┴──┴──┴──┴──┘ └────────────┘ -// └───┐ -// │ ┌─collision─────────┐ -// ┌───▼───┐ │ │ -// │ │ ◀────────│ 0x0100 and 0x0101 │ -// │ 0x010 │ └───────────────────┘ -// │ │ -// ├───────┴──────────────┬──┬──┐ -// │ StoredPrefix 0x0101 │ │ │ -// └──────────────────────┴─┬┴─┬┘ -// │ │ -// ┌────────────────────┘ └──┐ -// ┌──────────▼──────────┬──┐ ┌─▼┬──┐ -// ┌─▶│ metadata (current) │ │ │ 0│ 1│ (level 1 array) -// │ └─────────────────────┴──┘ └──┴──┘ -// merge└─┐ │ │ -// update │ ┌────────────┘ │ -// │┌──────────▼──────────┬──┐ ┌───▼───┐ -// ┌─▶│ metadata (previous) │ │ │ │ -// │ └─────────────────────┴──┘ │ 0x0 │ -// merge└─┐ │ │ │ -// update │ ┌────────────┘ ├───────┴──────────────┬──┐ -// │┌──────────▼──────────┬──┐ │ StoredPrefix 0x0110 │ │ -// │ metadata (oldest) │ │ └──────────────────────┴──┘ -// └─────────────────────┴──┘ │ -// ┌─────────────┘ -// ┌──────────▼──────────────┐ -// │ metadata (current) │ -// └─────────────────────────┘ - -// Note about the memory usage of the data-structures of the Buckets -// -// As said, the prefixes and nodes are stored in buckets. A bucket right now -// is of type `[MaybeUnit>]`, this has the advantage -// that the length can be variable, based on the stride size for that level. -// It saves us to have to implement a generic something. -// Another advantage is the fixed place in which an atomic StoredPrefix -// lives: this makes compare-and-swapping it relatively straight forward. 
-// Each accessing thread would try to read/write the exact same entry in the -// array, so shouldn't be any 'rug pulling' on the whole array. -// -// A disadvantage is that this is a fixed size, sparse array the moment it -// is created. Theoretically, an `Atomic` -// would not have this disadvantage. Manipulating the whole vec atomically -// though is very tricky (we would have to atomically compare-and-swap the -// whole vec each time the prefix meta-data is changed) and inefficient, -// since we would have to either keep the vec sorted on `PrefixId` at all -// times, or, we would have to inspect each value in the vec on *every* read -// or write. the StoredPrefix (this is a challenge in itself, since the -// StoredPrefix needs to be read atomically to retrieve the PrefixId). -// Compare-and-swapping a whole vec most probably would need a hash over the -// vec to determine whether it was changed. I gave up on this approach, -// -// Another approach to try to limit the memory use is to try to use other -// indexes in the same array on collision (the array mentioned above), before -// heading off and following the reference to the next bucket. This would -// limit the amount of (sparse) arrays being created for a typical prefix -// treebitmap, at the cost of longer average search times. Two -// implementations of this approach are Cuckoo hashing[^1], and Skip Lists. -// Skip lists[^2] are a probabilistic data-structure, famously used by Redis, -// (and by TiKv). I haven't tries either of these. Crossbeam has a SkipList -// implementation, that wasn't ready at the time I wrote this. Cuckoo -// hashing has the advantage of being easier to understand/implement. Maybe -// Cuckoo hashing can also be combined with Fibonacci hashing[^3]. Note that -// Robin Hood hashing maybe faster than Cuckoo hashing for reads, but it -// requires shifting around existing entries, which is rather costly to do -// atomically (and complex). 
- -// [^1]: [https://en.wikipedia.org/wiki/Cuckoo_hashing] -// [^3]: [https://docs.rs/crossbeam-skiplist/0.1.1/crossbeam_skiplist/] -// [^3]: [https://probablydance.com/2018/06/16/fibonacci-hashing- -// the-optimization-that-the-world-forgot-or-a-better-alternative- -// to-integer-modulo/] - -// Notes on memory leaks in Rotonda-store -// -// Both valgrind and miri report memory leaks on the multi-threaded prefix -// store. Valgrind only reports it when it a binary stops using the tree, -// while still keeping it around. An interrupted use of the mt-prefix-store -// does not report any memory leaks. Miri is persistent in reporting memory -// leaks in the mt-prefix-store. They both report the memory leaks in the same -// location: the init method of the node- and prefix-buckets. -// -// I have reasons to believe these reported memory leaks aren't real, or that -// crossbeam-epoch leaks a bit of memory when creating a new `Atomic` -// instance. Since neither prefix nor node buckets can or should be dropped -// this is not a big issue anyway, it just means that an `Atomic` occupies -// more memory than it could in an optimal situation. Since we're not storing -// the actual meta-data in an `Atomic` (it is stored in an `flurry Map`), this -// means memory usage won't grow on updating the meta-data on a prefix, -// (unless the meta-data itself grows of course, but that's up to the user). 
-// -// To get a better understanding on the nature of the reported memory leaks I -// have created a branch (`vec_set`) that replaces the dynamically sized array -// with a (equally sparse) Vec, that is not filled with `Atomic:::null()`, but -// with `Option usize { - self.nodes.load(Ordering::Relaxed) - } - - pub fn inc_nodes_count(&self) { - self.nodes.fetch_add(1, Ordering::Relaxed); - } - - pub fn get_prefixes_count(&self) -> Vec { - self.prefixes - .iter() - .map(|pc| pc.load(Ordering::Relaxed)) - .collect::>() - } - - pub fn inc_prefixes_count(&self, len: u8) { - self.prefixes[len as usize].fetch_add(1, Ordering::Relaxed); - } - - pub fn get_prefix_stats(&self) -> Vec { - self.prefixes - .iter() - .enumerate() - .filter_map(|(len, count)| { - let count = count.load(Ordering::Relaxed); - if count != 0 { - Some(CreatedNodes { - depth_level: len as u8, - count, - }) - } else { - None - } - }) - .collect() - } -} - -impl Default for Counters { - fn default() -> Self { - let mut prefixes: Vec = Vec::with_capacity(129); - for _ in 0..=128 { - prefixes.push(AtomicUsize::new(0)); - } - - Self { - nodes: AtomicUsize::new(0), - prefixes: prefixes.try_into().unwrap(), - } - } -} - -//------------ StoreStats ---------------------------------------------- - -#[derive(Debug)] -pub struct StoreStats { - pub v4: Vec, - pub v6: Vec, -} - -//------------ UpsertReport -------------------------------------------------- - -#[derive(Debug)] -pub struct UpsertReport { - // Indicates the number of Atomic Compare-and-Swap operations were - // necessary to create/update the Record entry. High numbers indicate - // contention. - pub cas_count: usize, - // Indicates whether this was the first mui record for this prefix was - // created. So, the prefix did not exist before hand. - pub prefix_new: bool, - // Indicates whether this mui was new for this prefix. False means an old - // value was overwritten. 
- pub mui_new: bool, - // The number of mui records for this prefix after the upsert operation. - pub mui_count: usize, -} - -// ----------- CustomAllocStorage ------------------------------------------- -// -// CustomAllocStorage is a storage backend that uses a custom allocator, that -// consists of arrays that point to other arrays on collision. -#[derive(Debug)] -pub struct CustomAllocStorage< - AF: AddressFamily, - M: crate::prefix_record::Meta, - NB: NodeBuckets, - PB: PrefixBuckets, -> { - pub(crate) buckets: NB, - pub prefixes: PB, - pub default_route_prefix_serial: AtomicUsize, - // Global Roaring Bitmap INdex that stores MUIs. - pub withdrawn_muis_bmin: Atomic, - pub counters: Counters, - _m: PhantomData, - _af: PhantomData, -} - -impl< - 'a, - AF: AddressFamily, - M: crate::prefix_record::Meta, - NB: NodeBuckets, - PB: PrefixBuckets, - > CustomAllocStorage -{ - pub(crate) fn init( - root_node: SizedStrideNode, - // A node always gets created as an intermediary to create an actual - // meta-data record. A meta-data record has an id that is unique in - // the collection of Records, that is stored as a value in the tree. - // This unique id is used to be able to decide to replace or add a - // record to the meta-data collection in a multi-map. It is also added - // to a bitmap index on each node that has children where the unique - // id appears on a Record. 
- // multi_uniq_id: u32, - ) -> Result> { - info!("store: initialize store {}", AF::BITS); - - let store = CustomAllocStorage { - buckets: NodeBuckets::::init(), - prefixes: PrefixBuckets::::init(), - default_route_prefix_serial: AtomicUsize::new(0), - withdrawn_muis_bmin: RoaringBitmap::new().into(), - counters: Counters::default(), - _af: PhantomData, - _m: PhantomData, - }; - - let _retry_count = store.store_node( - StrideNodeId::dangerously_new_with_id_as_is(AF::zero(), 0), - 0_u32, - root_node, - )?; - - Ok(store) - } - - pub(crate) fn acquire_new_node_id( - &self, - (prefix_net, sub_prefix_len): (AF, u8), - ) -> StrideNodeId { - StrideNodeId::new_with_cleaned_id(prefix_net, sub_prefix_len) - } - - // Create a new node in the store with payload `next_node`. - // - // Next node will be ignored if a node with the same `id` already exists, - // but the multi_uniq_id will be added to the rbm_index of the NodeSet. - // - // Returns: a tuple with the node_id of the created node and the number of - // retry_count - #[allow(clippy::type_complexity)] - pub(crate) fn store_node( - &self, - id: StrideNodeId, - multi_uniq_id: u32, - next_node: SizedStrideNode, - ) -> Result<(StrideNodeId, u32), PrefixStoreError> { - struct SearchLevel<'s, AF: AddressFamily, S: Stride> { - f: &'s dyn Fn( - &SearchLevel, - &NodeSet, - TreeBitMapNode, - u32, // multi_uniq_id - u8, // the store level - u32, // retry_count - ) -> Result< - (StrideNodeId, u32), - PrefixStoreError, - >, - } - - let search_level_3 = - store_node_closure![Stride3; id; guard; back_off;]; - let search_level_4 = - store_node_closure![Stride4; id; guard; back_off;]; - let search_level_5 = - store_node_closure![Stride5; id; guard; back_off;]; - - if log_enabled!(log::Level::Trace) { - debug!( - "{} store: Store node {}: {:?} mui {}", - std::thread::current().name().unwrap_or("unnamed-thread"), - id, - next_node, - multi_uniq_id - ); - } - self.counters.inc_nodes_count(); - - match next_node { - 
SizedStrideNode::Stride3(new_node) => (search_level_3.f)( - &search_level_3, - self.buckets.get_store3(id), - new_node, - multi_uniq_id, - 0, - 0, - ), - SizedStrideNode::Stride4(new_node) => (search_level_4.f)( - &search_level_4, - self.buckets.get_store4(id), - new_node, - multi_uniq_id, - 0, - 0, - ), - SizedStrideNode::Stride5(new_node) => (search_level_5.f)( - &search_level_5, - self.buckets.get_store5(id), - new_node, - multi_uniq_id, - 0, - 0, - ), - } - } - - #[allow(clippy::type_complexity)] - pub(crate) fn retrieve_node( - &'a self, - id: StrideNodeId, - ) -> Option> { - struct SearchLevel<'s, AF: AddressFamily, S: Stride> { - f: &'s dyn for<'a> Fn( - &SearchLevel, - &'a NodeSet, - u8, - ) - -> Option>, - } - - let search_level_3 = impl_search_level![Stride3; id;]; - let search_level_4 = impl_search_level![Stride4; id;]; - let search_level_5 = impl_search_level![Stride5; id;]; - - if log_enabled!(log::Level::Trace) { - trace!( - "{} store: Retrieve node {} from l{}", - std::thread::current().name().unwrap_or("unnamed-thread"), - id, - id.get_id().1 - ); - } - - match self.get_stride_for_id(id) { - 3 => (search_level_3.f)( - &search_level_3, - self.buckets.get_store3(id), - 0, - ), - 4 => (search_level_4.f)( - &search_level_4, - self.buckets.get_store4(id), - 0, - ), - _ => (search_level_5.f)( - &search_level_5, - self.buckets.get_store5(id), - 0, - ), - } - } - - // retrieve a node, but only its bitmap index contains the specified mui. - // Used for iterators per mui. 
- #[allow(clippy::type_complexity)] - pub(crate) fn retrieve_node_for_mui( - &'a self, - id: StrideNodeId, - // The mui that is tested to be present in the nodes bitmap index - mui: u32, - ) -> Option> { - struct SearchLevel<'s, AF: AddressFamily, S: Stride> { - f: &'s dyn for<'a> Fn( - &SearchLevel, - &'a NodeSet, - u8, - ) - -> Option>, - } - - let search_level_3 = impl_search_level_for_mui![Stride3; id; mui;]; - let search_level_4 = impl_search_level_for_mui![Stride4; id; mui;]; - let search_level_5 = impl_search_level_for_mui![Stride5; id; mui;]; - - if log_enabled!(log::Level::Trace) { - trace!( - "{} store: Retrieve node {} from l{} for mui {}", - std::thread::current().name().unwrap_or("unnamed-thread"), - id, - id.get_id().1, - mui - ); - } - - match self.get_stride_for_id(id) { - 3 => (search_level_3.f)( - &search_level_3, - self.buckets.get_store3(id), - 0, - ), - 4 => (search_level_4.f)( - &search_level_4, - self.buckets.get_store4(id), - 0, - ), - _ => (search_level_5.f)( - &search_level_5, - self.buckets.get_store5(id), - 0, - ), - } - } - - #[allow(clippy::type_complexity)] - pub(crate) fn retrieve_node_mut( - &'a self, - id: StrideNodeId, - multi_uniq_id: u32, - ) -> Option> { - struct SearchLevel<'s, AF: AddressFamily, S: Stride> { - f: &'s dyn for<'a> Fn( - &SearchLevel, - &'a NodeSet, - u8, - ) - -> Option>, - } - - let search_level_3 = - retrieve_node_mut_closure![Stride3; id; multi_uniq_id;]; - let search_level_4 = - retrieve_node_mut_closure![Stride4; id; multi_uniq_id;]; - let search_level_5 = - retrieve_node_mut_closure![Stride5; id; multi_uniq_id;]; - - if log_enabled!(log::Level::Trace) { - trace!( - "{} store: Retrieve node mut {} from l{}", - std::thread::current().name().unwrap_or("unnamed-thread"), - id, - id.get_id().1 - ); - } - - match self.buckets.get_stride_for_id(id) { - 3 => (search_level_3.f)( - &search_level_3, - self.buckets.get_store3(id), - 0, - ), - - 4 => (search_level_4.f)( - &search_level_4, - 
self.buckets.get_store4(id), - 0, - ), - _ => (search_level_5.f)( - &search_level_5, - self.buckets.get_store5(id), - 0, - ), - } - } - - pub(crate) fn get_root_node_id(&self) -> StrideNodeId { - StrideNodeId::dangerously_new_with_id_as_is(AF::zero(), 0) - } - - pub fn get_nodes_count(&self) -> usize { - self.counters.get_nodes_count() - } - - // Prefixes related methods - - pub(crate) fn load_default_route_prefix_serial(&self) -> usize { - self.default_route_prefix_serial.load(Ordering::SeqCst) - } - - #[allow(dead_code)] - fn increment_default_route_prefix_serial(&self) -> usize { - self.default_route_prefix_serial - .fetch_add(1, Ordering::SeqCst) - } - - // THE CRITICAL SECTION - // - // CREATING OR UPDATING A PREFIX IN THE STORE - // - // YES, THE MAGIC HAPPENS HERE! - // - // This uses the TAG feature of crossbeam_utils::epoch to ensure that we - // are not overwriting a prefix meta-data that already has been created - // or was updated by another thread. - // - // Our plan: - // - // 1. LOAD - // Load the current prefix and meta-data from the store if any. - // 2. INSERT - // If there is no current meta-data, create it. - // 3. UPDATE - // If there is a prefix, meta-data combo, then load it and merge - // the existing meta-dat with our meta-data using the `MergeUpdate` - // trait (a so-called 'Read-Copy-Update'). - // 4. SUCCESS - // See if we can successfully store the updated meta-data in the store. - // 5. DONE - // If Step 4 succeeded we're done! - // 6. FAILURE - REPEAT - // If Step 4 failed we're going to do the whole thing again. - - pub(crate) fn upsert_prefix( - &self, - prefix: PrefixId, - record: PublicRecord, - update_path_selections: Option, - guard: &Guard, - ) -> Result { - let mut prefix_new = true; - - let (mui_new, insert_retry_count) = - match self.non_recursive_retrieve_prefix_mut(prefix) { - // There's no StoredPrefix at this location yet. Create a new - // PrefixRecord and try to store it in the empty slot. 
- (locked_prefix, false) => { - if log_enabled!(log::Level::Debug) { - debug!( - "{} store: Create new prefix record", - std::thread::current().name().unwrap_or("unnamed-thread") - ); - } - - // We're creating a StoredPrefix without our record first, - // to avoid having to clone it on retry. - let res = locked_prefix - // .get_or_init(|| { - // StoredPrefix::new::( - // PrefixId::new(prefix.get_net(), prefix.get_len()), - // level, - // ) - // }) - // .0 - .record_map - .upsert_record(record); - - self.counters.inc_prefixes_count(prefix.get_len()); - res - } - // There already is a StoredPrefix with a record at this - // location. - (stored_prefix, true) => { - if log_enabled!(log::Level::Debug) { - debug!( - "{} store: Found existing prefix record for {}/{}", - std::thread::current().name().unwrap_or("unnamed-thread"), - prefix.get_net(), - prefix.get_len() - ); - } - prefix_new = false; - - // Update the already existing record_map with our caller's - // record. - stored_prefix.set_ps_outdated(guard)?; - let res = stored_prefix.record_map.upsert_record(record); - - if let Some(tbi) = update_path_selections { - stored_prefix - .calculate_and_store_best_backup(&tbi, guard)?; - } - - res - } - }; - - Ok(UpsertReport { - prefix_new, - cas_count: insert_retry_count, - mui_new: mui_new.is_none(), - mui_count: mui_new.unwrap_or(1), - }) - } - - // Change the status of the record for the specified (prefix, mui) - // combination to Withdrawn. - pub fn mark_mui_as_withdrawn_for_prefix( - &self, - prefix: PrefixId, - mui: u32, - ) -> Result<(), PrefixStoreError> { - let (stored_prefix, exists) = - self.non_recursive_retrieve_prefix_mut(prefix); - - if !exists { - return Err(PrefixStoreError::StoreNotReadyError); - } - - stored_prefix.record_map.mark_as_withdrawn_for_mui(mui); - - Ok(()) - } - - // Change the status of the record for the specified (prefix, mui) - // combination to Active. 
- pub fn mark_mui_as_active_for_prefix( - &self, - prefix: PrefixId, - mui: u32, - ) -> Result<(), PrefixStoreError> { - let (stored_prefix, exists) = - self.non_recursive_retrieve_prefix_mut(prefix); - - if !exists { - return Err(PrefixStoreError::StoreNotReadyError); - } - - stored_prefix.record_map.mark_as_active_for_mui(mui); - - Ok(()) - } - - // Change the status of the mui globally to Withdrawn. Iterators and match - // functions will by default not return any records for this mui. - pub fn mark_mui_as_withdrawn( - &self, - mui: u32, - guard: &Guard, - ) -> Result<(), PrefixStoreError> { - let current = self.withdrawn_muis_bmin.load(Ordering::Acquire, guard); - - let mut new = unsafe { current.as_ref() }.unwrap().clone(); - new.insert(mui); - - #[allow(clippy::assigning_clones)] - loop { - match self.withdrawn_muis_bmin.compare_exchange( - current, - Owned::new(new), - Ordering::AcqRel, - Ordering::Acquire, - guard, - ) { - Ok(_) => return Ok(()), - Err(updated) => { - new = - unsafe { updated.current.as_ref() }.unwrap().clone(); - } - } - } - } - - // Change the status of the mui globally to Active. Iterators and match - // functions will default to the status on the record itself. - pub fn mark_mui_as_active( - &self, - mui: u32, - guard: &Guard, - ) -> Result<(), PrefixStoreError> { - let current = self.withdrawn_muis_bmin.load(Ordering::Acquire, guard); - - let mut new = unsafe { current.as_ref() }.unwrap().clone(); - new.remove(mui); - - #[allow(clippy::assigning_clones)] - loop { - match self.withdrawn_muis_bmin.compare_exchange( - current, - Owned::new(new), - Ordering::AcqRel, - Ordering::Acquire, - guard, - ) { - Ok(_) => return Ok(()), - Err(updated) => { - new = - unsafe { updated.current.as_ref() }.unwrap().clone(); - } - } - } - } - - // Whether this mui is globally withdrawn. Note that this overrules (by - // default) any (prefix, mui) combination in iterators and match functions. 
- pub fn mui_is_withdrawn(&self, mui: u32, guard: &Guard) -> bool { - unsafe { - self.withdrawn_muis_bmin - .load(Ordering::Acquire, guard) - .as_ref() - } - .unwrap() - .contains(mui) - } - - // Whether this mui is globally active. Note that the local statuses of - // records (prefix, mui) may be set to withdrawn in iterators and match - // functions. - pub fn mui_is_active(&self, mui: u32, guard: &Guard) -> bool { - !unsafe { - self.withdrawn_muis_bmin - .load(Ordering::Acquire, guard) - .as_ref() - } - .unwrap() - .contains(mui) - } - - // This function is used by the upsert_prefix function above. - // - // We're using a Chained Hash Table and this function returns one of: - // - a StoredPrefix that already exists for this search_prefix_id - // - the Last StoredPrefix in the chain. - // - an error, if no StoredPrefix whatsoever can be found in the store. - // - // The error condition really shouldn't happen, because that basically - // means the root node for that particular prefix length doesn't exist. - #[allow(clippy::type_complexity)] - pub(crate) fn non_recursive_retrieve_prefix_mut( - &'a self, - search_prefix_id: PrefixId, - ) -> (&'a StoredPrefix, bool) { - trace!("non_recursive_retrieve_prefix_mut_with_guard"); - let mut prefix_set = self - .prefixes - .get_root_prefix_set(search_prefix_id.get_len()); - let mut level: u8 = 0; - - trace!("root prefix_set {:?}", prefix_set); - loop { - // HASHING FUNCTION - let index = Self::hash_prefix_id(search_prefix_id, level); - - // probe the slot with the index that's the result of the hashing. - // let locked_prefix = prefix_set.0.get(index); - let stored_prefix = match prefix_set.0.get(index) { - Some(p) => { - trace!("prefix set found."); - (p, true) - } - None => { - // We're at the end of the chain and haven't found our - // search_prefix_id anywhere. Return the end-of-the-chain - // StoredPrefix, so the caller can attach a new one. - trace!( - "no record. 
returning last found record in level {}, with index {}.", - level, - index - ); - let index = Self::hash_prefix_id(search_prefix_id, level); - trace!("calculate next index {}", index); - ( - prefix_set - .0 - .get_or_init(index, || { - StoredPrefix::new::( - PrefixId::new( - search_prefix_id.get_net(), - search_prefix_id.get_len(), - ), - level, - ) - }) - .0, - false, - ) - } - }; - - if search_prefix_id == stored_prefix.0.prefix { - // GOTCHA! - // Our search-prefix is stored here, so we're returning - // it, so its PrefixRecord can be updated by the caller. - trace!("found requested prefix {:?}", search_prefix_id); - return stored_prefix; - } else { - // A Collision. Follow the chain. - level += 1; - prefix_set = &stored_prefix.0.next_bucket; - continue; - } - } - } - - #[allow(clippy::type_complexity)] - pub fn non_recursive_retrieve_prefix( - &'a self, - id: PrefixId, - ) -> ( - Option<&StoredPrefix>, - Option<( - PrefixId, - u8, - &'a PrefixSet, - [Option<(&'a PrefixSet, usize)>; 32], - usize, - )>, - ) { - let mut prefix_set = self.prefixes.get_root_prefix_set(id.get_len()); - let mut parents = [None; 32]; - let mut level: u8 = 0; - let backoff = Backoff::new(); - - loop { - // The index of the prefix in this array (at this len and - // level) is calculated by performing the hash function - // over the prefix. - - // HASHING FUNCTION - let index = Self::hash_prefix_id(id, level); - - if let Some(stored_prefix) = prefix_set.0.get(index) { - if id == stored_prefix.get_prefix_id() { - trace!("found requested prefix {:?}", id); - parents[level as usize] = Some((prefix_set, index)); - return ( - Some(stored_prefix), - Some((id, level, prefix_set, parents, index)), - ); - }; - // Advance to the next level. 
- - prefix_set = &stored_prefix.next_bucket; - level += 1; - backoff.spin(); - continue; - } - - trace!("no prefix found for {:?}", id); - parents[level as usize] = Some((prefix_set, index)); - return (None, Some((id, level, prefix_set, parents, index))); - } - } - - #[allow(clippy::type_complexity)] - pub(crate) fn retrieve_prefix( - &'a self, - prefix_id: PrefixId, - ) -> Option<(&StoredPrefix, usize)> { - struct SearchLevel< - 's, - AF: AddressFamily, - M: crate::prefix_record::Meta, - > { - f: &'s dyn for<'a> Fn( - &SearchLevel, - &'a PrefixSet, - u8, - ) - -> Option<(&'a StoredPrefix, usize)>, - } - - let search_level = SearchLevel { - f: &|search_level: &SearchLevel, - prefix_set: &PrefixSet, - mut level: u8| { - // HASHING FUNCTION - let index = Self::hash_prefix_id(prefix_id, level); - - if let Some(stored_prefix) = prefix_set.0.get(index) { - if prefix_id == stored_prefix.prefix { - trace!("found requested prefix {:?}", prefix_id,); - return Some((stored_prefix, 0)); - }; - level += 1; - - (search_level.f)( - search_level, - &stored_prefix.next_bucket, - level, - ); - } - None - }, - }; - - (search_level.f)( - &search_level, - self.prefixes.get_root_prefix_set(prefix_id.get_len()), - 0, - ) - } - - #[allow(dead_code)] - fn remove_prefix(&mut self, index: PrefixId) -> Option { - match index.is_empty() { - false => self.prefixes.remove(index), - true => None, - } - } - - pub fn get_prefixes_count(&self) -> usize { - self.counters.get_prefixes_count().iter().sum() - } - - pub fn get_prefixes_count_for_len(&self, len: u8) -> usize { - self.counters.get_prefixes_count()[len as usize] - } - - // Stride related methods - - pub(crate) fn get_stride_for_id(&self, id: StrideNodeId) -> u8 { - self.buckets.get_stride_for_id(id) - } - - pub fn get_stride_sizes(&self) -> &[u8] { - self.buckets.get_stride_sizes() - } - - // pub(crate) fn get_strides_len() -> u8 { - // NB::get_strides_len() - // } - - pub(crate) fn get_first_stride_size() -> u8 { - 
NB::get_first_stride_size() - } - - // Calculates the id of the node that COULD host a prefix in its - // ptrbitarr. - pub(crate) fn get_node_id_for_prefix( - &self, - prefix: &PrefixId, - ) -> (StrideNodeId, BitSpan) { - let mut acc = 0; - for i in self.get_stride_sizes() { - acc += *i; - if acc >= prefix.get_len() { - let node_len = acc - i; - return ( - StrideNodeId::new_with_cleaned_id( - prefix.get_net(), - node_len, - ), - // NOT THE HASHING FUNCTION! - // Do the right shift in a checked manner, for the sake - // of 0/0. A search for 0/0 will perform a 0 << MAX_LEN, - // which will panic in debug mode (undefined behaviour - // in prod). - BitSpan::new( - ((prefix.get_net() << node_len).checked_shr_or_zero( - (AF::BITS - (prefix.get_len() - node_len)).into(), - )) - .dangerously_truncate_to_u32(), - prefix.get_len() - node_len, - ), - ); - } - } - panic!("prefix length for {:?} is too long", prefix); - } - - // ------- THE HASHING FUNCTION ----------------------------------------- - - // Ok, so hashing is really hard, but we're keeping it simple, and - // because we're keeping it simple we're having lots of collisions, but - // we don't care! - // - // We're using a part of bitarray representation of the address part of - // a prefix the as the hash. Sounds complicated, but isn't. - // Suppose we have an IPv4 prefix, say 130.24.55.0/24. - // The address part is 130.24.55.0 or as a bitarray that would be: - // - // pos 0 4 8 12 16 20 24 28 - // bit 1000 0010 0001 1000 0011 0111 0000 0000 - // - // First, we're discarding the bits after the length of the prefix, so - // we'll have: - // - // pos 0 4 8 12 16 20 - // bit 1000 0010 0001 1000 0011 0111 - // - // Now we're dividing this bitarray into one or more levels. A level can - // be an arbitrary number of bits between 1 and the length of the prefix, - // but the number of bits summed over all levels should be exactly the - // prefix length. So in our case they should add up to 24. 
A possible - // division could be: 4, 4, 4, 4, 4, 4. Another one would be: 12, 12. The - // actual division being used is described in the function - // `::get_bits_for_len` in the `rotonda-macros` crate. Each level has - // its own hash, so for our example prefix this would be: - // - // pos 0 4 8 12 16 20 - // level 0 1 - // hash 1000 0010 0001 1000 0011 0111 - // - // level 1 hash: 1000 0010 0001 - // level 2 hash: 1000 0011 0011 - // - // The hash is now converted to a usize integer, by shifting it all the - // way to the right in a u32 and then converting to a usize. Why a usize - // you ask? Because the hash is used by the CustomAllocStorage as the - // index to the array for that specific prefix length and level. - // So for our example this means that the hash on level 1 is now 0x821 - // (decimal 2081) and the hash on level 2 is 0x833 (decimal 2099). - // Now, if we only consider the hash on level 1 and that we're going to - // use that as the index to the array that stores all prefixes, you'll - // notice very quickly that all prefixes starting with 130.[16..31] will - // cause a collision: they'll all point to the same array element. These - // collisions are resolved by creating a linked list from each array - // element, where each element in the list has an array of its own that - // uses the hash function with the level incremented. - - pub(crate) fn hash_node_id(id: StrideNodeId, level: u8) -> usize { - // And, this is all of our hashing function. 
- let last_level = if level > 0 { - ::len_to_store_bits(id.get_id().1, level - 1) - } else { - 0 - }; - let this_level = ::len_to_store_bits(id.get_id().1, level); - trace!("bits division {}", this_level); - trace!( - "calculated index ({} << {}) >> {}", - id.get_id().0, - last_level, - ((::BITS - (this_level - last_level)) % ::BITS) as usize - ); - // HASHING FUNCTION - ((id.get_id().0 << last_level) - >> ((::BITS - (this_level - last_level)) % ::BITS)) - .dangerously_truncate_to_u32() as usize - } - - pub(crate) fn hash_prefix_id(id: PrefixId, level: u8) -> usize { - // And, this is all of our hashing function. - let last_level = if level > 0 { - ::get_bits_for_len(id.get_len(), level - 1) - } else { - 0 - }; - let this_level = ::get_bits_for_len(id.get_len(), level); - trace!( - "bits division {}; no of bits {}", - this_level, - this_level - last_level - ); - trace!( - "calculated index ({} << {}) >> {}", - id.get_net(), - last_level, - ((::BITS - (this_level - last_level)) % ::BITS) as usize - ); - // HASHING FUNCTION - ((id.get_net() << last_level) - >> ((::BITS - (this_level - last_level)) % ::BITS)) - .dangerously_truncate_to_u32() as usize - } -} - -//------------ Upsert ------------------------------------------------------- -pub enum Upsert { - Insert, - Update(T), -} - -impl std::fmt::Display for Upsert { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Upsert::Insert => write!(f, "Insert"), - Upsert::Update(_) => write!(f, "Update"), - } - } -} diff --git a/src/local_array/store/default_store.rs b/src/local_array/store/default_store.rs deleted file mode 100644 index e2ddcf4c..00000000 --- a/src/local_array/store/default_store.rs +++ /dev/null @@ -1,41 +0,0 @@ -use std::fmt; -use crate::prelude::*; -use crate::prelude::multi::*; - -// The default stride sizes for IPv4, IPv6, resp. 
-#[create_store(( - [5, 5, 4, 3, 3, 3, 3, 3, 3, 3], - [4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 4, 4, 4, 4, 4, 4, 4, 4, 4] -))] -struct DefaultStore; - -impl< - M: Meta, - NB: NodeBuckets, - PB: PrefixBuckets - > fmt::Display for CustomAllocStorage -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "CustomAllocStorage", - std::any::type_name::() - ) - } -} - -impl< - M: Meta, - NB: NodeBuckets, - PB: PrefixBuckets - > fmt::Display for CustomAllocStorage -{ - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "CustomAllocStorage", - std::any::type_name::() - ) - } -} diff --git a/src/local_array/store/errors.rs b/src/local_array/store/errors.rs deleted file mode 100644 index b83a96f4..00000000 --- a/src/local_array/store/errors.rs +++ /dev/null @@ -1,39 +0,0 @@ -use std::fmt; - -#[derive(Debug, PartialEq, Eq)] -pub enum PrefixStoreError { - NodeCreationMaxRetryError, - NodeNotFound, - StoreNotReadyError, - PathSelectionOutdated, - PrefixNotFound, - BestPathNotFound -} - -impl std::error::Error for PrefixStoreError {} - -impl fmt::Display for PrefixStoreError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - PrefixStoreError::NodeCreationMaxRetryError => write!( - f, - "Error: Maximum number of retries for node creation reached." 
- ), - PrefixStoreError::NodeNotFound => { - write!(f, "Error: Node not found.") - } - PrefixStoreError::StoreNotReadyError => { - write!(f, "Error: Store isn't ready yet.") - } - PrefixStoreError::PathSelectionOutdated => { - write!(f, "Error: The Path Selection process is based on outdated paths.") - } - PrefixStoreError::PrefixNotFound => { - write!(f, "Error: The Prefix cannot be found.") - } - PrefixStoreError::BestPathNotFound => { - write!(f, "Error: The Prefix does not have a stored best path.") - } - } - } -} diff --git a/src/local_array/store/macros.rs b/src/local_array/store/macros.rs deleted file mode 100644 index 2e5d8fab..00000000 --- a/src/local_array/store/macros.rs +++ /dev/null @@ -1,379 +0,0 @@ -#[macro_export] -#[doc(hidden)] -macro_rules! impl_search_level { - ( - $( - $stride: ident; - $id: ident; - ), - * ) => { - $( - SearchLevel { - f: &|search_level: &SearchLevel, - nodes, - mut level: u8, - | { - // HASHING FUNCTION - let index = Self::hash_node_id($id, level); - - // Read the node from the block pointed to by the Atomic - // pointer. - // let stored_node = unsafe { - // &mut nodes.0[index] - // }; - // let this_node = stored_node.load(Ordering::Acquire, guard); - - match nodes.0.get(index) { - None => None, - Some(stored_node) => { - let StoredNode { node_id, node, node_set, .. } = stored_node; - if $id == *node_id { - // YES, It's the one we're looking for! - return Some(SizedStrideRef::$stride(&node)); - }; - // Meh, it's not, but we can a go to the next - // level and see if it lives there. - level += 1; - match >::len_to_store_bits($id.get_id().1, level) { - // on to the next level! - next_bit_shift if next_bit_shift > 0 => { - (search_level.f)( - search_level, - &node_set, - level, - // guard, - ) - } - // There's no next level, we found nothing. - _ => None, - } - } - } - } - } - )* - }; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! 
impl_search_level_for_mui { - ( - $( - $stride: ident; - $id: ident; - $mui: ident; - ), - * ) => { - $( - SearchLevel { - f: &|search_level: &SearchLevel, - nodes, - mut level: u8| { - // HASHING FUNCTION - let index = Self::hash_node_id($id, level); - - // Read the node from the block pointed to by the Atomic - // pointer. - // let stored_node = unsafe { - // &mut nodes.0[index].assume_init_ref() - // }; - // let this_node = stored_node.load(Ordering::Acquire, guard); - - match nodes.0.get(index) { - None => None, - Some(this_node) => { - let StoredNode { node_id, node, node_set, .. } = this_node; - - // early return if the mui is not in the index - // stored in this node, meaning the mui does not - // appear anywhere in the sub-tree formed from - // this node. - let bmin = node_set.1.read().unwrap(); // load(Ordering::Acquire, guard).deref() - if !bmin.contains($mui) { - return None; - } - - if $id == *node_id { - // YES, It's the one we're looking for! - return Some(SizedStrideRef::$stride(&node)); - }; - // Meh, it's not, but we can a go to the next - // level and see if it lives there. - level += 1; - match >::len_to_store_bits($id.get_id().1, level) { - // on to the next level! - next_bit_shift if next_bit_shift > 0 => { - (search_level.f)( - search_level, - &node_set, - level, - // guard, - ) - } - // There's no next level, we found nothing. - _ => None, - } - } - } - } - } - )* - }; -} - -// This macro creates a closure that is used in turn in the macro -// 'eBox', that is used in the public `insert` method on a TreeBitMap. -// -// It retrieves the node specified by $id recursively, creates it if it does -// not exist. It is responsible for setting/updating the RBMIN, but is does -// *not* set/update the pfxbitarr or ptrbitarr of the TreeBitMapNode. The -// `insert_match` takes care of the latter. 
-// -// This closure should not be called repeatedly to create the same node, if it -// returns `None` that is basically a data race in the store and therefore an -// error. Also the caller should make sure to stay within the limit of the -// defined number of levels, although the closure will return at the end of -// the maximum depth. -#[macro_export] -#[doc(hidden)] -macro_rules! retrieve_node_mut_closure { - ( - $( - $stride: ident; - $id: ident; - $multi_uniq_id: ident; - ), - * ) => {$( - SearchLevel { - f: &| - search_level: &SearchLevel, - nodes, - mut level: u8, - | { - // HASHING FUNCTION - let index = Self::hash_node_id($id, level); - let node; - - match nodes.0.get(index) { - // This arm only ever gets called in multi-threaded code - // where our thread (running this code *now*), andgot ahead - // of another thread: After the other thread created the - // TreeBitMapNode first, it was overtaken by our thread - // running this method, so our thread enounters an empty node - // in the store. - None => { - let this_level = >::len_to_store_bits( - $id.get_id().1, level - ); - let next_level = >::len_to_store_bits( - $id.get_id().1, level + 1 - ); - let node_set = NodeSet::init(next_level - this_level); - - // See if we can create the node - (node, _) = nodes.0.get_or_init(index, || StoredNode { - node_id: $id, - node: TreeBitMapNode::new(), - node_set - }); - - // We may have lost, and a different node than we - // intended could live here, if so go a level deeper - if $id == node.node_id { - // Nope, its ours or at least the node we need. - let _retry_count = node.node_set.update_rbm_index( - $multi_uniq_id - ).ok(); - - return Some(SizedStrideRef::$stride(&node.node)); - }; - }, - Some(this_node) => { - node = this_node; - if $id == this_node.node_id { - // YES, It's the one we're looking for! - - // Update the rbm_index in this node with the - // multi_uniq_id that the caller specified. This - // is the only atomic operation we need to do - // here. 
The NodeSet that the index is attached - // to, does not need to be written to, it's part - // of a trie, so it just needs to "exist" (and it - // already does). - let retry_count = this_node.node_set.update_rbm_index( - $multi_uniq_id - ).ok(); - - trace!("Retry_count rbm index {:?}", retry_count); - trace!("add multi uniq id to bitmap index {} for node {}", - $multi_uniq_id, this_node.node - ); - return Some(SizedStrideRef::$stride(&this_node.node)); - }; - } - } - // It isn't ours. Move one level deeper. - level += 1; - match >::len_to_store_bits( - $id.get_id().1, level - ) { - // on to the next level! - next_bit_shift if next_bit_shift > 0 => { - (search_level.f)( - search_level, - &node.node_set, - level, - ) - } - // There's no next level, we found nothing. - _ => None, - } - } - } - )*}; -} - -#[macro_export] -#[doc(hidden)] -macro_rules! store_node_closure { - ( - $( - $stride: ident; - $id: ident; - // $multi_uniq_id: ident; - $guard: ident; - $back_off: ident; - ), - *) => { - $( - SearchLevel { - f: &| - search_level: &SearchLevel, - nodes, - new_node: TreeBitMapNode, - multi_uniq_id: u32, - mut level: u8, - retry_count: u32| { - let this_level = >::len_to_store_bits($id.get_id().1, level); - trace!("{:032b}", $id.get_id().0); - trace!("id {:?}", $id.get_id()); - trace!("multi_uniq_id {}", multi_uniq_id); - - // HASHING FUNCTION - let index = Self::hash_node_id($id, level); - - match nodes.0.get(index) { - None => { - // No node exists, so we create one here. 
- let next_level = >::len_to_store_bits($id.get_id().1, level + 1); - - if log_enabled!(log::Level::Trace) { - trace!("Empty node found, creating new node {} len{} lvl{}", - $id, $id.get_id().1, level + 1 - ); - trace!("Next level {}", - next_level - ); - trace!("Creating space for {} nodes", - if next_level >= this_level { 1 << (next_level - this_level) } else { 1 } - ); - } - - trace!("multi uniq id {}", multi_uniq_id); - - let node_set = NodeSet::init(next_level - this_level); - - let ptrbitarr = new_node.ptrbitarr.load(); - let pfxbitarr = new_node.pfxbitarr.load(); - - let (stored_node, its_us) = nodes.0.get_or_init( - index, - || StoredNode { - node_id: $id, - node: new_node, - node_set - } - ); - - if stored_node.node_id == $id { - stored_node.node_set.update_rbm_index( - multi_uniq_id - )?; - - if !its_us && ptrbitarr != 0 { - stored_node.node.ptrbitarr.merge_with(ptrbitarr); - } - - if !its_us && pfxbitarr != 0 { - stored_node.node.pfxbitarr.merge_with(pfxbitarr); - } - } - - return Ok(($id, retry_count)); - } - Some(stored_node) => { - // A node exists, might be ours, might be - // another one. - - if log_enabled!(log::Level::Trace) { - trace!(" - {} store: Node here exists {:?}", - std::thread::current().name().unwrap_or("unnamed-thread"), - stored_node.node_id - ); - trace!("node_id {:?}", stored_node.node_id.get_id()); - trace!("node_id {:032b}", stored_node.node_id.get_id().0); - trace!("id {}", $id); - trace!(" id {:032b}", $id.get_id().0); - } - - // See if somebody beat us to creating our - // node already, if so, we still need to do - // work: we have to update the bitmap index - // with the multi_uniq_id we've got from the - // caller. 
- if $id == stored_node.node_id { - stored_node.node_set.update_rbm_index( - multi_uniq_id - )?; - - if new_node.ptrbitarr.load() != 0 { - stored_node.node.ptrbitarr.merge_with(new_node.ptrbitarr.load()); - } - if new_node.pfxbitarr.load() != 0 { - stored_node.node.pfxbitarr.merge_with(new_node.pfxbitarr.load()); - } - - return Ok(($id, retry_count)); - } else { - // it's not "our" node, make a (recursive) - // call to create it. - level += 1; - trace!("Collision with node_id {}, move to next level: {} len{} next_lvl{} index {}", - stored_node.node_id, $id, $id.get_id().1, level, index - ); - - return match >::len_to_store_bits($id.get_id().1, level) { - // on to the next level! - next_bit_shift if next_bit_shift > 0 => { - (search_level.f)( - search_level, - &stored_node.node_set, - new_node, - multi_uniq_id, - level, - retry_count - ) - } - // There's no next level! - _ => panic!("out of storage levels, current level is {}", level), - } - } - } - } - } - } - )* - }; -} diff --git a/src/local_array/store/mod.rs b/src/local_array/store/mod.rs deleted file mode 100644 index c115a5ba..00000000 --- a/src/local_array/store/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -pub mod custom_alloc; -pub mod errors; -pub mod iterators; - -pub(crate) mod atomic_types; -pub(crate) mod default_store; -pub(crate) mod oncebox; - -pub use default_store::DefaultStore; -#[macro_use] -mod macros; diff --git a/src/local_array/tests.rs b/src/local_array/tests.rs deleted file mode 100644 index 58d0d86a..00000000 --- a/src/local_array/tests.rs +++ /dev/null @@ -1,58 +0,0 @@ -#[cfg(test)] -use std::error::Error; - -//------------ AddressFamily bit flippers ----------------------------------- -#[test] -fn test_af_1() -> Result<(), Box> { - use crate::local_array::tree::StrideNodeId; - use crate::AddressFamily; - use crate::IPv4; - - let bit_addr: IPv4 = 0b1111_1111_1111_1111_1111_1111_1111_1111; - let base_prefix = - StrideNodeId::dangerously_new_with_id_as_is(bit_addr, 32); - - 
assert_eq!(base_prefix.get_id().0, bit_addr); - assert_eq!( - base_prefix.truncate_to_len().get_id().0, - base_prefix.get_id().0 - ); - assert_eq!( - StrideNodeId::dangerously_new_with_id_as_is( - base_prefix.get_id().0.truncate_to_len(28), - 28 - ) - .add_nibble(0b0101, 4) - .get_id() - .0, - 0b1111_1111_1111_1111_1111_1111_1111_0101 - ); - - Ok(()) -} - -#[test] -fn test_af_2() -> Result<(), Box> { - use crate::local_array::tree::StrideNodeId; - use crate::IPv4; - - let bit_addr: IPv4 = 0b1111_1111_1111_1111_1111_1111_1111_1111; - let nu_prefix = StrideNodeId::dangerously_new_with_id_as_is(bit_addr, 8); - - assert_eq!(nu_prefix.get_id().0, bit_addr); - assert_eq!( - nu_prefix.truncate_to_len().get_id().0, - 0b1111_1111_0000_0000_0000_0000_0000_0000 - ); - - assert_eq!( - nu_prefix.add_nibble(0b1010, 4).get_id().0, - 0b1111_1111_1010_0000_0000_0000_0000_0000 - ); - assert_eq!( - nu_prefix.truncate_to_len().add_nibble(0b1010, 4).get_id().0, - 0b1111_1111_1010_0000_0000_0000_0000_0000 - ); - - Ok(()) -} diff --git a/src/local_array/tree.rs b/src/local_array/tree.rs deleted file mode 100644 index a6d68c5e..00000000 --- a/src/local_array/tree.rs +++ /dev/null @@ -1,757 +0,0 @@ -use crate::prefix_record::{Meta, PublicRecord}; -use crossbeam_epoch::{self as epoch}; -use log::{error, log_enabled, trace}; - -use std::hash::Hash; -use std::sync::atomic::{ - AtomicU16, AtomicU32, AtomicU64, AtomicU8, AtomicUsize, Ordering, -}; -use std::{fmt::Debug, marker::PhantomData}; - -use crate::af::AddressFamily; -use crate::custom_alloc::{CustomAllocStorage, UpsertReport}; -use crate::insert_match; -use crate::local_array::store::atomic_types::{NodeBuckets, PrefixBuckets}; - -pub(crate) use super::atomic_stride::*; -use super::store::errors::PrefixStoreError; - -pub(crate) use crate::local_array::node::TreeBitMapNode; - -#[cfg(feature = "cli")] -use ansi_term::Colour; - -//------------------- Sized Node Enums ------------------------------------ - -// No, no, NO, NO, no, no! 
We're not going to Box this, because that's slow! -// This enum is never used to store nodes/prefixes, it's only to be used in -// generic code. -#[allow(clippy::large_enum_variant)] -#[derive(Debug)] -pub enum SizedStrideNode { - Stride3(TreeBitMapNode), - Stride4(TreeBitMapNode), - Stride5(TreeBitMapNode), -} - -impl Default for TreeBitMapNode -where - AF: AddressFamily, - S: Stride, -{ - fn default() -> Self { - Self { - ptrbitarr: <::AtomicPtrSize as AtomicBitmap>::new(), - pfxbitarr: <::AtomicPfxSize as AtomicBitmap>::new(), - _af: PhantomData, - } - } -} - -impl Default for SizedStrideNode -where - AF: AddressFamily, -{ - fn default() -> Self { - SizedStrideNode::Stride3(TreeBitMapNode { - ptrbitarr: AtomicStride2(AtomicU8::new(0)), - pfxbitarr: AtomicStride3(AtomicU16::new(0)), - _af: PhantomData, - }) - } -} - -// Used to create a public iterator over all nodes. -#[derive(Debug)] -pub enum SizedStrideRef<'a, AF: AddressFamily> { - Stride3(&'a TreeBitMapNode), - Stride4(&'a TreeBitMapNode), - Stride5(&'a TreeBitMapNode), -} - -pub(crate) enum NewNodeOrIndex { - NewNode(SizedStrideNode), - ExistingNode(StrideNodeId), - NewPrefix, - ExistingPrefix, -} - -#[derive(Hash, Eq, PartialEq, Debug, Copy, Clone)] -pub struct PrefixId(Option<(AF, u8)>); - -impl PrefixId { - pub fn new(net: AF, len: u8) -> Self { - PrefixId(Some((net, len))) - } - - pub fn is_empty(&self) -> bool { - self.0.is_none() - } - - pub fn get_net(&self) -> AF { - self.0.unwrap().0 - } - - pub fn get_len(&self) -> u8 { - self.0.unwrap().1 - } - - // This should never fail, since there shouldn't be a invalid prefix in - // this prefix id in the first place. - pub fn into_pub(&self) -> inetnum::addr::Prefix { - inetnum::addr::Prefix::new( - self.get_net().into_ipaddr(), - self.get_len(), - ) - .unwrap_or_else(|p| panic!("can't convert {:?} into prefix.", p)) - } - - // Increment the length of the prefix without changing the bits part. 
- // This is used to iterate over more-specific prefixes for this prefix, - // since the more specifics iterator includes the requested `base_prefix` - // itself. - pub fn inc_len(self) -> Self { - Self(self.0.map(|(net, len)| (net, len + 1))) - } -} - -impl std::default::Default for PrefixId { - fn default() -> Self { - PrefixId(None) - } -} - -impl From for PrefixId { - fn from(value: inetnum::addr::Prefix) -> Self { - Self(Some((AF::from_ipaddr(value.addr()), value.len()))) - } -} - -//--------------------- Per-Stride-Node-Id Type ----------------------------- - -#[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub struct StrideNodeId(Option<(AF, u8)>); - -impl StrideNodeId { - pub fn empty() -> Self { - Self(None) - } - - pub fn dangerously_new_with_id_as_is(addr_bits: AF, len: u8) -> Self { - Self(Some((addr_bits, len))) - } - - #[inline] - pub fn new_with_cleaned_id(addr_bits: AF, len: u8) -> Self { - Self(Some((addr_bits.truncate_to_len(len), len))) - } - - pub fn is_empty(&self) -> bool { - self.0.is_none() - } - - pub fn get_id(&self) -> (AF, u8) { - self.0.unwrap() - } - pub fn get_len(&self) -> u8 { - self.0.unwrap().1 - } - pub fn set_len(mut self, len: u8) -> Self { - self.0.as_mut().unwrap().1 = len; - self - } - - pub fn add_to_len(mut self, len: u8) -> Self { - self.0.as_mut().unwrap().1 += len; - self - } - - #[inline] - pub fn truncate_to_len(self) -> Self { - let (addr_bits, len) = self.0.unwrap(); - StrideNodeId::new_with_cleaned_id(addr_bits, len) - } - - // clean out all bits that are set beyond the len. This function should - // be used before doing any ORing to add a nibble. 
- #[inline] - pub fn unwrap_with_cleaned_id(&self) -> (AF, u8) { - let (addr_bits, len) = self.0.unwrap(); - (addr_bits.truncate_to_len(len), len) - } - - pub fn add_nibble(&self, nibble: u32, nibble_len: u8) -> Self { - let (addr_bits, len) = self.unwrap_with_cleaned_id(); - let res = addr_bits.add_nibble(len, nibble, nibble_len); - Self(Some(res)) - } - - pub fn into_inner(self) -> Option<(AF, u8)> { - self.0 - } -} - -impl std::fmt::Display for StrideNodeId { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}", - self.0 - .map(|x| format!("{}-{}", x.0, x.1)) - .unwrap_or_else(|| "-".to_string()) - ) - } -} - -impl std::convert::From> - for PrefixId -{ - fn from(id: StrideNodeId) -> Self { - let (addr_bits, len) = id.0.unwrap(); - PrefixId::new(addr_bits, len) - } -} -#[derive(Debug)] -pub struct AtomicStrideNodeId { - stride_type: StrideType, - index: AtomicU32, - serial: AtomicUsize, - _af: PhantomData, -} - -impl AtomicStrideNodeId { - pub fn new(stride_type: StrideType, index: u32) -> Self { - Self { - stride_type, - index: AtomicU32::new(index), - serial: AtomicUsize::new(1), - _af: PhantomData, - } - } - - pub fn empty() -> Self { - Self { - stride_type: StrideType::Stride4, - index: AtomicU32::new(0), - serial: AtomicUsize::new(0), - _af: PhantomData, - } - } - - // get_serial() and update_serial() are intimately linked in the - // critical section of updating a node. - // - // The layout of the critical section is as follows: - // 1. get_serial() to retrieve the serial number of the node - // 2. do work in the critical section - // 3. store work result in the node - // 4. update_serial() to update the serial number of the node if - // and only if the serial is the same as the one retrieved in step 1. - // 5. check the result of update_serial(). When successful, we're done, - // otherwise, rollback the work result & repeat from step 1. 
- pub fn get_serial(&self) -> usize { - let serial = self.serial.load(Ordering::SeqCst); - std::sync::atomic::fence(Ordering::SeqCst); - serial - } - - pub fn update_serial( - &self, - current_serial: usize, - ) -> Result { - std::sync::atomic::fence(Ordering::Release); - self.serial.compare_exchange( - current_serial, - current_serial + 1, - Ordering::SeqCst, - Ordering::SeqCst, - ) - } - - // The idea is that we can only set the index once. An uninitialized - // index has a value of 0, so if we encounter a non-zero value that - // means somebody else already set it. We'll return an Err(index) with - // the index that was set. - pub fn set_id(&self, index: u32) -> Result { - self.index.compare_exchange( - 0, - index, - Ordering::SeqCst, - Ordering::SeqCst, - ) - } - - pub fn is_empty(&self) -> bool { - self.serial.load(Ordering::SeqCst) == 0 - } - - pub fn into_inner(self) -> (StrideType, Option) { - match self.serial.load(Ordering::SeqCst) { - 0 => (self.stride_type, None), - _ => (self.stride_type, Some(self.index.load(Ordering::SeqCst))), - } - } - - pub fn from_stridenodeid( - stride_type: StrideType, - id: StrideNodeId, - ) -> Self { - let index: AF = id.0.map_or(AF::zero(), |i| i.0); - Self { - stride_type, - index: AtomicU32::new(index.dangerously_truncate_to_u32()), - serial: AtomicUsize::new(usize::from(index != AF::zero())), - _af: PhantomData, - } - } -} - -impl std::convert::From> for usize { - fn from(id: AtomicStrideNodeId) -> Self { - id.index.load(Ordering::SeqCst) as usize - } -} - -//------------------------- Node Collections -------------------------------- - -// pub trait NodeCollection { -// fn insert(&mut self, index: u16, insert_node: StrideNodeId); -// fn to_vec(&self) -> Vec>; -// fn as_slice(&self) -> &[AtomicStrideNodeId]; -// fn empty() -> Self; -// } - -#[derive(Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Copy, Clone)] -pub enum StrideType { - Stride3, - Stride4, - Stride5, -} - -impl From for StrideType { - fn from(level: u8) -> Self 
{ - match level { - 3 => StrideType::Stride3, - 4 => StrideType::Stride4, - 5 => StrideType::Stride5, - _ => panic!("Invalid stride level {}", level), - } - } -} - -impl std::fmt::Display for StrideType { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - StrideType::Stride3 => write!(f, "S3"), - StrideType::Stride4 => write!(f, "S4"), - StrideType::Stride5 => write!(f, "S5"), - } - } -} - -//--------------------- TreeBitMap ------------------------------------------ - -pub struct TreeBitMap< - AF: AddressFamily, - M: Meta, - NB: NodeBuckets, - PB: PrefixBuckets, -> { - pub store: CustomAllocStorage, -} - -impl< - 'a, - AF: AddressFamily, - M: Meta, - NB: NodeBuckets, - PB: PrefixBuckets, - > TreeBitMap -{ - pub fn new( - ) -> Result, Box> { - let root_node = - match CustomAllocStorage::::get_first_stride_size() - { - 3 => SizedStrideNode::Stride3(TreeBitMapNode { - ptrbitarr: AtomicStride2(AtomicU8::new(0)), - pfxbitarr: AtomicStride3(AtomicU16::new(0)), - _af: PhantomData, - }), - 4 => SizedStrideNode::Stride4(TreeBitMapNode { - ptrbitarr: AtomicStride3(AtomicU16::new(0)), - pfxbitarr: AtomicStride4(AtomicU32::new(0)), - _af: PhantomData, - }), - 5 => SizedStrideNode::Stride5(TreeBitMapNode { - ptrbitarr: AtomicStride4(AtomicU32::new(0)), - pfxbitarr: AtomicStride5(AtomicU64::new(0)), - _af: PhantomData, - }), - unknown_stride_size => { - panic!( - "unknown stride size {} encountered in STRIDES array", - unknown_stride_size - ); - } - }; - - Ok(TreeBitMap { - store: CustomAllocStorage::::init(root_node)?, - }) - } - - // Partition for stride 4 - // - // ptr bits never happen in the first half of the bitmap for the stride-size. Consequently the ptrbitarr can be an integer type - // half the size of the pfxbitarr. 
- // - // ptr bit arr (u16) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 x - // pfx bit arr (u32) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - // nibble * 0 1 00 01 10 11 000 001 010 011 100 101 110 111 0000 0001 0010 0011 0100 0101 0110 0111 1000 1001 1010 1011 1100 1101 1110 1111 x - // nibble len offset 0 1 2 3 4 - // - // stride 3: 1 + 2 + 4 + 8 = 15 bits. 2^4 - 1 (1 << 4) - 1. ptrbitarr starts at pos 7 (1 << 3) - 1 - // stride 4: 1 + 2 + 4 + 8 + 16 = 31 bits. 2^5 - 1 (1 << 5) - 1. ptrbitarr starts at pos 15 (1 << 4) - 1 - // stride 5: 1 + 2 + 4 + 8 + 16 + 32 + 64 = 63 bits. 2^6 - 1 - // stride 6: 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 bits. 2^7 - 1 - // stride 7: 1 + 2 + 4 + 8 + 16 + 32 + 64 = 128 = 256 bits. 2^8 - 1126 - // stride 8: 1 + 2 + 4 + 8 + 16 + 32 + 64 + 128 + 256 = 511 bits. 2^9 - 1 - // - // Ex.: - // pfx 65.0.0.252/30 0100_0001_0000_0000_0000_0000_1111_1100 - // - // nibble 1 (pfx << 0) >> 28 0000_0000_0000_0000_0000_0000_0000_0100 - // bit_pos (1 << nibble length) - 1 + nibble 0000_0000_0000_0000_0000_1000_0000_0000 - // - // nibble 2 (pfx << 4) >> 24 0000_0000_0000_0000_0000_0000_0000_0001 - // bit_pos (1 << nibble length) - 1 + nibble 0000_0000_0000_0000_1000_0000_0000_0000 - // ... 
- // nibble 8 (pfx << 28) >> 0 0000_0000_0000_0000_0000_0000_0000_1100 - // bit_pos (1 << nibble length) - 1 + nibble = (1 << 2) - 1 + 2 = 5 0000_0010_0000_0000_0000_0000_0000_0000 - // 5 - 5 - 5 - 4 - 4 - [4] - 5 - // startpos (2 ^ nibble length) - 1 + nibble as usize - - pub fn insert( - &self, - pfx: PrefixId, - record: PublicRecord, - update_path_selections: Option, - // user_data: Option<&::UserDataIn>, - ) -> Result { - let guard = &epoch::pin(); - // let record = MultiMapValue::new(meta, ltime, status); - - if pfx.get_len() == 0 { - let res = self.update_default_route_prefix_meta(record, guard)?; - return Ok(res); - } - - let mut stride_end: u8 = 0; - let mut cur_i = self.store.get_root_node_id(); - let mut level: u8 = 0; - let mut acc_retry_count = 0; - - loop { - let stride = self.store.get_stride_sizes()[level as usize]; - stride_end += stride; - let nibble_len = if pfx.get_len() < stride_end { - stride + pfx.get_len() - stride_end - } else { - stride - }; - - let nibble = AF::get_nibble( - pfx.get_net(), - stride_end - stride, - nibble_len, - ); - let is_last_stride = pfx.get_len() <= stride_end; - let stride_start = stride_end - stride; - // let back_off = crossbeam_utils::Backoff::new(); - - // insert_match! returns the node_id of the next node to be - // traversed. It was created if it did not exist. - let node_result = insert_match![ - // applicable to the whole outer match in the macro - self; - guard; - nibble_len; - nibble; - is_last_stride; - pfx; - record; - update_path_selections; // perform an update for the paths in this record - stride_start; // the length at the start of the stride a.k.a. 
start_bit - stride; - cur_i; - level; - acc_retry_count; - // Strides to create match arm for; stats level - Stride3; 0, - Stride4; 1, - Stride5; 2 - ]; - - match node_result { - Ok((next_id, retry_count)) => { - cur_i = next_id; - level += 1; - acc_retry_count += retry_count; - } - Err(err) => { - if log_enabled!(log::Level::Error) { - error!("{} failing to store (intermediate) node {}. Giving up this node. This shouldn't happen!", - std::thread::current().name().unwrap_or("unnamed-thread"), - cur_i, - ); - error!( - "{} {}", - std::thread::current().name().unwrap_or("unnamed-thread"), - err - ); - } - } - } - } - } - - pub(crate) fn get_root_node_id(&self) -> StrideNodeId { - self.store.get_root_node_id() - } - - // Yes, we're hating this. But, the root node has no room for a serial of - // the prefix 0/0 (the default route), which doesn't even matter, unless, - // UNLESS, somebody wants to store a default route. So we have to store a - // serial for this prefix. The normal place for a serial of any prefix is - // on the pfxvec of its paren. But, hey, guess what, the - // default-route-prefix lives *on* the root node, and, you know, the root - // node doesn't have a parent. We can: - // - Create a type RootTreeBitmapNode with a ptrbitarr with a size one - // bigger than a "normal" TreeBitMapNod for the first stride size. no we - // have to iterate over the root-node type in all matches on - // stride_size, just because we have exactly one instance of the - // RootTreeBitmapNode. So no. - // - Make the `get_pfx_index` method on the implementations of the - // `Stride` trait check for a length of zero and branch if it is and - // return the serial of the root node. Now each and every call to this - // method will have to check a condition for exactly one instance of - // RootTreeBitmapNode. So again, no. - // - The root node only gets used at the beginning of a search query or an - // insert. 
So if we provide two specialised methods that will now how to - // search for the default-route prefix and now how to set serial for - // that prefix and make sure we start searching/inserting with one of - // those specialized methods we're good to go. - fn update_default_route_prefix_meta( - &self, - record: PublicRecord, - guard: &epoch::Guard, - // user_data: Option<&::UserDataIn>, - ) -> Result { - trace!("Updating the default route..."); - - if let Some(root_node) = self.store.retrieve_node_mut( - self.store.get_root_node_id(), - record.multi_uniq_id, - // guard, - ) { - match root_node { - SizedStrideRef::Stride3(_) => { - self.store - .buckets - .get_store3(self.store.get_root_node_id()) - .update_rbm_index(record.multi_uniq_id)?; - } - SizedStrideRef::Stride4(_) => { - self.store - .buckets - .get_store4(self.store.get_root_node_id()) - .update_rbm_index(record.multi_uniq_id)?; - } - SizedStrideRef::Stride5(_) => { - self.store - .buckets - .get_store5(self.store.get_root_node_id()) - .update_rbm_index(record.multi_uniq_id)?; - } - }; - }; - - self.store.upsert_prefix( - PrefixId::new(AF::zero(), 0), - record, - // Do not update the path selection for the default route. - None, - guard, - // user_data, - ) - } - - // This function assembles all entries in the `pfx_vec` of all child nodes - // of the `start_node` into one vec, starting from itself and then - // recursively assembling adding all `pfx_vec`s of its children. 
- fn get_all_more_specifics_for_node( - &self, - start_node_id: StrideNodeId, - found_pfx_vec: &mut Vec>, - ) { - trace!("{:?}", self.store.retrieve_node(start_node_id)); - match self.store.retrieve_node(start_node_id) { - Some(SizedStrideRef::Stride3(n)) => { - found_pfx_vec.extend( - n.pfx_iter(start_node_id).collect::>>(), - ); - - for child_node in n.ptr_iter(start_node_id) { - self.get_all_more_specifics_for_node( - child_node, - found_pfx_vec, - ); - } - } - Some(SizedStrideRef::Stride4(n)) => { - found_pfx_vec.extend( - n.pfx_iter(start_node_id).collect::>>(), - ); - - for child_node in n.ptr_iter(start_node_id) { - self.get_all_more_specifics_for_node( - child_node, - found_pfx_vec, - ); - } - } - Some(SizedStrideRef::Stride5(n)) => { - found_pfx_vec.extend( - n.pfx_iter(start_node_id).collect::>>(), - ); - - for child_node in n.ptr_iter(start_node_id) { - self.get_all_more_specifics_for_node( - child_node, - found_pfx_vec, - ); - } - } - _ => { - panic!("can't find node {}", start_node_id); - } - } - } - - // This function assembles the prefixes of a child node starting on a - // specified bit position in a ptr_vec of `current_node` into a vec, - // then adds all prefixes of these children recursively into a vec and - // returns that. 
- pub(crate) fn get_all_more_specifics_from_nibble( - &'a self, - current_node: &TreeBitMapNode, - nibble: u32, - nibble_len: u8, - base_prefix: StrideNodeId, - ) -> Option>> { - let (cnvec, mut msvec) = current_node.add_more_specifics_at( - nibble, - nibble_len, - base_prefix, - ); - - for child_node in cnvec.iter() { - self.get_all_more_specifics_for_node(*child_node, &mut msvec); - } - Some(msvec) - } -} - -impl< - AF: AddressFamily, - M: Meta, - NB: NodeBuckets, - PB: PrefixBuckets, - > Default for TreeBitMap -{ - fn default() -> Self { - Self::new().unwrap() - } -} - -// This implements the funky stats for a tree -#[cfg(feature = "cli")] -impl< - AF: AddressFamily, - M: Meta, - NB: NodeBuckets, - PB: PrefixBuckets, - > std::fmt::Display for TreeBitMap -{ - fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(_f, "{} prefixes created", self.store.get_prefixes_count())?; - writeln!(_f, "{} nodes created", self.store.get_nodes_count())?; - writeln!(_f)?; - - writeln!( - _f, - "stride division {:?}", - self.store - .get_stride_sizes() - .iter() - .map_while(|s| if s > &0 { Some(*s) } else { None }) - .collect::>() - )?; - - writeln!( - _f, - "level\t[{}] prefixes-occupied/max-prefixes percentage_occupied", - Colour::Green.paint("prefixes") - )?; - - let bars = ["▏", "▎", "▍", "▌", "▋", "▊", "▉"]; - const SCALE: u32 = 5500; - - trace!( - "stride_sizes {:?}", - self.store - .get_stride_sizes() - .iter() - .map_while(|s| if s > &0 { Some(*s) } else { None }) - .enumerate() - .collect::>() - ); - - for crate::stats::CreatedNodes { - depth_level: len, - count: prefix_count, - } in self.store.counters.get_prefix_stats() - { - let max_pfx = u128::overflowing_pow(2, len as u32); - let n = (prefix_count as u32 / SCALE) as usize; - - write!(_f, "/{}\t", len)?; - - for _ in 0..n { - write!(_f, "{}", Colour::Green.paint("█"))?; - } - - write!( - _f, - "{}", - Colour::Green.paint( - bars[((prefix_count as u32 % SCALE) / (SCALE / 7)) - as usize] - ) 
// = scale / 7 - )?; - - write!( - _f, - " {}/{} {:.2}%", - prefix_count, - max_pfx.0, - (prefix_count as f64 / max_pfx.0 as f64) * 100.0 - )?; - - writeln!(_f)?; - } - - Ok(()) - } -} diff --git a/src/local_vec/macros.rs b/src/local_vec/macros.rs deleted file mode 100644 index 2351f4fa..00000000 --- a/src/local_vec/macros.rs +++ /dev/null @@ -1,159 +0,0 @@ -#[macro_export] - -// This macro expands into a match node {} -// with match arms for all SizedStrideNode::Stride[3-8] -// for use in insert() - -#[doc(hidden)] -macro_rules! match_node_for_strides_with_local_vec { - ( - $self: ident; - // $user_data: ident; - $nibble_len: expr; - $nibble: expr; - $is_last_stride: expr; - $pfx: ident; - $cur_i: expr; - $level: expr; - // $enum: ident; - // The strides to generate match arms for, - // $variant is the name of the enum varian (Stride[3..8]) and - // $len is the index of the stats level, so 0..5 - $( $variant: ident; $stats_level: expr ), * - ) => { - match std::mem::take($self.retrieve_node_mut($cur_i)?) { - $( SizedStrideNode::$variant(mut current_node) => - match current_node.eval_node_or_prefix_at( - $nibble, - $nibble_len, - $self.strides.get(($level + 1) as usize), - $is_last_stride, - ) { - NewNodeOrIndex::NewNode(n, bit_id) => { - $self.stats[$stats_level].inc($level); // Stride3 logs to stats[0], Stride4 logs to stats[1], etc. 
- // let new_id = Store::NodeType::new(&bit_id,&$cur_i.get_part()); - let new_id = $self.store.acquire_new_node_id(bit_id, $cur_i.get_part()); - current_node.ptr_vec.push(new_id); - current_node.ptr_vec.sort(); - let i = $self.store_node(Some(new_id), n).unwrap(); - - $self.store.update_node($cur_i,SizedStrideNode::$variant(current_node)); - - // let _default_val = std::mem::replace( - // $self.retrieve_node_mut($cur_i).unwrap(), - // SizedStrideNode::$variant(current_node)); - Some(i) - } - NewNodeOrIndex::ExistingNode(i) => { - $self.store.update_node($cur_i,SizedStrideNode::$variant(current_node)); - - // let _default_val = std::mem::replace( - // $self.retrieve_node_mut($cur_i).unwrap(), - // SizedStrideNode::$variant(current_node)); - Some(i) - }, - NewNodeOrIndex::NewPrefix => { - - // let pfx_len = $pfx.len.clone(); - // let pfx_net = $pfx.net.clone(); - // let i = $self.store_prefix($pfx)?; - // Construct the SortKey by default from the nibble and - // nibble_len, so that nibble_len determines the base - // position (2^nibble_len) and then nibble is the offset - // from the base position. - let new_id = $self.store.acquire_new_prefix_id(&((1 << $nibble_len) + $nibble as u16).into(), &$pfx); - $self.stats[$stats_level].inc_prefix_count($level); - - current_node - .pfx_vec - .push(new_id); - current_node.pfx_vec.sort(); - - $self.store_prefix($pfx)?; - $self.store.update_node($cur_i,SizedStrideNode::$variant(current_node)); - - // let _default_val = std::mem::replace( - // $self.retrieve_node_mut($cur_i).unwrap(), - // SizedStrideNode::$variant(current_node), - // ); - return Ok(()); - } - NewNodeOrIndex::ExistingPrefix(pfx_idx) => { - // ExistingPrefix is guaranteed to only happen at the last stride, - // so we can return from here. - // If we don't then we cannot move pfx.meta into the update_prefix_meta function, - // since the compiler can't figure out that it will happen only once. 
- $self.update_prefix_meta(pfx_idx, $pfx.meta)?; - $self.store.update_node($cur_i,SizedStrideNode::$variant(current_node)); - - // let _default_val = std::mem::replace( - // $self.retrieve_node_mut($cur_i).unwrap(), - // // expands into SizedStrideNode::Stride[3-8](current_node) - // SizedStrideNode::$variant(current_node), - // ); - return Ok(()); - } - } )*, - } - }; -} - -// example expansion for Stride4: - -// SizedStrideNode::Stride4(mut current_node) => match current_node -// .eval_node_or_prefix_at( -// nibble, -// nibble_len, -// // No, next_stride.is_none does *not* mean that it's the last stride -// // There may very well be a Some(next_stride), next_stride goes all the -// // way to the end of the length of the network address space (like 32 bits for IPv4 etc), -// // whereas the last stride stops at the end of the prefix length. -// // `is_last_stride` is an indicator for the upsert function to write the prefix in the -// // node's vec. -// next_stride, -// pfx_len <= stride_end, -// ) { -// NewNodeOrIndex::NewNode(n, bit_id) => { -// self.stats[1].inc(level); // [1] here corresponds to stats for Stride4 -// let i = self.store_node(n); -// current_node.ptr_vec.push(NodeId::new(bit_id, i)); -// current_node.ptr_vec.sort(); -// let _default_val = std::mem::replace( -// self.retrieve_node_mut(cur_i).unwrap(), -// SizedStrideNode::Stride4(current_node), -// ); -// Some(i) -// } -// NewNodeOrIndex::ExistingNode(i) => { -// let _default_val = std::mem::replace( -// self.retrieve_node_mut(cur_i).unwrap(), -// SizedStrideNode::Stride4(current_node), -// ); -// Some(i) -// } -// NewNodeOrIndex::NewPrefix => { -// let i = self.store_prefix(pfx); -// self.stats[1].inc_prefix_count(level); -// current_node -// .pfx_vec -// .push(((pfx_net >> (AF::BITS - pfx_len) as usize), i)); -// current_node.pfx_vec.sort(); -// let _default_val = std::mem::replace( -// self.retrieve_node_mut(cur_i).unwrap(), -// SizedStrideNode::Stride4(current_node), -// ); -// return Ok(()); 
-// } -// NewNodeOrIndex::ExistingPrefix(pfx_idx) => { -// // ExitingPrefix is guaranteed to only happen at the last stride, -// // so we can return from here. -// // If we don't then we cannot move pfx.meta into the update_prefix_meta function, -// // since the compiler can't figure out that it will happen only once. -// self.update_prefix_meta(pfx_idx, pfx.meta)?; -// let _default_val = std::mem::replace( -// self.retrieve_node_mut(cur_i).unwrap(), -// SizedStrideNode::Stride4(current_node), -// ); -// return Ok(()); -// } -// }, diff --git a/src/local_vec/mod.rs b/src/local_vec/mod.rs deleted file mode 100644 index a319a7aa..00000000 --- a/src/local_vec/mod.rs +++ /dev/null @@ -1,12 +0,0 @@ -pub mod tree; -pub mod query; -pub mod node; -pub mod storage_backend; -pub mod store; - -pub(crate) use tree::TreeBitMap; - -#[macro_use] -mod macros; - -mod tests; \ No newline at end of file diff --git a/src/local_vec/node.rs b/src/local_vec/node.rs deleted file mode 100644 index 96a2323f..00000000 --- a/src/local_vec/node.rs +++ /dev/null @@ -1,490 +0,0 @@ -use std::{ - fmt::{Binary, Debug}, - marker::PhantomData, -}; - -use crate::af::Zero; -use crate::node_id::SortableNodeId; -pub use crate::stride::*; -use crate::synth_int::{U256, U512}; - -use crate::local_vec::tree::{NewNodeOrIndex, SizedStrideNode}; - -use crate::af::AddressFamily; - -use super::query::PrefixId; - -pub struct TreeBitMapNode -where - S: Stride, - ::PtrSize: Debug + Binary + Copy, - AF: AddressFamily, - NodeId: SortableNodeId + Copy, -{ - pub ptrbitarr: ::PtrSize, - pub pfxbitarr: S, - // The vec of prefixes hosted by this node, - // referenced by (bit_id, global prefix index) - // This is the exact same type as for the NodeIds, - // so we reuse that. - pub pfx_vec: Vec, - // The vec of child nodes hosted by this - // node, referenced by (ptrbitarr_index, global vec index) - // We need the u16 (ptrbitarr_index) to sort the - // vec that's stored in the node. 
- pub ptr_vec: Vec, - pub _af: PhantomData, -} - -impl Debug for TreeBitMapNode -where - AF: AddressFamily, - S: Stride, - ::PtrSize: Debug + Binary + Copy, - NodeId: SortableNodeId + Copy, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("TreeBitMapNode") - .field("ptrbitarr", &self.ptrbitarr) - .field("pfxbitarr", &self.pfxbitarr) - .field("ptr_vec", &self.ptr_vec) - .field("pfx_vec", &self.pfx_vec) - .finish() - } -} - -impl TreeBitMapNode -where - AF: AddressFamily, - S: Stride - + std::ops::BitAnd - + std::ops::BitOr - + Zero, - ::PtrSize: Debug - + Binary - + Copy - + std::ops::BitAnd - + PartialOrd - + Zero, - NodeId: SortableNodeId + Copy, -{ - // Inspects the stride (nibble, nibble_len) to see it there's - // already a child node (if not at the last stride) or a prefix - // (if it's the last stride). - // - // Returns one of: - // - A newly created child node. - // - The index of the existing child node in the global `nodes` vec - // - A newly created Prefix - // - The index of the existing prefix in the global `prefixes` vec - pub(crate) fn eval_node_or_prefix_at( - &mut self, - nibble: u32, - nibble_len: u8, - next_stride: Option<&u8>, - is_last_stride: bool, - ) -> NewNodeOrIndex { - let bit_pos = S::get_bit_pos(nibble, nibble_len); - let new_node: SizedStrideNode; - - // Check that we're not at the last stride (pfx.len <= stride_end), - // Note that next_stride may have a value, but we still don't want to - // continue, because we've exceeded the length of the prefix to - // be inserted. - // Also note that a nibble_len < S::BITS (a smaller than full nibble) - // does indeed indicate the last stride has been reached, but the - // reverse is *not* true, i.e. a full nibble can also be the last - // stride. 
Hence the `is_last_stride` argument - if !is_last_stride { - // We are not at the last stride - // Check it the ptr bit is already set in this position - if (S::into_stride_size(self.ptrbitarr) & bit_pos) == S::zero() { - // Nope, set it and create a child node - self.ptrbitarr = S::into_ptrbitarr_size( - bit_pos | S::into_stride_size(self.ptrbitarr), - ); - - match next_stride.unwrap() { - 3_u8 => { - new_node = SizedStrideNode::Stride3(TreeBitMapNode { - ptrbitarr: ::PtrSize::zero(), - pfxbitarr: Stride3::zero(), - pfx_vec: vec![], - ptr_vec: vec![], - _af: PhantomData, - }); - } - 4_u8 => { - new_node = SizedStrideNode::Stride4(TreeBitMapNode { - ptrbitarr: ::PtrSize::zero(), - pfxbitarr: Stride4::zero(), - pfx_vec: vec![], - ptr_vec: vec![], - _af: PhantomData, - }); - } - 5_u8 => { - new_node = SizedStrideNode::Stride5(TreeBitMapNode { - ptrbitarr: ::PtrSize::zero(), - pfxbitarr: Stride5::zero(), - pfx_vec: vec![], - ptr_vec: vec![], - _af: PhantomData, - }); - } - 6_u8 => { - new_node = SizedStrideNode::Stride6(TreeBitMapNode { - ptrbitarr: ::PtrSize::zero(), - pfxbitarr: Stride6::zero(), - pfx_vec: vec![], - ptr_vec: vec![], - _af: PhantomData, - }); - } - 7_u8 => { - new_node = SizedStrideNode::Stride7(TreeBitMapNode { - ptrbitarr: 0_u128, - pfxbitarr: U256(0_u128, 0_u128), - pfx_vec: vec![], - ptr_vec: vec![], - _af: PhantomData, - }); - } - 8_u8 => { - new_node = SizedStrideNode::Stride8(TreeBitMapNode { - ptrbitarr: U256(0_u128, 0_u128), - pfxbitarr: U512(0_u128, 0_u128, 0_u128, 0_u128), - pfx_vec: vec![], - ptr_vec: vec![], - _af: PhantomData, - }); - } - _ => { - panic!("can't happen"); - } - }; - - // we can return bit_pos.leading_zeros() since bit_pos is the bitmap that - // points to the current bit in ptrbitarr (it's **not** the prefix of the node!), - // so the number of zeros in front of it should always be unique and describes - // the index of this node in the ptrbitarr. 
- // ex.: - // In a stride3 (ptrbitarr lenght is 8): - // bit_pos 0001 0000 - // so this is the fourth bit, so points to index = 3 - return NewNodeOrIndex::NewNode( - new_node, - (bit_pos.leading_zeros() as u16).into(), - ); - } - } else { - // only at the last stride do we create the bit in the prefix bitmap, - // and only if it doesn't exist already - if self.pfxbitarr & bit_pos - == ::Output::zero() - { - self.pfxbitarr = bit_pos | self.pfxbitarr; - return NewNodeOrIndex::NewPrefix; - } - return NewNodeOrIndex::ExistingPrefix( - self.pfx_vec - [S::get_pfx_index(self.pfxbitarr, nibble, nibble_len)] - .get_part(), - ); - } - - NewNodeOrIndex::ExistingNode( - self.ptr_vec[S::get_ptr_index(self.ptrbitarr, nibble)], - ) - } - - //-------- Search nibble functions -------------------------------------- - - // This function looks for the longest marching prefix in the provided nibble, - // by iterating over all the bits in it and comparing that with the appriopriate - // bytes from the requested prefix. - // It mutates the `less_specifics_vec` that was passed in to hold all the prefixes - // found along the way. - pub(crate) fn search_stride_for_longest_match_at( - &self, - search_pfx: PrefixId, - mut nibble: u32, - nibble_len: u8, - start_bit: u8, - less_specifics_vec: &mut Option>, - ) -> (Option, Option) { - let mut bit_pos = S::get_bit_pos(nibble, nibble_len); - let mut found_pfx = None; - - for n_l in 1..(nibble_len + 1) { - // Move the bit in the right position. - nibble = AddressFamily::get_nibble( - search_pfx.get_net(), - start_bit, - n_l, - ); - bit_pos = S::get_bit_pos(nibble, n_l); - - // Check if the prefix has been set, if so select the prefix. This is not - // necessarily the final prefix that will be returned. 
- - // Check it there's a prefix matching in this bitmap for this nibble - if self.pfxbitarr & bit_pos > S::zero() { - let f_pfx = self.pfx_vec - [S::get_pfx_index(self.pfxbitarr, nibble, n_l)]; - - // Receiving a less_specifics_vec means that the user wants to have - // all the last-specific prefixes returned, so add the found prefix. - if let Some(ls_vec) = less_specifics_vec { - if !(search_pfx.get_len() <= start_bit + nibble_len - || (S::into_stride_size(self.ptrbitarr) - & S::get_bit_pos(nibble, nibble_len)) - == S::zero()) - { - ls_vec.push(f_pfx); - } - } - - found_pfx = Some(f_pfx); - } - } - - // Check if this the last stride, or if they're no more children to go to, - // if so return what we found up until now. - if search_pfx.get_len() <= start_bit + nibble_len - || (S::into_stride_size(self.ptrbitarr) & bit_pos) == S::zero() - // No children or at the end, return the definitive LMP we found. - { - return ( - None, /* no more children */ - found_pfx, /* The definitive LMP if any */ - ); - } - - // There's another child, return it together with the preliminary LMP we found. - ( - Some(self.ptr_vec[S::get_ptr_index(self.ptrbitarr, nibble)]), /* The node that has children the next stride */ - found_pfx, - ) - } - - // This function looks for the exactly matching prefix in the provided nibble. - // It doesn't needd to iterate over anything it just compares the complete nibble, with - // the appropriate bits in the requested prefix. - // Although this is rather efficient, there's no way to collect less-specific prefixes from - // the search prefix. - pub(crate) fn search_stride_for_exact_match_at( - &self, - search_pfx: PrefixId, - nibble: u32, - nibble_len: u8, - start_bit: u8, - _: &mut Option>, - ) -> (Option, Option) { - // This is an exact match, so we're only considering the position of the full nibble. - let bit_pos = S::get_bit_pos(nibble, nibble_len); - let mut found_pfx = None; - let mut found_child = None; - - // Is this the last nibble? 
- // Otherwise we're not looking for a prefix (exact matching only lives at last nibble) - match search_pfx.get_len() <= start_bit + nibble_len { - // We're at the last nibble. - true => { - // Check for an actual prefix at the right position, i.e. consider the complete nibble - if self.pfxbitarr & bit_pos > S::zero() { - found_pfx = Some( - self.pfx_vec[S::get_pfx_index( - self.pfxbitarr, - nibble, - nibble_len, - )], - ); - } - } - // We're not at the last nibble. - false => { - // Check for a child node at the right position, i.e. consider the complete nibble. - if (S::into_stride_size(self.ptrbitarr) & bit_pos) > S::zero() - { - found_child = Some( - self.ptr_vec - [S::get_ptr_index(self.ptrbitarr, nibble)], - ); - } - } - } - - ( - found_child, /* The node that has children in the next stride, if any */ - found_pfx, /* The exactly matching prefix, if any */ - ) - } - - // This function looks for the exactly matching prefix in the provided nibble, - // just like the one above, but this *does* iterate over all the bytes in the nibble to collect - // the less-specific prefixes of the the search prefix. - // This is of course slower, so it should only be used when the user explicitly requests less-specifics. - pub(crate) fn search_stride_for_exact_match_with_less_specifics_at( - &self, - search_pfx: PrefixId, - mut nibble: u32, - nibble_len: u8, - start_bit: u8, - less_specifics_vec: &mut Option>, - ) -> (Option, Option) { - let mut bit_pos = S::get_bit_pos(nibble, nibble_len); - let mut found_pfx = None; - - let ls_vec = less_specifics_vec - .as_mut() - .expect("You shouldn't call this function without a `less_specifics_vec` buffer. Supply one when calling this function or use `search_stride_for_exact_match_at`"); - - for n_l in 1..(nibble_len + 1) { - // Move the bit in the right position. 
- nibble = AddressFamily::get_nibble( - search_pfx.get_net(), - start_bit, - n_l, - ); - bit_pos = S::get_bit_pos(nibble, n_l); - - // Check if the prefix has been set, if so select the prefix. This is not - // necessarily the final prefix that will be returned. - - // Check it there's a prefix matching in this bitmap for this nibble, - - if self.pfxbitarr & bit_pos > S::zero() { - // since we want an exact match only, we will fill the prefix field only - // if we're exactly at the last bit of the nibble - if n_l == nibble_len { - found_pfx = Some( - self.pfx_vec - [S::get_pfx_index(self.pfxbitarr, nibble, n_l)], - ); - } - - // Receiving a less_specifics_vec means that the user wants to have - // all the last-specific prefixes returned, so add the found prefix. - ls_vec.push( - self.pfx_vec - [S::get_pfx_index(self.pfxbitarr, nibble, n_l)], - ); - } - } - - if found_pfx.is_none() { - // no prefix here, clear out all of the prefixes we found along the way, - // since it doesn't make sense to return less-specifics if we don't have a exact match. - ls_vec.clear(); - } - - // Check if this the last stride, or if they're no more children to go to, - // if so return what we found up until now. - match search_pfx.get_len() <= start_bit + nibble_len - || (S::into_stride_size(self.ptrbitarr) & bit_pos) - == ::Output::zero() - { - // No children or at the end, return the definitive LMP we found. - true => ( - None, /* no more children */ - found_pfx, /* The definitive LMP if any */ - ), - // There's another child, we won't return the found_pfx, since we're not - // at the last nibble and we want an exact match only. - false => ( - Some(self.ptr_vec[S::get_ptr_index(self.ptrbitarr, nibble)]), /* The node that has children the next stride */ - None, - ), - } - } - - // Search a stride for more-specific prefixes and child nodes containing - // more specifics for `search_prefix`. 
- pub fn add_more_specifics_at( - &self, - nibble: u32, - nibble_len: u8, - ) -> ( - // Option, /* the node with children in the next stride */ - Vec, /* child nodes with more more-specifics in this stride */ - Vec, /* more-specific prefixes in this stride */ - ) { - let mut found_children_with_more_specifics = vec![]; - let mut found_more_specifics_vec: Vec = vec![]; - - // This is an exact match, so we're only considering the position of the full nibble. - let mut bit_pos = S::get_bit_pos(nibble, nibble_len); - let mut found_child = None; - - // Is there also a child node here? - // Note that even without a child node, there may be more specifics further up in this - // pfxbitarr or children in this ptrbitarr. - if (S::into_stride_size(self.ptrbitarr) & bit_pos) > S::zero() { - found_child = - Some(self.ptr_vec[S::get_ptr_index(self.ptrbitarr, nibble)]); - } - - if let Some(child) = found_child { - found_children_with_more_specifics.push(child); - } - - // println!("{}..{}", nibble_len + start_bit, S::STRIDE_LEN + start_bit); - // println!("start nibble: {:032b}", nibble); - // println!("extra bit: {}", (S::STRIDE_LEN - nibble_len)); - - // We're expanding the search for more-specifics bit-by-bit. - // `ms_nibble_len` is the number of bits including the original nibble we're considering, - // e.g. if our prefix has a length of 25 and we've all strides sized 4, - // We would end up with a last nibble_len of 1. - // `ms_nibble_len` will expand then from 2 up and till 4. - // ex.: - // nibble: 1 , (nibble_len: 1) - // Iteration: - // ms_nibble_len=1,n_l=0: 10, n_l=1: 11 - // ms_nibble_len=2,n_l=0: 100, n_l=1: 101, n_l=2: 110, n_l=3: 111 - // ms_nibble_len=3,n_l=0: 1000, n_l=1: 1001, n_l=2: 1010, ..., n_l=7: 1111 - - for ms_nibble_len in nibble_len + 1..S::STRIDE_LEN + 1 { - // iterate over all the possible values for this `ms_nibble_len`, - // e.g. two bits can have 4 different values. 
- for n_l in 0..(1 << (ms_nibble_len - nibble_len)) { - // move the nibble left with the amount of bits we're going to loop over. - // e.g. a stride of size 4 with a nibble 0000 0000 0000 0011 becomes 0000 0000 0000 1100 - // then it will iterate over ...1100,...1101,...1110,...1111 - let ms_nibble = - (nibble << (ms_nibble_len - nibble_len)) + n_l as u32; - bit_pos = S::get_bit_pos(ms_nibble, ms_nibble_len); - - // println!("nibble: {:032b}", ms_nibble); - // println!("ptrbitarr: {:032b}", self.ptrbitarr); - // println!("bitpos: {:032b}", bit_pos); - - if (S::into_stride_size(self.ptrbitarr) & bit_pos) > S::zero() - { - found_children_with_more_specifics.push( - self.ptr_vec - [S::get_ptr_index(self.ptrbitarr, ms_nibble)], - ); - } - - if self.pfxbitarr & bit_pos > S::zero() { - found_more_specifics_vec.push( - self.pfx_vec[S::get_pfx_index( - self.pfxbitarr, - ms_nibble, - ms_nibble_len, - )], - ); - } - } - } - - ( - // We're done here, the caller should now go over all nodes in found_children_with_more_specifics vec and add - // ALL prefixes found in there. 
- found_children_with_more_specifics, - found_more_specifics_vec, - ) - } -} diff --git a/src/local_vec/query.rs b/src/local_vec/query.rs deleted file mode 100644 index 1b53a563..00000000 --- a/src/local_vec/query.rs +++ /dev/null @@ -1,795 +0,0 @@ -use crate::local_vec::node::TreeBitMapNode; -use crate::local_vec::storage_backend::*; -use crate::local_vec::tree::{SizedStrideNode, TreeBitMap}; -use crate::node_id::SortableNodeId; -use crate::prefix_record::{PublicRecord, RecordSet, RecordSingleSet}; -use crate::{MatchOptions, MatchType}; - -use crate::af::AddressFamily; -use inetnum::addr::Prefix; - -#[derive(Hash, Eq, PartialEq, Debug, Copy, Clone)] -pub struct PrefixId((AF, u8)); - -impl PrefixId { - pub fn new(net: AF, len: u8) -> Self { - PrefixId((net, len)) - } - - pub fn get_net(&self) -> AF { - self.0.0 - } - - pub fn get_len(&self) -> u8 { - self.0.1 - } -} - -//------------- QuerySingleResult -------------------------------------------- - -#[derive(Clone, Debug)] -pub struct QuerySingleResult { - pub match_type: MatchType, - pub prefix: Option, - pub prefix_meta: Option, - pub less_specifics: Option>, - pub more_specifics: Option>, -} - -impl std::fmt::Display - for QuerySingleResult -{ - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let pfx_str = match self.prefix { - Some(pfx) => format!("{}", pfx), - None => "".to_string(), - }; - let pfx_meta_str = match &self.prefix_meta { - Some(pfx_meta) => format!("{}", pfx_meta), - None => "".to_string(), - }; - write!( - f, - "match_type: {}\nprefix: {}\nmetadata: {}\nless_specifics: {}\nmore_specifics: {}", - self.match_type, - pfx_str, - pfx_meta_str, - if let Some(ls) = self.less_specifics.as_ref() { - format!("{}", ls) - } else { - "".to_string() - }, - if let Some(ms) = self.more_specifics.as_ref() { - format!("{}", ms) - } else { - "".to_string() - }, - ) - } -} - -//------------- QueryResult -------------------------------------------- - -#[derive(Clone, Debug)] -pub struct 
QueryResult { - pub match_type: MatchType, - pub prefix: Option, - pub prefix_meta: PublicRecord, - pub less_specifics: Option>, - pub more_specifics: Option>, -} - -impl std::fmt::Display - for QueryResult -{ - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let pfx_str = match self.prefix { - Some(pfx) => format!("{}", pfx), - None => "".to_string(), - }; - writeln!( - f, - "match_type: {}\nprefix: {}\nmetadata: {}\nless_specifics: {}\nmore_specifics: {}", - self.match_type, - pfx_str, - self.prefix_meta, - if let Some(ls) = self.less_specifics.as_ref() { - format!("{}", ls) - } else { - "".to_string() - }, - if let Some(ms) = self.more_specifics.as_ref() { - format!("{}", ms) - } else { - "".to_string() - }, - ) - } -} - -//------------ Longest Matching Prefix ------------------------------------- - -impl<'a, Store> TreeBitMap -where - Store: StorageBackend, -{ - // In a LMP search we have to go over all the nibble lengths in the stride up - // until the value of the actual nibble length were looking for (until we reach - // stride length for all strides that aren't the last) and see if the - // prefix bit in that posision is set. - // Note that this does not search for prefixes with length 0 (which would always - // match). - // So for matching a nibble 1010, we have to search for 1, 10, 101 and 1010 on - // resp. 
position 1, 5, 12 and 25: - // ↓ ↓ ↓ ↓ - // pfx bit arr (u32) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - // nibble * 0 1 00 01 10 11 000 001 010 011 100 101 110 111 0000 0001 0010 0011 0100 0101 0110 0111 1000 1001 1010 1011 1100 1101 1110 1111 x - // nibble len offset 0 1 2 3 4 - - pub(crate) fn match_prefix( - &'a self, - search_pfx: PrefixId, - options: &MatchOptions, - ) -> QuerySingleResult { - let mut stride_end = 0; - - let mut node = self.retrieve_node(self.get_root_node_id()).unwrap(); - let mut nibble; - let mut nibble_len; - - //---- result values ------------------------------------------------ - - // These result values are kept in mutable variables, and assembled - // at the end into a QueryResult struct. This proved to result in the - // most efficient code, where we don't have to match on - // SizedStrideNode over and over. The `match_type` field in the - // QueryResult is computed at the end. - - // The final prefix - let mut match_prefix_idx: Option< - <::NodeType as SortableNodeId>::Part, - > = None; - - // The indexes of the less-specifics - let mut less_specifics_vec = if options.include_less_specifics { - Some(Vec::::new()) - } else { - None - }; - - // The indexes of the more-specifics. - let mut more_specifics_vec = if options.include_more_specifics { - Some(Vec::::new()) - } else { - None - }; - - //-------------- Stride Processing ---------------------------------- - - // We're going to iterate over all the strides in the treebitmap (so - // up to the last bit in the max prefix lentgth for that tree). When - // a final prefix is found or we get to the end of the strides, - // depending on the options.match_type (the type requested by the user). - // we ALWAYS break out of the loop. 
WE ALWAYS BREAK OUT OF THE LOOP - // Just before breaking some processing is done inside the loop before - // the break (looking up more-specifics mainly), which looks at bit - // repetitious, but again it's been done like that to avoid having to - // match over a SizedStrideNode again in the `post-processing` - // section. - - for stride in self.strides.iter() { - stride_end += stride; - let last_stride = search_pfx.get_len() < stride_end; - - nibble_len = if last_stride { - stride + search_pfx.get_len() - stride_end - } else { - *stride - }; - - // Shift left and right to set the bits to zero that are not in - // the nibble we're handling here. - nibble = AddressFamily::get_nibble( - search_pfx.get_net(), - stride_end - stride, - nibble_len, - ); - - match node { - SizedStrideNode::Stride3(current_node) => { - let search_fn = match options.match_type { - MatchType::ExactMatch => { - if options.include_less_specifics { - TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at - } else { - TreeBitMapNode::search_stride_for_exact_match_at - } - } - MatchType::LongestMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - MatchType::EmptyMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - }; - - // This whole match assumes that: - // - if the first value in the return tuple of - // `search_fn` holds a value, then we need to continue - // searching by following the node contained in the - // value. - // - The second value in the tuple holds the prefix that - // was found. - // The less_specifics_vec is mutated by `search_fn` to - // hold the prefixes found along the way, in the cases - // where `include_less_specifics` was requested by the user. - match search_fn( - current_node, - search_pfx, - nibble, - nibble_len, - stride_end - stride, - &mut less_specifics_vec, - ) { - // This and the next match will handle all - // intermediary nodes, but they might also handle - // exit nodes. 
- (Some(n), Some(pfx_idx)) => { - match_prefix_idx = Some(pfx_idx.get_part()); - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (Some(n), None) => { - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - // This handles exact and longest matches: there are - // no more children, but there is a prefix on this node. - (None, Some(pfx_idx)) => { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - match_prefix_idx = Some(pfx_idx.get_part()); - break; - } - // This handles cases where there's no prefix (and no - // child) for exact match or longest match, the empty - // match - which doesn't care about actually finding - // a prefix - just continues in search of - // more-specifics. - (None, None) => { - match options.match_type { - MatchType::EmptyMatch => { - // To make sure we don't process this - // match arm more then once, we return - // early here. 
- more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - - match_prefix_idx = None; - break; - } - MatchType::LongestMatch => {} - MatchType::ExactMatch => { - match_prefix_idx = None; - } - } - break; - } - } - } - SizedStrideNode::Stride4(current_node) => { - let search_fn = match options.match_type { - MatchType::ExactMatch => { - if options.include_less_specifics { - TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at - } else { - TreeBitMapNode::search_stride_for_exact_match_at - } - } - MatchType::LongestMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - MatchType::EmptyMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - }; - match search_fn( - current_node, - search_pfx, - nibble, - nibble_len, - stride_end - stride, - &mut less_specifics_vec, - ) { - (Some(n), Some(pfx_idx)) => { - match_prefix_idx = Some(pfx_idx.get_part()); - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (Some(n), None) => { - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (None, Some(pfx_idx)) => { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - match_prefix_idx = Some(pfx_idx.get_part()); - break; - } - (None, None) => { - match options.match_type { - MatchType::EmptyMatch => { - // To make sure we don't process this match arm more then once, we - // return early here. 
- more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - - match_prefix_idx = None; - break; - } - MatchType::LongestMatch => {} - MatchType::ExactMatch => { - match_prefix_idx = None; - } - } - break; - } - } - } - SizedStrideNode::Stride5(current_node) => { - let search_fn = match options.match_type { - MatchType::ExactMatch => { - if options.include_less_specifics { - TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at - } else { - TreeBitMapNode::search_stride_for_exact_match_at - } - } - MatchType::LongestMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - MatchType::EmptyMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - }; - match search_fn( - current_node, - search_pfx, - nibble, - nibble_len, - stride_end - stride, - &mut less_specifics_vec, - ) { - (Some(n), Some(pfx_idx)) => { - match_prefix_idx = Some(pfx_idx.get_part()); - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (Some(n), None) => { - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (None, Some(pfx_idx)) => { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - match_prefix_idx = Some(pfx_idx.get_part()); - break; - } - (None, None) => { - match options.match_type { - MatchType::EmptyMatch => { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - - match_prefix_idx = None; - break; - } - MatchType::LongestMatch => {} - MatchType::ExactMatch => { - match_prefix_idx = 
None; - } - } - break; - } - } - } - SizedStrideNode::Stride6(current_node) => { - let search_fn = match options.match_type { - MatchType::ExactMatch => { - if options.include_less_specifics { - TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at - } else { - TreeBitMapNode::search_stride_for_exact_match_at - } - } - MatchType::LongestMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - MatchType::EmptyMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - }; - match search_fn( - current_node, - search_pfx, - nibble, - nibble_len, - stride_end - stride, - &mut less_specifics_vec, - ) { - (Some(n), Some(pfx_idx)) => { - match_prefix_idx = Some(pfx_idx.get_part()); - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (Some(n), None) => { - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (None, Some(pfx_idx)) => { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - match_prefix_idx = Some(pfx_idx.get_part()); - break; - } - (None, None) => { - match options.match_type { - MatchType::EmptyMatch => { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - - match_prefix_idx = None; - break; - } - MatchType::LongestMatch => {} - MatchType::ExactMatch => { - match_prefix_idx = None; - } - } - break; - } - } - } - SizedStrideNode::Stride7(current_node) => { - let search_fn = match options.match_type { - MatchType::ExactMatch => { - if options.include_less_specifics { - 
TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at - } else { - TreeBitMapNode::search_stride_for_exact_match_at - } - } - MatchType::LongestMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - MatchType::EmptyMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - }; - match search_fn( - current_node, - search_pfx, - nibble, - nibble_len, - stride_end - stride, - &mut less_specifics_vec, - ) { - (Some(n), Some(pfx_idx)) => { - match_prefix_idx = Some(pfx_idx.get_part()); - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (Some(n), None) => { - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (None, Some(pfx_idx)) => { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - match_prefix_idx = Some(pfx_idx.get_part()); - break; - } - (None, None) => { - match options.match_type { - MatchType::EmptyMatch => { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - - match_prefix_idx = None; - break; - } - MatchType::LongestMatch => {} - MatchType::ExactMatch => { - match_prefix_idx = None; - } - } - break; - } - } - } - SizedStrideNode::Stride8(current_node) => { - let search_fn = match options.match_type { - MatchType::ExactMatch => { - if options.include_less_specifics { - TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at - } else { - TreeBitMapNode::search_stride_for_exact_match_at - } - } - MatchType::LongestMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - 
MatchType::EmptyMatch => { - TreeBitMapNode::search_stride_for_longest_match_at - } - }; - match search_fn( - current_node, - search_pfx, - nibble, - nibble_len, - stride_end - stride, - &mut less_specifics_vec, - ) { - (Some(n), Some(pfx_idx)) => { - match_prefix_idx = Some(pfx_idx.get_part()); - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (Some(n), None) => { - node = self.retrieve_node(n).unwrap(); - if last_stride { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - break; - } - } - (None, Some(pfx_idx)) => { - if options.include_more_specifics { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - } - match_prefix_idx = Some(pfx_idx.get_part()); - break; - } - (None, None) => { - match options.match_type { - MatchType::EmptyMatch => { - more_specifics_vec = self - .get_all_more_specifics_from_nibble( - current_node, - nibble, - nibble_len, - ); - - match_prefix_idx = None; - break; - } - MatchType::LongestMatch => {} - MatchType::ExactMatch => { - match_prefix_idx = None; - } - } - break; - } - } - } - } - } - - //------------------ post-processing -------------------------------- - - // If the above loop finishes (so not hitting a break) we have processed all strides and have found a child node and maybe a prefix. - // Now we will look up more-specifics for longest-matching prefixes that were found in the last stride only, - // Note that still any of the match_types (as specified by the user, not the return type) may end up here. 
- - let mut match_type: MatchType = MatchType::EmptyMatch; - let mut prefix = None; - if let Some(pfx_idx) = match_prefix_idx { - prefix = self.retrieve_prefix(pfx_idx); - match_type = if prefix.unwrap().len == search_pfx.get_len() { - MatchType::ExactMatch - } else { - MatchType::LongestMatch - } - }; - - QuerySingleResult { - prefix: if let Some(pfx) = prefix { - Prefix::new(pfx.net.into_ipaddr(), pfx.len).ok() - } else { - None - }, - prefix_meta: prefix.map(|pfx| pfx.meta.clone()), - match_type, - less_specifics: if options.include_less_specifics { - less_specifics_vec.map(|vec| { - vec.iter() - .map(|p| self.retrieve_prefix(p.get_part()).unwrap()) - .collect::>() - }) - } else { - None - }, - more_specifics: if options.include_more_specifics { - more_specifics_vec.map(|vec| { - vec.iter() - .map(|p| self.retrieve_prefix(p.get_part()).unwrap()) - .collect() - }) - } else { - None - }, - } - } -} diff --git a/src/local_vec/storage_backend.rs b/src/local_vec/storage_backend.rs deleted file mode 100644 index 7da0f319..00000000 --- a/src/local_vec/storage_backend.rs +++ /dev/null @@ -1,260 +0,0 @@ -use crate::node_id::{InMemNodeId, SortableNodeId}; -use crate::prefix_record::InternalPrefixRecord; - -use crate::local_vec::tree::*; - -use crate::af::AddressFamily; -// use crate::prefix_record::MergeUpdate; - -use std::fmt::Debug; -use std::io::{Error, ErrorKind}; - -// pub(crate) type PrefixIter<'a, AF, Meta> = Result< -// std::slice::Iter<'a, InternalPrefixRecord>, -// Box, -// >; - -pub(crate) trait StorageBackend -where - Self::NodeType: SortableNodeId + Copy, -{ - type NodeType; - type AF: AddressFamily; - type Meta: crate::prefix_record::Meta; - - fn init( - start_node: Option>, - ) -> Self; - fn acquire_new_node_id( - &self, - sort: <::NodeType as SortableNodeId>::Sort, - part: <::NodeType as SortableNodeId>::Part, - ) -> ::NodeType; - fn store_node( - &mut self, - id: Option, - next_node: SizedStrideNode, - ) -> Option; - fn update_node( - &mut self, - 
current_node_id: Self::NodeType, - updated_node: SizedStrideNode, - ); - fn retrieve_node( - &self, - index: Self::NodeType, - ) -> Option<&SizedStrideNode>; - fn retrieve_node_mut( - &mut self, - index: Self::NodeType, - ) -> SizedNodeResult; - fn _retrieve_node_with_guard( - &self, - index: Self::NodeType, - ) -> CacheGuard; - fn get_root_node_id(&self) -> Self::NodeType; - fn _get_root_node_mut( - &mut self, - ) -> Option<&mut SizedStrideNode>; - fn _get_nodes(&self) -> &Vec>; - fn get_nodes_len(&self) -> usize; - fn acquire_new_prefix_id( - &self, - sort: &<::NodeType as SortableNodeId>::Sort, - part: &InternalPrefixRecord, - ) -> ::NodeType; - fn store_prefix( - &mut self, - next_node: InternalPrefixRecord, - ) -> Result< - <::NodeType as SortableNodeId>::Part, - Box, - >; - fn retrieve_prefix( - &self, - index: <::NodeType as SortableNodeId>::Part, - ) -> Option<&InternalPrefixRecord>; - fn retrieve_prefix_mut( - &mut self, - index: <::NodeType as SortableNodeId>::Part, - ) -> Option<&mut InternalPrefixRecord>; - fn _retrieve_prefix_with_guard( - &self, - index: Self::NodeType, - ) -> PrefixCacheGuard; - fn _get_prefixes( - &self, - ) -> &Vec>; - #[cfg(feature = "cli")] - fn get_prefixes_len(&self) -> usize; - // fn prefixes_iter(&self) -> PrefixIter<'_, Self::AF, Self::Meta>; -} - -#[derive(Debug)] -pub(crate) struct InMemStorage< - AF: AddressFamily, - Meta: crate::prefix_record::Meta, -> { - pub nodes: Vec>, - pub prefixes: Vec>, -} - -impl - StorageBackend for InMemStorage -{ - type NodeType = InMemNodeId; - type AF = AF; - type Meta = Meta; - - fn init( - start_node: Option>, - ) -> InMemStorage { - let mut nodes = vec![]; - if let Some(n) = start_node { - nodes = vec![n]; - } - InMemStorage { - nodes, - prefixes: vec![], - } - } - - fn acquire_new_node_id( - &self, - sort: <::NodeType as SortableNodeId>::Sort, - _part: <::NodeType as SortableNodeId>::Part, - ) -> ::NodeType { - // We're ignoring the part parameter here, because we want to store - // the 
index into the global self.nodes vec in the local vec. - InMemNodeId(sort, self.nodes.len() as u32) - } - - fn store_node( - &mut self, - _id: Option, - next_node: SizedStrideNode, - ) -> Option { - let id = self.nodes.len() as u32; - self.nodes.push(next_node); - //Store::NodeType::new(&bit_id, &i.into()) - //Store::NodeType::new(&((1 << $nibble_len) + $nibble as u16).into(), &i) - Some(InMemNodeId::new(&0, &id)) - } - - fn update_node( - &mut self, - current_node_id: Self::NodeType, - updated_node: SizedStrideNode, - ) { - let _default_val = std::mem::replace( - self.retrieve_node_mut(current_node_id).unwrap(), - updated_node, - ); - } - - fn retrieve_node( - &self, - id: Self::NodeType, - ) -> Option<&SizedStrideNode> { - self.nodes.get(id.get_part() as usize) - } - - fn retrieve_node_mut( - &mut self, - index: Self::NodeType, - ) -> SizedNodeResult { - self.nodes - .get_mut(index.get_part() as usize) - .ok_or_else(|| { - Box::new(Error::new(ErrorKind::Other, "Retrieve Node Error")) - .into() - }) - } - - // Don't use this function, this is just a placeholder and a really - // inefficient implementation. - fn _retrieve_node_with_guard( - &self, - _id: Self::NodeType, - ) -> CacheGuard { - panic!("Not Implemented for InMeMStorage"); - } - - fn get_root_node_id(&self) -> Self::NodeType { - InMemNodeId(0, 0) - } - - fn _get_root_node_mut( - &mut self, - ) -> Option<&mut SizedStrideNode> { - Some(&mut self.nodes[0]) - } - - fn _get_nodes(&self) -> &Vec> { - &self.nodes - } - - fn get_nodes_len(&self) -> usize { - self.nodes.len() - } - - fn acquire_new_prefix_id( - &self, - sort: &<::NodeType as SortableNodeId>::Sort, - _part: &InternalPrefixRecord< - ::AF, - ::Meta, - >, - ) -> ::NodeType { - // We're ignoring the part parameter here, because we want to store - // the index into the global self.prefixes vec in the local vec. 
- InMemNodeId(*sort, self.prefixes.len() as u32) - } - - fn store_prefix( - &mut self, - next_node: InternalPrefixRecord, - ) -> Result> { - let id = self.prefixes.len() as u32; - self.prefixes.push(next_node); - Ok(id) - } - - fn retrieve_prefix( - &self, - index: u32, - ) -> Option<&InternalPrefixRecord> { - self.prefixes.get(index as usize) - } - - fn retrieve_prefix_mut( - &mut self, - index: u32, - ) -> Option<&mut InternalPrefixRecord> { - self.prefixes.get_mut(index as usize) - } - - fn _retrieve_prefix_with_guard( - &self, - _index: Self::NodeType, - ) -> PrefixCacheGuard { - panic!("nOt ImPlEmEnTed for InMemNode"); - } - - fn _get_prefixes( - &self, - ) -> &Vec> { - &self.prefixes - } - - #[cfg(feature = "cli")] - fn get_prefixes_len(&self) -> usize { - self.prefixes.len() - } - - // fn prefixes_iter( - // &self, - // ) -> PrefixIter<'_, AF, Meta> { - // Ok(self.prefixes.iter()) - // } -} diff --git a/src/local_vec/store.rs b/src/local_vec/store.rs deleted file mode 100644 index d4d9a35e..00000000 --- a/src/local_vec/store.rs +++ /dev/null @@ -1,144 +0,0 @@ -use crate::local_vec::storage_backend::{InMemStorage, StorageBackend}; -use crate::local_vec::TreeBitMap; -use crate::node_id::InMemNodeId; -use crate::prefix_record::InternalPrefixRecord; -use super::query::QuerySingleResult; -use crate::{MatchOptions, Stats, Strides}; - -use crate::af::{IPv4, IPv6}; -use inetnum::addr::Prefix; - -use super::query::PrefixId; -use super::tree::SizedStrideNode; -/// A fast, memory-efficient Prefix Store, for use in single-threaded contexts. -/// -/// Can be used in multi-threaded contexts by wrapping it in a `Arc>`. -/// Be aware that this is undesirable in cases with high contention. -/// Use cases with high contention are best served by the [`crate::MultiThreadedStore`]. 
-pub struct Store { - v4: TreeBitMap>, - v6: TreeBitMap>, -} - -impl Store { - pub fn new(v4_strides: Vec, v6_strides: Vec) -> Self { - Store { - v4: TreeBitMap::new(v4_strides), - v6: TreeBitMap::new(v6_strides), - } - } -} - -impl<'a, M: crate::prefix_record::Meta> Store { - pub fn match_prefix( - &'a self, - search_pfx: &Prefix, - options: &MatchOptions, - ) -> QuerySingleResult { - match search_pfx.addr() { - std::net::IpAddr::V4(addr) => self.v4.match_prefix( - PrefixId::::new(addr.into(), search_pfx.len()), - options, - ), - std::net::IpAddr::V6(addr) => self.v6.match_prefix( - PrefixId::::new(addr.into(), search_pfx.len()), - options, - ), - } - } - - pub fn insert( - &mut self, - prefix: &Prefix, - meta: M, - // user_data: Option<&::UserDataIn>, - ) -> Result<(), std::boxed::Box> { - match prefix.addr() { - std::net::IpAddr::V4(addr) => { - self.v4.insert(InternalPrefixRecord::new_with_meta( - addr.into(), - prefix.len(), - meta, - )) - } - std::net::IpAddr::V6(addr) => { - self.v6.insert(InternalPrefixRecord::new_with_meta( - addr.into(), - prefix.len(), - meta, - )) - } - } - } - - pub fn prefixes_iter(&'a self) -> crate::PrefixSingleRecordIter<'a, M> { - let rs4: std::slice::Iter> = - self.v4.store.prefixes[..].iter(); - let rs6 = self.v6.store.prefixes[..].iter(); - - crate::PrefixSingleRecordIter::<'a, M> { - v4: Some(rs4), - v6: rs6, - } - } - - pub fn nodes_v4_iter( - &'a self, - ) -> impl Iterator> + 'a - { - self.v4.store.nodes.iter() - } - - pub fn nodes_v6_iter( - &'a self, - ) -> impl Iterator> + 'a - { - self.v6.store.nodes.iter() - } - - pub fn prefixes_len(&self) -> usize { - self.v4.store.prefixes.len() + self.v6.store.prefixes.len() - } - - pub fn prefixes_v4_len(&self) -> usize { - self.v4.store.prefixes.len() - } - - pub fn prefixes_v6_len(&self) -> usize { - self.v6.store.prefixes.len() - } - - pub fn nodes_len(&self) -> usize { - self.v4.store.get_nodes_len() + self.v6.store.get_nodes_len() - } - - pub fn nodes_v4_len(&self) -> usize { 
- self.v4.store.get_nodes_len() - } - - pub fn nodes_v6_len(&self) -> usize { - self.v6.store.get_nodes_len() - } - - #[cfg(feature = "cli")] - pub fn print_funky_stats(&self) { - println!("Stats for IPv4 multi-threaded store\n"); - println!("{}", self.v4); - println!("Stats for IPv6 multi-threaded store\n"); - println!("{}", self.v6); - } - - pub fn stats(&self) -> Stats { - Stats { - v4: &self.v4.stats, - v6: &self.v6.stats, - } - } - - pub fn strides(&'a self) -> Strides { - Strides { - v4: &self.v4.strides, - v6: &self.v6.strides, - } - } -} diff --git a/src/local_vec/tests/full_table_single.rs b/src/local_vec/tests/full_table_single.rs deleted file mode 100644 index 40826e3d..00000000 --- a/src/local_vec/tests/full_table_single.rs +++ /dev/null @@ -1,214 +0,0 @@ - -#![cfg(test)] - -mod full_table { - use inetnum::addr::Prefix; - use inetnum::asn::Asn; - - use crate::{ - prelude::*, PublicPrefixSingleRecord, SingleThreadedStore - }; - - use std::error::Error; - use std::fs::File; - use std::process; - - #[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)] - pub struct ComplexPrefixAs(pub Vec); - - // impl MergeUpdate for ComplexPrefixAs { - // type UserDataIn = (); - // type UserDataOut = (); - - // fn merge_update( - // &mut self, - // update_record: ComplexPrefixAs, - // _: Option<&Self::UserDataIn>, - // ) -> Result<(), Box> { - // self.0 = update_record.0; - // Ok(()) - // } - - // fn clone_merge_update( - // &self, - // update_meta: &Self, - // _: Option<&Self::UserDataIn>, - // ) -> Result<(Self, Self::UserDataOut), Box> - // where - // Self: std::marker::Sized, - // { - // let mut new_meta = update_meta.0.clone(); - // new_meta.push(self.0[0]); - // Ok((ComplexPrefixAs(new_meta), ())) - // } - // } - - impl Meta for ComplexPrefixAs { - type Orderable<'a> = Asn; - type TBI = (); - - fn as_orderable(&self, _tbi: Self::TBI) -> Asn { - self.0[0].into() - } - } - - // impl Orderable for ComplexPrefixAs { - // fn get_id(&self) -> &Self { - // &self.0 - // } 
- // } - - impl std::fmt::Display for ComplexPrefixAs { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "AS{:?}", self.0) - } - } - - #[test] - fn test_full_table_from_csv() -> Result<(), Box> { - // These constants are all contingent on the exact csv file, - // being loaded! - const CSV_FILE_PATH: &str = "./data/uniq_pfx_asn_dfz_rnd.csv"; - const SEARCHES_NUM: u32 = 2080800; - const INSERTS_NUM: usize = 893943; - const GLOBAL_PREFIXES_VEC_SIZE: usize = 886117; - const FOUND_PREFIXES: u32 = 1322993; - - fn load_prefixes( - pfxs: &mut Vec>, - ) -> Result<(), Box> { - let file = File::open(CSV_FILE_PATH)?; - - let mut rdr = csv::Reader::from_reader(file); - for result in rdr.records() { - let record = result?; - let ip: Vec<_> = record[0] - .split('.') - .map(|o| -> u8 { o.parse().unwrap() }) - .collect(); - let net = std::net::Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3]); - let len: u8 = record[1].parse().unwrap(); - let asn: u32 = record[2].parse().unwrap(); - let pfx = PublicPrefixSingleRecord::new( - Prefix::new(net.into(), len)?, - ComplexPrefixAs(vec![asn]), - ); - pfxs.push(pfx); - } - Ok(()) - } - - let strides_vec = [ - // vec![8], - vec![4], - // vec![6, 6, 6, 6, 4, 4], - // vec![3, 4, 4, 6, 7, 8], - ]; - for _strides in strides_vec.iter().enumerate() { - let mut pfxs: Vec> = vec![]; - let v4_strides = vec![8]; - let v6_strides = vec![8]; - let mut tree_bitmap = SingleThreadedStore::::new(v4_strides, v6_strides); - - if let Err(err) = load_prefixes(&mut pfxs) { - println!("error running example: {}", err); - process::exit(1); - } - - let inserts_num = pfxs.len(); - for pfx in pfxs.into_iter() { - match tree_bitmap.insert(&pfx.prefix, pfx.meta) { - Ok(_) => {} - Err(e) => { - println!("{}", e); - panic!("STOP TESTING I CAN'T INSERT!"); - } - }; - - let query = tree_bitmap.match_prefix(&pfx.prefix, - &MatchOptions { - match_type: MatchType::LongestMatch, - include_withdrawn: false, - include_less_specifics: false, - 
include_more_specifics: false, - mui: None - }, - ); - - if query.prefix.is_none() { panic!("STOPSTOPSTOPST"); } - else { - assert_eq!(query.prefix.unwrap(), pfx.prefix); - } - } - - println!("done inserting {} prefixes", inserts_num); - - - let inet_max = 255; - let len_max = 32; - - let mut found_counter = 0_u32; - let mut not_found_counter = 0_u32; - let mut inet_count = 0; - let mut len_count = 0; - (0..inet_max).for_each(|i_net| { - len_count = 0; - (0..len_max).for_each(|s_len| { - - (0..inet_max).for_each(|ii_net| { - let pfx = Prefix::new_relaxed( - std::net::Ipv4Addr::new(i_net, ii_net, 0, 0) - .into(), - s_len, - ); - // print!(":{}.{}.0.0/{}:", i_net, ii_net, s_len); - let res = tree_bitmap.match_prefix( - &pfx.unwrap(), - &MatchOptions { - match_type: MatchType::LongestMatch, - include_withdrawn: false, - include_less_specifics: false, - include_more_specifics: false, - mui: None - }, - ); - if let Some(_pfx) = res.prefix { - // println!("_pfx {:?}", _pfx); - // println!("pfx {:?}", pfx); - // println!("{:#?}", res); - assert!(_pfx.len() <= pfx.unwrap().len()); - assert!(_pfx.addr() <= pfx.unwrap().addr()); - found_counter += 1; - } else { - // println!( - // "not found {:?}", - // if let Ok(e) = pfx { - // e.to_string() - // } else { - // "ok".to_string() - // } - // ); - not_found_counter += 1; - } - }); - len_count += 1; - }); - inet_count += 1; - }); - println!("found pfx: {}", found_counter); - println!("not found pfx: {}", not_found_counter); - println!("inet counter {}", inet_count); - println!("len counter {}", len_count); - - let searches_num = - inet_max as u128 * inet_max as u128 * len_max as u128; - - assert_eq!(searches_num, SEARCHES_NUM as u128); - assert_eq!(inserts_num, INSERTS_NUM); - assert_eq!(tree_bitmap.prefixes_len(), GLOBAL_PREFIXES_VEC_SIZE); - assert_eq!(found_counter, FOUND_PREFIXES); - assert_eq!(not_found_counter, SEARCHES_NUM - FOUND_PREFIXES); - } - Ok(()) - } -} \ No newline at end of file diff --git 
a/src/local_vec/tests/mod.rs b/src/local_vec/tests/mod.rs deleted file mode 100644 index 4f7fe29c..00000000 --- a/src/local_vec/tests/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod full_table_single; -mod more_specifics_single; \ No newline at end of file diff --git a/src/local_vec/tests/more_specifics_single.rs b/src/local_vec/tests/more_specifics_single.rs deleted file mode 100644 index 502fd806..00000000 --- a/src/local_vec/tests/more_specifics_single.rs +++ /dev/null @@ -1,163 +0,0 @@ -#![cfg(test)] -mod tests { - use inetnum::addr::Prefix; - - use crate::{ - prelude::*, - meta_examples::PrefixAs, - SingleThreadedStore - }; - - use std::error::Error; - - #[test] - fn test_more_specifics_without_less_specifics( - ) -> Result<(), Box> { - let v4_strides = vec![8]; - let v6_strides = vec![8]; - let mut tree_bitmap = SingleThreadedStore::::new(v4_strides, v6_strides); - let pfxs = vec![ - Prefix::new(std::net::Ipv4Addr::new(17, 0, 64, 0).into(), 18)?, // 0 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 109, 0).into(), 24)?, // 1 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 153, 0).into(), 24)?, // 2 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 21)?, // 3 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 176, 0).into(), 20)?, // 4 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8)?, // 5 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 184, 0).into(), 23)?, // 6 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 71, 0).into(), 24)?, // 7 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9)?, // 8 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 117, 0).into(), 24)?, // 9 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 99, 0).into(), 24)?, // 10 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 224, 0).into(), 24)?, // 11 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 128, 0).into(), 18)?, // 12 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 120, 0).into(), 24)?, // 13 - ]; - - for pfx in pfxs.iter() { - tree_bitmap.insert(pfx, PrefixAs(666))?; - } - println!("------ end 
of inserts\n"); - - // let locks = tree_bitmap.acquire_prefixes_rwlock_read(); - for spfx in &[ - ( - &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9), - &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9), // 0 - vec![0, 1, 2, 3, 4, 6, 7, 9, 10, 11, 12, 13], - ), - ( - &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8), - &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8), // 0 - vec![0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13], - ), - ] { - println!("search for: {:?}", spfx.0); - let found_result = tree_bitmap.match_prefix( - &spfx.0.unwrap(), - &MatchOptions { - match_type: MatchType::ExactMatch, - include_withdrawn: false, - include_less_specifics: false, - include_more_specifics: true, - mui: None - }, - ); - println!("em/m-s: {:#?}", found_result); - - let more_specifics = found_result.more_specifics.unwrap(); - - assert_eq!(found_result.prefix.unwrap(), spfx.1.unwrap()); - assert_eq!(&more_specifics.len(), &spfx.2.len()); - - for i in spfx.2.iter() { - print!("{} ", i); - - let result_pfx = - more_specifics.iter().find(|pfx| pfx.prefix == pfxs[*i]); - assert!(result_pfx.is_some()); - } - println!("-----------"); - } - Ok(()) - } - - #[test] - fn test_more_specifics_with_less_specifics() -> Result<(), Box> - { - let v4_strides = vec![8]; - let v6_strides = vec![8]; - let mut tree_bitmap = SingleThreadedStore::::new(v4_strides, v6_strides); - let pfxs = vec![ - Prefix::new(std::net::Ipv4Addr::new(17, 0, 64, 0).into(), 18), // 0 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 109, 0).into(), 24), // 1 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 153, 0).into(), 24), // 2 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 21), // 3 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 176, 0).into(), 20), // 4 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8), // 5 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 184, 0).into(), 23), // 6 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 71, 0).into(), 24), // 7 - 
Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9), // 8 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 117, 0).into(), 24), // 9 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 99, 0).into(), 24), // 10 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 224, 0).into(), 24), // 11 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 128, 0).into(), 18), // 12 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 120, 0).into(), 24), // 13 - ]; - - for pfx in pfxs.iter() { - tree_bitmap.insert(&pfx.unwrap(), PrefixAs(666))?; - } - println!("------ end of inserts\n"); - - for spfx in &[ - ( - &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9), - Some(&Prefix::new( - std::net::Ipv4Addr::new(17, 0, 0, 0).into(), - 9, - )), // 0 - vec![0, 1, 2, 3, 4, 6, 7, 9, 10, 11, 12, 13], - ), - ( - &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8), - Some(&Prefix::new( - std::net::Ipv4Addr::new(17, 0, 0, 0).into(), - 8, - )), // 0 - vec![0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13], - ), - ] { - println!("search for: {:#}", (*spfx.0)?); - let found_result = tree_bitmap.match_prefix( - &spfx.0.unwrap(), - &MatchOptions { - match_type: MatchType::LongestMatch, - include_withdrawn: false, - include_less_specifics: false, - include_more_specifics: true, - mui: None - }, - ); - println!("em/m-s: {}", found_result); - - let more_specifics = found_result.more_specifics.unwrap(); - - assert_eq!( - found_result.prefix.unwrap(), - spfx.1.unwrap().unwrap() - ); - assert_eq!(&more_specifics.len(), &spfx.2.len()); - - for i in spfx.2.iter() { - print!("{} ", i); - - let result_pfx = more_specifics - .iter() - .find(|pfx| pfx.prefix == pfxs[*i].unwrap()); - assert!(result_pfx.is_some()); - } - println!("-----------"); - } - Ok(()) - } -} diff --git a/src/local_vec/tree.rs b/src/local_vec/tree.rs deleted file mode 100644 index 91b30078..00000000 --- a/src/local_vec/tree.rs +++ /dev/null @@ -1,562 +0,0 @@ -use std::{ - fmt::{Binary, Debug}, - marker::PhantomData, -}; - -use 
crate::{af::{AddressFamily, Zero}, custom_alloc::UpsertReport}; -use crate::local_vec::node::TreeBitMapNode; -use crate::local_vec::storage_backend::StorageBackend; -use crate::match_node_for_strides_with_local_vec; -use crate::node_id::SortableNodeId; -use crate::prefix_record::InternalPrefixRecord; -use crate::stats::{SizedStride, StrideStats}; -use crate::stride::*; -use crate::synth_int::{U256, U512}; - -#[cfg(feature = "cli")] -use crate::node_id::InMemNodeId; - -#[cfg(feature = "cli")] -use ansi_term::Colour; - -#[derive(Debug)] -pub enum SizedStrideNode { - Stride3(TreeBitMapNode), - Stride4(TreeBitMapNode), - Stride5(TreeBitMapNode), - Stride6(TreeBitMapNode), - Stride7(TreeBitMapNode), - Stride8(TreeBitMapNode), -} - -pub(crate) type SizedNodeResult<'a, AF, NodeType> = - Result<&'a mut SizedStrideNode, Box>; - -impl Default for SizedStrideNode -where - AF: AddressFamily, - NodeId: SortableNodeId + Copy, -{ - fn default() -> Self { - SizedStrideNode::Stride3(TreeBitMapNode { - ptrbitarr: 0, - pfxbitarr: 0, - pfx_vec: vec![], - ptr_vec: vec![], - _af: PhantomData, - }) - } -} - -pub struct CacheGuard< - 'a, - AF: 'static + AddressFamily, - NodeId: SortableNodeId + Copy, -> { - pub guard: std::cell::Ref<'a, SizedStrideNode>, -} - -impl<'a, AF: 'static + AddressFamily, NodeId: SortableNodeId + Copy> - std::ops::Deref for CacheGuard<'a, AF, NodeId> -{ - type Target = SizedStrideNode; - - fn deref(&self) -> &Self::Target { - &self.guard - } -} - -pub struct PrefixCacheGuard< - 'a, - AF: 'static + AddressFamily, - Meta: crate::prefix_record::Meta, -> { - pub guard: std::cell::Ref<'a, InternalPrefixRecord>, -} - -impl<'a, AF: 'static + AddressFamily, Meta: crate::prefix_record::Meta> - std::ops::Deref for PrefixCacheGuard<'a, AF, Meta> -{ - type Target = InternalPrefixRecord; - - fn deref(&self) -> &Self::Target { - &self.guard - } -} - -pub(crate) enum NewNodeOrIndex< - AF: AddressFamily, - NodeId: SortableNodeId + Copy, -> { - NewNode(SizedStrideNode, 
NodeId::Sort), // New Node and bit_id of the new node - ExistingNode(NodeId), - NewPrefix, - ExistingPrefix(NodeId::Part), -} - -pub(crate) struct TreeBitMap -where - Store: StorageBackend, -{ - pub strides: Vec, - pub stats: Vec, - pub store: Store, -} - -impl TreeBitMap -where - Store: StorageBackend, -{ - pub fn new(_strides_vec: Vec) -> TreeBitMap { - // Check if the strides division makes sense - let mut strides = vec![]; - let mut strides_sum = 0; - for s in _strides_vec.iter().cycle() { - strides.push(*s); - strides_sum += s; - if strides_sum >= Store::AF::BITS - 1 { - break; - } - } - assert_eq!(strides.iter().sum::(), Store::AF::BITS); - - let mut stride_stats: Vec = vec![ - StrideStats::new(SizedStride::Stride3, strides.len() as u8), // 0 - StrideStats::new(SizedStride::Stride4, strides.len() as u8), // 1 - StrideStats::new(SizedStride::Stride5, strides.len() as u8), // 2 - StrideStats::new(SizedStride::Stride6, strides.len() as u8), // 3 - StrideStats::new(SizedStride::Stride7, strides.len() as u8), // 4 - StrideStats::new(SizedStride::Stride8, strides.len() as u8), // 5 - ]; - - let node: SizedStrideNode< - ::AF, - ::NodeType, - >; - - match strides[0] { - 3 => { - node = SizedStrideNode::Stride3(TreeBitMapNode { - ptrbitarr: 0, - pfxbitarr: 0, - ptr_vec: vec![], - pfx_vec: vec![], - _af: PhantomData, - }); - stride_stats[0].inc(0); - } - 4 => { - node = SizedStrideNode::Stride4(TreeBitMapNode { - ptrbitarr: 0, - pfxbitarr: 0, - ptr_vec: vec![], - pfx_vec: vec![], - _af: PhantomData, - }); - stride_stats[1].inc(0); - } - 5 => { - node = SizedStrideNode::Stride5(TreeBitMapNode { - ptrbitarr: 0, - pfxbitarr: 0, - ptr_vec: vec![], - pfx_vec: vec![], - _af: PhantomData, - }); - stride_stats[2].inc(0); - } - 6 => { - node = SizedStrideNode::Stride6(TreeBitMapNode { - ptrbitarr: 0, - pfxbitarr: 0, - ptr_vec: vec![], - pfx_vec: vec![], - _af: PhantomData, - }); - stride_stats[3].inc(0); - } - 7 => { - node = SizedStrideNode::Stride7(TreeBitMapNode { - 
ptrbitarr: 0, - pfxbitarr: U256(0, 0), - ptr_vec: vec![], - pfx_vec: vec![], - _af: PhantomData, - }); - stride_stats[4].inc(0); - } - 8 => { - node = SizedStrideNode::Stride8(TreeBitMapNode { - ptrbitarr: U256(0, 0), - pfxbitarr: U512(0, 0, 0, 0), - ptr_vec: vec![], - pfx_vec: vec![], - _af: PhantomData, - }); - stride_stats[5].inc(0); - } - _ => { - panic!("unknown stride size encountered in STRIDES array"); - } - }; - - TreeBitMap { - strides, - stats: stride_stats, - store: Store::init(Some(node)), - } - } - - // Partition for stride 4 - // - // ptr bits never happen in the first half of the bitmap for the stride-size. Consequently the ptrbitarr can be an integer type - // half the size of the pfxbitarr. - // - // ptr bit arr (u16) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 x - // pfx bit arr (u32) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 - // nibble * 0 1 00 01 10 11 000 001 010 011 100 101 110 111 0000 0001 0010 0011 0100 0101 0110 0111 1000 1001 1010 1011 1100 1101 1110 1111 x - // nibble len offset 0 1 2 3 4 - // - // stride 3: 1 + 2 + 4 + 8 = 15 bits. 2^4 - 1 (1 << 4) - 1. ptrbitarr starts at pos 7 (1 << 3) - 1 - // stride 4: 1 + 2 + 4 + 8 + 16 = 31 bits. 2^5 - 1 (1 << 5) - 1. ptrbitarr starts at pos 15 (1 << 4) - 1 - // stride 5: 1 + 2 + 4 + 8 + 16 + 32 + 64 = 63 bits. 2^6 - 1 - // stride 6: 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 bits. 2^7 - 1 - // stride 7: 1 + 2 + 4 + 8 + 16 + 32 + 64 = 128 = 256 bits. 2^8 - 1126 - // stride 8: 1 + 2 + 4 + 8 + 16 + 32 + 64 + 128 + 256 = 511 bits. 2^9 - 1 - // - // Ex.: - // pfx 65.0.0.252/30 0100_0001_0000_0000_0000_0000_1111_1100 - // - // nibble 1 (pfx << 0) >> 28 0000_0000_0000_0000_0000_0000_0000_0100 - // bit_pos (1 << nibble length) - 1 + nibble 0000_0000_0000_0000_0000_1000_0000_0000 - // - // nibble 2 (pfx << 4) >> 24 0000_0000_0000_0000_0000_0000_0000_0001 - // bit_pos (1 << nibble length) - 1 + nibble 0000_0000_0000_0000_1000_0000_0000_0000 - // ... 
- // nibble 8 (pfx << 28) >> 0 0000_0000_0000_0000_0000_0000_0000_1100 - // bit_pos (1 << nibble length) - 1 + nibble = (1 << 2) - 1 + 2 = 5 0000_0010_0000_0000_0000_0000_0000_0000 - // 5 - 5 - 5 - 4 - 4 - [4] - 5 - // startpos (2 ^ nibble length) - 1 + nibble as usize - - pub(crate) fn insert( - &mut self, - pfx: InternalPrefixRecord, - // user_data: Option<&::UserDataIn>, - ) -> Result<(), Box> { - let mut stride_end: u8 = 0; - let mut cur_i = self.store.get_root_node_id(); - let mut level: u8 = 0; - - loop { - let stride = self.strides[level as usize]; - stride_end += stride; - let nibble_len = if pfx.len < stride_end { - stride + pfx.len - stride_end - } else { - stride - }; - - let nibble = Store::AF::get_nibble( - pfx.net, - stride_end - stride, - nibble_len, - ); - let is_last_stride = pfx.len <= stride_end; - - let next_node_idx = match_node_for_strides_with_local_vec![ - // applicable to the whole outer match in the marco - self; - // user_data; - nibble_len; - nibble; - is_last_stride; - pfx; - cur_i; - level; - // Strides to create match arm for; stats level - Stride3; 0, - Stride4; 1, - Stride5; 2, - Stride6; 3, - Stride7; 4, - Stride8; 5 - ]; - - if let Some(i) = next_node_idx { - cur_i = i; - level += 1; - } else { - return Ok(()); - } - } - } - - pub(crate) fn store_node( - &mut self, - id: Option, - next_node: SizedStrideNode, - ) -> Option { - self.store.store_node(id, next_node) - } - - #[inline] - pub(crate) fn retrieve_node( - &self, - id: Store::NodeType, - ) -> Option<&SizedStrideNode> { - self.store.retrieve_node(id) - } - - pub(crate) fn get_root_node_id(&self) -> Store::NodeType { - self.store.get_root_node_id() - } - - #[inline] - pub(crate) fn retrieve_node_mut( - &mut self, - index: Store::NodeType, - ) -> SizedNodeResult { - self.store.retrieve_node_mut(index) - } - - pub(crate) fn store_prefix( - &mut self, - next_node: InternalPrefixRecord, - ) -> Result< - <::NodeType as SortableNodeId>::Part, - Box, - > { - // let id = 
self.prefixes.len() as u32; - self.store.store_prefix(next_node) - // id - } - - pub(crate) fn update_prefix_meta( - &mut self, - update_node_idx: <::NodeType as SortableNodeId>::Part, - meta: Store::Meta, - // user_data: Option<&::UserDataIn>, - ) -> Result> { - match self.store.retrieve_prefix_mut(update_node_idx) { - Some(update_pfx) => { - update_pfx.meta = meta; - Ok(UpsertReport { cas_count: 0, prefix_new: false, mui_new: false, mui_count: 0 }) - // ::merge_update(&mut update_pfx.meta, meta) - } - // TODO - // Use/create proper error types - None => Err("Prefix not found".into()), - } - } - - #[inline] - pub(crate) fn retrieve_prefix( - &self, - index: <::NodeType as SortableNodeId>::Part, - ) -> Option<&InternalPrefixRecord> { - self.store.retrieve_prefix(index) - } - - // This function assembles all entries in the `pfx_vec` of all child nodes of the - // `start_node` into one vec, starting from iself and then recursively assembling - // adding all `pfx_vec`s of its children. - fn get_all_more_specifics_for_node( - &self, - start_node: &SizedStrideNode, - found_pfx_vec: &mut Vec, - ) { - match start_node { - SizedStrideNode::Stride3(n) => { - found_pfx_vec.extend_from_slice(&n.pfx_vec); - - for nn in n.ptr_vec.iter() { - self.get_all_more_specifics_for_node( - self.retrieve_node(*nn).unwrap(), - found_pfx_vec, - ); - } - } - SizedStrideNode::Stride4(n) => { - found_pfx_vec.extend_from_slice(&n.pfx_vec); - - for nn in n.ptr_vec.iter() { - self.get_all_more_specifics_for_node( - self.retrieve_node(*nn).unwrap(), - found_pfx_vec, - ); - } - } - SizedStrideNode::Stride5(n) => { - found_pfx_vec.extend_from_slice(&n.pfx_vec); - - for nn in n.ptr_vec.iter() { - self.get_all_more_specifics_for_node( - self.retrieve_node(*nn).unwrap(), - found_pfx_vec, - ); - } - } - SizedStrideNode::Stride6(n) => { - found_pfx_vec.extend_from_slice(&n.pfx_vec); - - for nn in n.ptr_vec.iter() { - self.get_all_more_specifics_for_node( - self.retrieve_node(*nn).unwrap(), - 
found_pfx_vec, - ); - } - } - SizedStrideNode::Stride7(n) => { - found_pfx_vec.extend_from_slice(&n.pfx_vec); - - for nn in n.ptr_vec.iter() { - self.get_all_more_specifics_for_node( - self.retrieve_node(*nn).unwrap(), - found_pfx_vec, - ); - } - } - SizedStrideNode::Stride8(n) => { - found_pfx_vec.extend_from_slice(&n.pfx_vec); - - for nn in n.ptr_vec.iter() { - self.get_all_more_specifics_for_node( - self.retrieve_node(*nn).unwrap(), - found_pfx_vec, - ); - } - } - } - } - - // This function assembles the prefixes of a child node starting on a specified bit position in a ptr_vec of - // `current_node` into a vec, then adds all prefixes of these children recursively into a vec and returns that. - pub fn get_all_more_specifics_from_nibble( - &self, - current_node: &TreeBitMapNode, - nibble: u32, - nibble_len: u8, - ) -> Option> - where - S: Stride - + std::ops::BitAnd - + std::ops::BitOr - + Zero, - ::PtrSize: Debug - + Binary - + Copy - + std::ops::BitAnd - + PartialOrd - + Zero, - { - let (cnvec, mut msvec) = - current_node.add_more_specifics_at(nibble, nibble_len); - - for child_node in cnvec.iter() { - self.get_all_more_specifics_for_node( - self.retrieve_node(*child_node).unwrap(), - &mut msvec, - ); - } - Some(msvec) - } -} - -// This implements the funky stats for a tree -#[cfg(feature = "cli")] -impl std::fmt::Display for TreeBitMap { - fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let total_nodes = self.store.get_nodes_len(); - - writeln!(_f, "prefix vec size {}", self.store.get_prefixes_len())?; - writeln!(_f, "finished building tree...")?; - writeln!(_f, "{:?} nodes created", total_nodes)?; - writeln!(_f, - "size of node: {} bytes", - std::mem::size_of::>() - )?; - writeln!( - _f, - "memory used by nodes: {}kb", - self.store.get_nodes_len() - * std::mem::size_of::>() - / 1024 - )?; - - writeln!(_f, "stride division {:?}", self.strides)?; - for s in &self.stats { - writeln!(_f, "{:?}", s)?; - } - - writeln!( - _f, - 
"level\t[{}|{}] nodes occupied/max nodes percentage_max_nodes_occupied prefixes", - Colour::Blue.paint("nodes"), - Colour::Green.paint("prefixes") - )?; - let bars = ["▏", "▎", "▍", "▌", "▋", "▊", "▉"]; - let mut stride_bits = [0, 0]; - const SCALE: u32 = 5500; - - for stride in self.strides.iter().enumerate() { - // let level = stride.0; - stride_bits = [stride_bits[1] + 1, stride_bits[1] + stride.1]; - let nodes_num = self - .stats - .iter() - .find(|s| s.stride_len == *stride.1) - .unwrap() - .created_nodes[stride.0] - .count as u32; - let prefixes_num = self - .stats - .iter() - .find(|s| s.stride_len == *stride.1) - .unwrap() - .prefixes_num[stride.0] - .count as u32; - - let n = (nodes_num / SCALE) as usize; - let max_pfx = u128::overflowing_pow(2, stride_bits[1] as u32); - - write!(_f, "{}-{}\t", stride_bits[0], stride_bits[1])?; - - for _ in 0..n { - write!(_f, "{}", Colour::Blue.paint("█"))?; - } - - writeln!( - _f, - "{}", - Colour::Blue.paint( - bars[((nodes_num % SCALE) / (SCALE / 7)) as usize] - ) // = scale / 7 - )?; - - writeln!( - _f, - " {}/{} {:.2}%", - nodes_num, - max_pfx.0, - (nodes_num as f64 / max_pfx.0 as f64) * 100.0 - )?; - write!(_f, "\n\t")?; - - let n = (prefixes_num / SCALE) as usize; - for _ in 0..n { - write!(_f, "{}", Colour::Green.paint("█"))?; - } - - write!( - _f, - "{}", - Colour::Green.paint( - bars[((nodes_num % SCALE) / (SCALE / 7)) as usize] - ) // = scale / 7 - )?; - - writeln!(_f," {}", prefixes_num)?; - } - Ok(()) - } -} diff --git a/src/lsm_tree/mod.rs b/src/lsm_tree/mod.rs new file mode 100644 index 00000000..ebc6530b --- /dev/null +++ b/src/lsm_tree/mod.rs @@ -0,0 +1,781 @@ +use std::marker::PhantomData; +use std::path::Path; + +use inetnum::addr::Prefix; +use log::trace; +use lsm_tree::{AbstractTree, KvPair}; +use roaring::RoaringBitmap; +use zerocopy::{ + FromBytes, Immutable, IntoBytes, KnownLayout, NativeEndian, TryFromBytes, + Unaligned, U32, U64, +}; + +use crate::errors::{FatalError, FatalResult, 
PrefixStoreError}; +use crate::prefix_record::Meta; +use crate::stats::Counters; +use crate::types::prefix_record::{ValueHeader, ZeroCopyRecord}; +use crate::types::{AddressFamily, Record}; +use crate::types::{PrefixId, RouteStatus}; + +//------------ Key ----------------------------------------------------------- + +// The type of key used to create entries in the LsmTree. Can be short or +// long. Short keys overwrite existing values for existing (prefix, mui) +// pairs, whereas long keys append values with existing (prefix, mui), thus +// creating persisted historical records. + +pub(crate) trait Key: + TryFromBytes + KnownLayout + IntoBytes + Unaligned + Immutable +{ + // Try to extract a header from the bytes for reading only. If this + // somehow fails, we don't know what to do anymore. Data may be corrupted, + // so it probably should not be retried. + fn header(bytes: &[u8]) -> Result<&LongKey, FatalError> { + LongKey::try_ref_from_bytes(bytes.as_bytes()).map_err(|_| FatalError) + } + + // Try to extract a header for writing. If this somehow fails, we most + //probably cannot write to it anymore. This is fatal. The application + //should exit, data integrity (on disk) should be verified. 
+ fn header_mut(bytes: &mut [u8]) -> Result<&mut LongKey, FatalError> { + trace!("key size {}", KEY_SIZE); + trace!("bytes len {}", bytes.len()); + LongKey::try_mut_from_bytes(bytes.as_mut_bytes()) + .map_err(|_| FatalError) + } +} + +#[derive(Debug, KnownLayout, Immutable, FromBytes, Unaligned, IntoBytes)] +#[repr(C)] +pub struct ShortKey { + prefix: PrefixId, + mui: U32, +} + +#[derive( + Copy, + Clone, + Debug, + KnownLayout, + Immutable, + TryFromBytes, + Unaligned, + IntoBytes, +)] +#[repr(C)] +pub struct LongKey { + prefix: PrefixId, // 1 + (4 or 16) + mui: U32, // 4 + ltime: U64, // 8 + status: RouteStatus, // 1 +} // 18 or 30 + +impl Key + for ShortKey +{ +} + +impl From<(PrefixId, u32)> for ShortKey { + fn from(value: (PrefixId, u32)) -> Self { + Self { + prefix: value.0, + mui: value.1.into(), + } + } +} + +impl Key + for LongKey +{ +} + +impl From<(PrefixId, u32, u64, RouteStatus)> + for LongKey +{ + fn from(value: (PrefixId, u32, u64, RouteStatus)) -> Self { + Self { + prefix: value.0, + mui: value.1.into(), + ltime: value.2.into(), + status: value.3, + } + } +} + +//------------ LsmTree ------------------------------------------------------- + +// The log-structured merge tree that backs the persistent store (on disk). + +pub struct LsmTree< + // The address family that this tree stores. IPv4 or IPv6. + AF: AddressFamily, + // The Key type for this tree. This can basically be a long key, if the + // store needs to store historical records, or a short key, if it should + // overwrite records for (prefix, mui) pairs, effectively only keeping the + // current state. 
+ K: Key, + // The size in bytes of the complete key in the persisted storage, this + // is PREFIX_SIZE bytes (4; 16) + mui size (4) + ltime (8) + const KEY_SIZE: usize, +> { + tree: lsm_tree::Tree, + counters: Counters, + _af: PhantomData, + _k: PhantomData, +} + +impl, const KEY_SIZE: usize> + LsmTree +{ + pub fn new(persist_path: &Path) -> FatalResult> { + if let Ok(tree) = lsm_tree::Config::new(persist_path).open() { + Ok(LsmTree:: { + tree, + counters: Counters::default(), + _af: PhantomData, + _k: PhantomData, + }) + } else { + Err(FatalError) + } + } + + fn insert(&self, key: &[u8], value: &[u8]) -> (u32, u32) { + self.tree.insert::<&[u8], &[u8]>(key, value, 0) + } + + // This is not production code yet. To be re-evaluated if it does become + // production code. + #[allow(clippy::indexing_slicing)] + pub fn _remove(&self, key: &[u8]) { + self.tree.remove_weak(key, 0); + // the first byte of the prefix holds the length of the prefix. + self.counters._dec_prefixes_count(key[0]); + } + + // Based on the properties of the lsm_tree we can assume that the key and + // value concatenated in this method always has a length of greater than + // KEYS_SIZE, a global constant for the store per AF. + #[allow(clippy::indexing_slicing)] + pub fn records_for_prefix( + &self, + prefix: PrefixId, + mui: Option, + include_withdrawn: bool, + withdrawn_muis_bmin: &RoaringBitmap, + ) -> Option>>> { + match (mui, include_withdrawn) { + // Specific mui, include withdrawn routes + (Some(mui), true) => { + // get the records from the persist store for the (prefix, + // mui) tuple only. + let prefix_b = ShortKey::from((prefix, mui)); + self.tree + .prefix(prefix_b.as_bytes(), None, None) + .map(|kv| { + kv.map(|kv| { + trace!("mui i persist kv pair found: {:?}", kv); + let mut bytes = [kv.0, kv.1].concat(); + let key = K::header_mut(&mut bytes[..KEY_SIZE])?; + // If mui is in the global withdrawn muis table, + // then rewrite the routestatus of the record + // to withdrawn. 
+ if withdrawn_muis_bmin.contains(key.mui.into()) { + key.status = RouteStatus::Withdrawn; + } + Ok(bytes) + }) + }) + .collect::>>>>() + .into_iter() + .collect::>>>>() + .ok() + .and_then( + |recs| { + if recs.is_empty() { + None + } else { + Some(recs) + } + }, + ) + } + // All muis, include withdrawn routes + (None, true) => { + // get all records for this prefix + self.tree + .prefix(prefix.as_bytes(), None, None) + .map(|kv| { + kv.map(|kv| { + trace!("n i persist kv pair found: {:?}", kv); + + // If mui is in the global withdrawn muis table, + // then rewrite the routestatus of the record + // to withdrawn. + let mut bytes = [kv.0, kv.1].concat(); + trace!("bytes {:?}", bytes); + let key = K::header_mut(&mut bytes[..KEY_SIZE])?; + trace!("key {:?}", key); + trace!("wm_bmin {:?}", withdrawn_muis_bmin); + if withdrawn_muis_bmin.contains(key.mui.into()) { + trace!("rewrite status"); + key.status = RouteStatus::Withdrawn; + } + Ok(bytes) + }) + }) + .collect::>>>>() + .into_iter() + .collect::>>>>() + .ok() + .and_then( + |recs| { + if recs.is_empty() { + None + } else { + Some(recs) + } + }, + ) + } + // All muis, exclude withdrawn routes + (None, false) => { + // get all records for this prefix + self.tree + .prefix(prefix.as_bytes(), None, None) + .filter_map(|r| { + r.map(|kv| { + trace!("n f persist kv pair found: {:?}", kv); + let mut bytes = [kv.0, kv.1].concat(); + if let Ok(header) = + K::header_mut(&mut bytes[..KEY_SIZE]) + { + // If mui is in the global withdrawn muis + // table, then skip this record + trace!( + "header {}", + Prefix::from(header.prefix) + ); + trace!( + "status {}", + header.status == RouteStatus::Withdrawn + ); + if header.status == RouteStatus::Withdrawn + || withdrawn_muis_bmin + .contains(header.mui.into()) + { + trace!( + "NOT returning {} {}", + Prefix::from(header.prefix), + header.mui + ); + return None; + } + trace!( + "RETURNING {} {}", + Prefix::from(header.prefix), + header.mui + ); + Some(Ok(bytes)) + } else { + 
Some(Err(FatalError)) + } + }) + .transpose() + }) + .collect::>>>>() + .into_iter() + .collect::>>>>() + .ok() + .and_then( + |recs| { + if recs.is_empty() { + None + } else { + Some(recs) + } + }, + ) + } + // Specific mui, exclude withdrawn routes + (Some(mui), false) => { + // get the records from the persist store for the (prefix, + // mui) tuple only. + let prefix_b = ShortKey::::from((prefix, mui)); + self.tree + .prefix(prefix_b.as_bytes(), None, None) + .filter_map(|kv| { + kv.map(|kv| { + trace!("mui f persist kv pair found: {:?}", kv); + let bytes = [kv.0, kv.1].concat(); + if let Ok(key) = K::header(&bytes[..KEY_SIZE]) { + // If mui is in the global withdrawn muis + // table, then skip this record + if key.status == RouteStatus::Withdrawn + || withdrawn_muis_bmin + .contains(key.mui.into()) + { + return None; + } + Some(Ok(bytes)) + } else { + Some(Err(FatalError)) + } + }) + .transpose() + }) + .collect::>>>>() + .into_iter() + .collect::>>>>() + .ok() + .and_then( + |recs| { + if recs.is_empty() { + None + } else { + Some(recs) + } + }, + ) + } + } + } + + pub fn most_recent_record_for_prefix_mui( + &self, + prefix: PrefixId, + mui: u32, + ) -> FatalResult>> { + trace!("get most recent record for prefix mui combo"); + let key_b = ShortKey::from((prefix, mui)); + let mut res: FatalResult> = Err(FatalError); + + for rkv in self.tree.prefix(key_b.as_bytes(), None, None) { + if let Ok(kvs) = rkv { + let kv = [kvs.0, kvs.1].concat(); + if let Ok(h) = K::header(&kv) { + if let Ok(r) = &res { + if let Ok(h_res) = K::header(r) { + if h_res.ltime < h.ltime { + res = Ok(kv); + } + } + } else { + res = Ok(kv); + } + } else { + return Err(FatalError); + } + } else { + return Err(FatalError); + } + } + + res.map(|r| Some(r.to_vec())) + } + + pub(crate) fn records_with_keys_for_prefix_mui( + &self, + prefix: PrefixId, + mui: u32, + ) -> Vec>> { + let key_b = ShortKey::from((prefix, mui)); + + (*self.tree.prefix(key_b.as_bytes(), None, None)) + .into_iter() + 
.map(|rkv| { + if let Ok(kv) = rkv { + Ok([kv.0, kv.1].concat()) + } else { + Err(FatalError) + } + }) + .collect::>() + } + + pub fn flush_to_disk(&self) -> Result<(), lsm_tree::Error> { + let segment = self.tree.flush_active_memtable(0); + + if let Ok(Some(segment)) = segment { + self.tree.register_segments(&[segment])?; + self.tree.compact( + std::sync::Arc::new(lsm_tree::compaction::Leveled::default()), + 0, + )?; + }; + + Ok(()) + } + + pub fn approximate_len(&self) -> usize { + self.tree.approximate_len() + } + + pub fn disk_space(&self) -> u64 { + self.tree.disk_space() + } + + pub fn prefixes_count(&self) -> usize { + self.counters.prefixes_count().iter().sum() + } + + pub fn routes_count(&self) -> usize { + self.counters.routes_count() + } + + #[allow(clippy::indexing_slicing)] + pub fn prefixes_count_for_len( + &self, + len: u8, + ) -> Result { + if len <= AF::BITS { + Ok(self.counters.prefixes_count()[len as usize]) + } else { + Err(PrefixStoreError::StoreNotReadyError) + } + } + + pub(crate) fn persist_record_w_long_key( + &self, + prefix: PrefixId, + record: &Record, + ) { + self.insert( + LongKey::from(( + prefix, + record.multi_uniq_id, + record.ltime, + record.status, + )) + .as_bytes(), + record.meta.as_ref(), + ); + } + + pub(crate) fn persist_record_w_short_key( + &self, + prefix: PrefixId, + record: &Record, + ) { + trace!("Record to persist {}", record); + let mut value = ValueHeader { + ltime: record.ltime, + status: record.status, + } + .as_bytes() + .to_vec(); + + trace!("header in bytes {:?}", value); + + value.extend_from_slice(record.meta.as_ref()); + + trace!("value complete {:?}", value); + + self.insert( + ShortKey::from((prefix, record.multi_uniq_id)).as_bytes(), + &value, + ); + } + + pub(crate) fn rewrite_header_for_record( + &self, + header: ValueHeader, + record_b: &[u8], + ) -> FatalResult<()> { + let record = ZeroCopyRecord::::try_ref_from_prefix(record_b) + .map_err(|_| FatalError)? 
+ .0; + let key = ShortKey::from((record.prefix, record.multi_uniq_id)); + trace!("insert key {:?}", key); + + header + .as_bytes() + .to_vec() + .extend_from_slice(record.meta.as_ref()); + + self.insert(key.as_bytes(), header.as_bytes()); + + Ok(()) + } + + pub(crate) fn insert_empty_record( + &self, + prefix: PrefixId, + mui: u32, + ltime: u64, + ) { + self.insert( + LongKey::from((prefix, mui, ltime, RouteStatus::Withdrawn)) + .as_bytes(), + &[], + ); + } + + pub(crate) fn prefixes_iter( + &self, + ) -> impl Iterator>>> + '_ { + PersistedPrefixIter:: { + tree_iter: self.tree.iter(None, None), + cur_rec: None, + _af: PhantomData, + _k: PhantomData, + } + } +} + +impl< + AF: AddressFamily, + K: Key, + // const PREFIX_SIZE: usize, + const KEY_SIZE: usize, + > std::fmt::Debug for LsmTree +{ + fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + todo!() + } +} + +// Iterator for all items in a lsm tree partition. The iterator used for +// this will scann through the entire tree, and there's no way to start at a +// specified offset. +pub(crate) struct PersistedPrefixIter< + AF: AddressFamily, + K: Key, + const KEY_SIZE: usize, +> { + cur_rec: Option>>>, + tree_iter: + Box>>, + _af: PhantomData, + _k: PhantomData, +} + +impl, const KEY_SIZE: usize> Iterator + for PersistedPrefixIter +{ + type Item = Vec>>; + fn next(&mut self) -> Option { + let rec; + + // Do we already have a record in our iter struct? + if let Some(_cur_rec) = &mut self.cur_rec { + // yes, use it. + rec = std::mem::take(&mut self.cur_rec); + } else { + // No, advance to the next record in the persist tree. + let next_rec = self.tree_iter.next(); + + match next_rec { + // The persist tree is completely done, iterator's done. + None => { + return None; + } + Some(Ok((k, v))) => { + rec = Some(vec![Ok([k, v].concat())]); + } + Some(Err(_)) => { + // This is NOT GOOD. Both that it happens, and that we are + // silently ignoring it. 
+ self.cur_rec = None; + return None; + } + } + }; + + if let Some(mut r_rec) = rec { + let outer_pfx = if let Some(Ok(Ok(rr))) = + r_rec.first().map(|v| v.as_ref().map(|h| K::header(h))) + { + rr.prefix + } else { + return Some(vec![Err(FatalError)]); + }; + + for (k, v) in self.tree_iter.by_ref().flatten() { + let header = K::header(&k); + + if let Ok(h) = header { + if h.prefix == outer_pfx { + r_rec.push(Ok([k, v].concat())); + } else { + self.cur_rec = Some(vec![Ok([k, v].concat())]); + break; + } + } else { + r_rec.push(Err(FatalError)); + } + } + + Some(r_rec) + } else { + None + } + } +} + +// pub(crate) struct MoreSpecificPrefixIter< +// 'a, +// AF: AddressFamily + 'a, +// K: KeySize + 'a, +// // M: Meta + 'a, +// const PREFIX_SIZE: usize, +// const KEY_SIZE: usize, +// > { +// next_rec: Option<(PrefixId, Vec>)>, +// store: &'a PersistTree, +// search_prefix: PrefixId, +// search_lengths: Vec, +// cur_range: Box< +// dyn DoubleEndedIterator< +// Item = lsm_tree::Result<(lsm_tree::Slice, lsm_tree::Slice)>, +// >, +// >, +// mui: Option, +// global_withdrawn_bmin: &'a RoaringBitmap, +// include_withdrawn: bool, +// } + +// impl< +// 'a, +// AF: AddressFamily + 'a, +// K: KeySize + 'a, +// // M: Meta + 'a, +// const PREFIX_SIZE: usize, +// const KEY_SIZE: usize, +// > Iterator for MoreSpecificPrefixIter<'a, AF, K, PREFIX_SIZE, KEY_SIZE> +// { +// type Item = (PrefixId, Vec>); +// fn next(&mut self) -> Option { +// let mut cur_pfx = None; +// let mut recs = +// if let Some(next_rec) = std::mem::take(&mut self.next_rec) { +// cur_pfx = Some(next_rec.0); +// next_rec.1 +// } else { +// vec![] +// }; +// loop { +// if let Some(Ok((k, v))) = self.cur_range.next() { +// // let (pfx, mui, ltime, mut status) = +// let mut v = [k, v].concat(); +// let key = K::header_mut(&mut v); + +// if !self.include_withdrawn +// && (key.status == RouteStatus::Withdrawn) +// { +// continue; +// } + +// if self.global_withdrawn_bmin.contains(key.mui.into()) { +// if 
!self.include_withdrawn { +// continue; +// } else { +// key.status = RouteStatus::Withdrawn; +// } +// } + +// if let Some(m) = self.mui { +// if m != key.mui.into() { +// continue; +// } +// } + +// cur_pfx = if cur_pfx.is_some() { +// cur_pfx +// } else { +// Some(key.prefix) +// }; + +// if cur_pfx.is_some_and(|c| c == key.prefix) { +// // recs.push(PublicRecord::new( +// // mui, +// // ltime, +// // status, +// // v.as_ref().to_vec().into(), +// // )); +// recs.push(v); +// } else { +// self.next_rec = cur_pfx.map(|_| { +// (key.prefix, vec![v]) +// // vec![PublicRecord::new( +// // mui, +// // ltime, +// // status, +// // v.as_ref().to_vec().into(), +// // )], +// }); +// return Some((key.prefix, recs)); +// } +// } else { +// // See if there's a next prefix length to iterate over +// if let Some(len) = self.search_lengths.pop() { +// self.cur_range = self +// .store +// .get_records_for_more_specific_prefix_in_len( +// self.search_prefix, +// len, +// ); +// } else { +// return cur_pfx.map(|p| (p, recs)); +// } +// } +// } +// } +// } + +// pub(crate) struct LessSpecificPrefixIter< +// 'a, +// AF: AddressFamily + 'a, +// K: KeySize + 'a, +// M: Meta + 'a, +// const PREFIX_SIZE: usize, +// const KEY_SIZE: usize, +// > { +// store: &'a PersistTree, +// search_lengths: Vec>, +// mui: Option, +// global_withdrawn_bmin: &'a RoaringBitmap, +// include_withdrawn: bool, +// _m: PhantomData, +// } + +// impl< +// 'a, +// AF: AddressFamily + 'a, +// K: KeySize + 'a, +// M: Meta + 'a, +// const PREFIX_SIZE: usize, +// const KEY_SIZE: usize, +// > Iterator +// for LessSpecificPrefixIter<'a, AF, K, M, PREFIX_SIZE, KEY_SIZE> +// { +// type Item = (PrefixId, Vec>); +// fn next(&mut self) -> Option { +// loop { +// if let Some(lp) = self.search_lengths.pop() { +// let recs = self.store.get_records_for_prefix( +// lp, +// self.mui, +// self.include_withdrawn, +// self.global_withdrawn_bmin, +// ); +// // .into_iter() +// // .filter(|r| self.mui.is_none_or(|m| m == 
r.multi_uniq_id)) +// // .filter(|r| { +// // self.include_withdrawn +// // || (!self +// // .global_withdrawn_bmin +// // .contains(r.multi_uniq_id) +// // && r.status != RouteStatus::Withdrawn) +// // }) +// // .collect::>(); + +// if !recs.is_empty() { +// return Some((lp, recs)); +// } +// } else { +// return None; +// } +// } +// } +// } diff --git a/src/macros.rs b/src/macros.rs index e487e551..7b1fcf20 100644 --- a/src/macros.rs +++ b/src/macros.rs @@ -1,44 +1,78 @@ #[macro_export] -// This macro only works for stride with bitmaps that are <= u128, -// the ones with synthetic integers (U256, U512) don't have the trait -// implementations for left|right shift, counting ones etc. #[doc(hidden)] -macro_rules! impl_primitive_stride { - ( $( $len: expr; $bits: expr; $pfxsize:ty; $ptrsize: ty ), * ) => { - $( - impl Stride for $pfxsize { - type PtrSize = $ptrsize; - const BITS: u8 = $bits; - const STRIDE_LEN: u8 = $len; - - fn get_bit_pos(nibble: u32, len: u8) -> $pfxsize { - 1 << (::BITS - ((1 << len) - 1) as u8 - nibble as u8 - 1) - } - - fn get_pfx_index(bitmap: $pfxsize, nibble: u32, len: u8) -> usize { - (bitmap >> ((::BITS - ((1 << len) - 1) as u8 - nibble as u8 - 1) as usize)) - .count_ones() as usize - - 1 - } - fn get_ptr_index(bitmap: $ptrsize, nibble: u32) -> usize { - (bitmap >> ((::BITS >> 1) - nibble as u8 - 1) as usize).count_ones() - as usize - - 1 - } - - fn into_stride_size(bitmap: $ptrsize) -> $pfxsize { - bitmap as $pfxsize << 1 - } - - fn into_ptrbitarr_size(bitmap: $pfxsize) -> $ptrsize { - (bitmap >> 1) as $ptrsize - } - - #[inline] - fn leading_zeros(self) -> u32 { - self.leading_zeros() - } - } - )* +macro_rules! 
all_strategies { + ( $( $fn_name: ident; $test_name: ident; $ty: ty ), * ) => { + + $( + #[test] + fn $fn_name() -> Result<(), Box> { + use rotonda_store::rib::config::{ + MemoryOnlyConfig, + PersistOnlyConfig, + PersistHistoryConfig, + WriteAheadConfig + }; + + //------- Default (MemoryOnly) + println!("MemoryOnly strategy starting..."); + let tree_bitmap = StarCastRib::< + $ty, MemoryOnlyConfig>::try_default()?; + + $test_name(tree_bitmap)?; + + //------- PersistOnly + + println!("PersistOnly strategy starting..."); + let mut store_config = PersistOnlyConfig::default(); + store_config.set_persist_path( + "/tmp/rotonda/".into() + ); + + let tree_bitmap = StarCastRib::< + $ty, PersistOnlyConfig + >::new_with_config( + store_config + )?; + + $test_name(tree_bitmap)?; + + //------- PersistHistory + + println!("PersistHistory strategy starting..."); + let mut store_config = PersistHistoryConfig::default(); + store_config.set_persist_path( + "/tmp/rotonda/".into() + ); + + let tree_bitmap = StarCastRib::< + $ty, + PersistHistoryConfig + >::new_with_config( + store_config + )?; + + $test_name(tree_bitmap)?; + + //------- WriteAhead + + println!("WriteAhead strategy starting..."); + + let mut store_config = WriteAheadConfig::default(); + store_config.set_persist_path( + "/tmp/rotonda/".into() + ); + + let tree_bitmap = StarCastRib::< + $ty, + WriteAheadConfig + >::new_with_config( + store_config + )?; + + $test_name(tree_bitmap)?; + + Ok(()) + } + )* }; } diff --git a/src/meta_examples.rs b/src/meta_examples.rs deleted file mode 100644 index d36e8eac..00000000 --- a/src/meta_examples.rs +++ /dev/null @@ -1,105 +0,0 @@ -//------------ PrefixAs Metadata impl --------------------------------------- - -use inetnum::asn::Asn; - -use crate::Meta; - -#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct PrefixAs(pub u32); - -// impl MergeUpdate for PrefixAs { -// type UserDataIn = (); -// type UserDataOut = (); - -// fn merge_update( -// &mut self, -// 
update_record: PrefixAs, -// _: Option<&Self::UserDataIn>, -// ) -> Result<(), Box> { -// self.0 = update_record.0; -// Ok(()) -// } - -// fn clone_merge_update( -// &self, -// update_meta: &Self, -// _: Option<&Self::UserDataIn>, -// ) -> Result<(Self, Self::UserDataOut), Box> -// where -// Self: std::marker::Sized, -// { -// Ok((PrefixAs(update_meta.0), ())) -// } -// } - -impl Meta for PrefixAs { - type Orderable<'a> = Asn; - type TBI = (); - fn as_orderable(&self, _tbi: Self::TBI) -> Asn { - self.0.into() - } -} - -impl std::fmt::Display for PrefixAs { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "AS{}", self.0) - } -} - -// Hash implementation that always returns the same hash, so that all -// records get thrown on one big heap. -// impl std::hash::Hash for PrefixAs { -// fn hash(&self, state: &mut H) { -// 0.hash(state); -// } -// } - -/// Tree-wide empty meta-data type -/// -/// A special type that indicates that there's no metadata in the tree -/// storing the prefixes. Note that this is different from a tree with -/// optional meta-data. 
-#[derive(Clone, Copy, Hash)] -pub enum NoMeta { - Empty, -} - -impl std::fmt::Debug for NoMeta { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str("") - } -} - -impl std::fmt::Display for NoMeta { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_str("NoMeta") - } -} - -impl Meta for NoMeta { - type Orderable<'a> = (); - type TBI = (); - fn as_orderable(&self, _tbi: Self::TBI) {} -} - - -// impl MergeUpdate for NoMeta { -// type UserDataIn = (); -// type UserDataOut = (); - -// fn merge_update( -// &mut self, -// _: NoMeta, -// _: Option<&Self::UserDataIn>, -// ) -> Result<(), Box> { -// Ok(()) -// } - -// fn clone_merge_update( -// &self, -// _: &NoMeta, -// _: Option<&Self::UserDataIn>, -// ) -> Result<(Self, Self::UserDataOut), Box> { -// Ok((NoMeta::Empty, ())) -// } -// } \ No newline at end of file diff --git a/src/node_id.rs b/src/node_id.rs deleted file mode 100644 index b1ecd071..00000000 --- a/src/node_id.rs +++ /dev/null @@ -1,62 +0,0 @@ -//------------ NodeId Types ------------------------------------------------- - -pub trait SortableNodeId -where - Self: std::cmp::Ord + std::fmt::Debug + Sized + Default, - Self::Sort: - std::cmp::Ord + std::convert::From + std::convert::Into, - Self::Part: std::cmp::Ord - + std::convert::From - + std::marker::Copy - + std::fmt::Debug, -{ - type Part; - type Sort; - // fn sort(&self, other: &Self) -> std::cmp::Ordering; - fn new(sort: &Self::Sort, part: &Self::Part) -> Self; - fn empty() -> Self; - fn get_sort(&self) -> Self::Sort; - fn get_part(&self) -> Self::Part; - fn is_empty(&self) -> bool; -} - -#[derive( - Eq, PartialEq, Ord, PartialOrd, Hash, Debug, Copy, Clone, Default, -)] -pub struct InMemNodeId(pub u16, pub u32); - -// This works for both IPv4 and IPv6 up to a certain point. -// the u16 for Sort is used for ordering the local vecs -// inside the nodes. 
-// The u32 Part is used as an index to the backing global vecs, -// so you CANNOT store all IPv6 prefixes that could exist! -// If you really want that you should implement your own type with trait -// SortableNodeId, e.g., Sort = u16, Part = u128. -impl SortableNodeId for InMemNodeId { - type Sort = u16; - type Part = u32; - - // fn sort(&self, other: &Self) -> std::cmp::Ordering { - // self.0.cmp(&other.0) - // } - - fn new(sort: &Self::Sort, part: &Self::Part) -> InMemNodeId { - InMemNodeId(*sort, *part) - } - - fn get_sort(&self) -> Self::Sort { - self.0 - } - - fn get_part(&self) -> Self::Part { - self.1 - } - - fn is_empty(&self) -> bool { - self.0 == 0 && self.1 == 0 - } - - fn empty() -> Self { - Self::new(&0, &0) - } -} diff --git a/src/prefix_cht/cht.rs b/src/prefix_cht/cht.rs new file mode 100644 index 00000000..58d855bc --- /dev/null +++ b/src/prefix_cht/cht.rs @@ -0,0 +1,836 @@ +use std::collections::HashMap; +use std::fmt::{Debug, Display}; +use std::sync::atomic::Ordering; +use std::sync::{Arc, Mutex, MutexGuard}; + +use crossbeam_epoch::{Atomic, Guard, Owned}; +use crossbeam_utils::Backoff; +use inetnum::addr::Prefix; +use log::{debug, log_enabled, trace}; +use roaring::RoaringBitmap; + +use crate::cht::{nodeset_size, prev_node_size}; +use crate::errors::{FatalError, FatalResult}; +use crate::prefix_record::Meta; +use crate::stats::{Counters, UpsertReport}; +use crate::types::RouteStatus; +use crate::{ + cht::{Cht, OnceBoxSlice, Value}, + types::{ + errors::PrefixStoreError, prefix_record::Record, AddressFamily, + PrefixId, + }, +}; + +//------------ MultiMap ------------------------------------------------------ +// +// This is the collection of records or a given prefix, keyed on the multi +// unique identifier ("mui"). Note that the record contains more than just +// the // meta-data typed value ("M"). 
+ +#[derive(Debug)] +pub struct MultiMap( + Arc>>>, +); + +impl MultiMap { + pub(crate) fn new(record_map: HashMap>) -> Self { + Self(Arc::new(Mutex::new(record_map))) + } + + #[allow(clippy::type_complexity)] + fn acquire_write_lock( + &self, + ) -> FatalResult<(MutexGuard>>, usize)> + { + let mut retry_count: usize = 0; + let backoff = Backoff::new(); + + loop { + // We're using lock(), which returns an Error only if another + // thread has panicked while holding the lock. In that situtation + // we are certainly not going to write anything. + if let Ok(guard) = self.0.lock().map_err(|_| FatalError) { + return Ok((guard, retry_count)); + } + + backoff.spin(); + retry_count += 1; + } + } + + fn acquire_read_guard( + &self, + ) -> MutexGuard>> { + let backoff = Backoff::new(); + + loop { + if let Ok(guard) = self.0.try_lock() { + return guard; + } + + backoff.spin(); + } + } + + pub fn _len(&self) -> usize { + let record_map = self.acquire_read_guard(); + record_map.len() + } + + pub fn get_record_for_mui( + &self, + mui: u32, + include_withdrawn: bool, + ) -> Option> { + let record_map = self.acquire_read_guard(); + + record_map.get(&mui).and_then(|r| -> Option> { + if include_withdrawn || r.route_status() == RouteStatus::Active { + Some(Record::from((mui, r))) + } else { + None + } + }) + } + + pub fn best_backup(&self, tbi: M::TBI) -> (Option, Option) { + let record_map = self.acquire_read_guard(); + let ord_routes = record_map + .iter() + .map(|r| (r.1.meta().as_orderable(tbi), *r.0)); + let (best, bckup) = + routecore::bgp::path_selection::best_backup_generic(ord_routes); + (best.map(|b| b.1), bckup.map(|b| b.1)) + } + + pub(crate) fn get_record_for_mui_with_rewritten_status( + &self, + mui: u32, + bmin: &RoaringBitmap, + rewrite_status: RouteStatus, + ) -> Option> { + let record_map = self.acquire_read_guard(); + record_map.get(&mui).map(|r| { + // We'll return a cloned record: the record in the store remains + // untouched. 
+ let mut r = r.clone(); + if bmin.contains(mui) { + r.set_route_status(rewrite_status); + } + Record::from((mui, &r)) + }) + } + + pub fn get_filtered_record_for_mui( + &self, + mui: u32, + include_withdrawn: bool, + bmin: &RoaringBitmap, + ) -> Option> { + match include_withdrawn { + false => self.get_record_for_mui(mui, include_withdrawn), + true => self.get_record_for_mui_with_rewritten_status( + mui, + bmin, + RouteStatus::Withdrawn, + ), + } + } + + // Helper to filter out records that are not-active (Inactive or + // Withdrawn), or whose mui appears in the global withdrawn index. + pub fn get_filtered_records( + &self, + mui: Option, + include_withdrawn: bool, + bmin: &RoaringBitmap, + ) -> Option>> { + if let Some(mui) = mui { + self.get_filtered_record_for_mui(mui, include_withdrawn, bmin) + .map(|r| vec![r]) + } else { + match include_withdrawn { + false => { + let recs = self.as_active_records_not_in_bmin(bmin); + if recs.is_empty() { + None + } else { + Some(recs) + } + } + true => { + let recs = self.as_records_with_rewritten_status( + bmin, + RouteStatus::Withdrawn, + ); + if recs.is_empty() { + None + } else { + Some(recs) + } + } + } + } + } + + // return all records regardless of their local status, or any globally + // set status for the mui of the record. However, the local status for a + // record whose mui appears in the specified bitmap index, will be + // rewritten with the specified RouteStatus. 
+ pub fn as_records_with_rewritten_status( + &self, + bmin: &RoaringBitmap, + rewrite_status: RouteStatus, + ) -> Vec> { + let record_map = self.acquire_read_guard(); + record_map + .iter() + .map(move |r| { + let mut rec = r.1.clone(); + if bmin.contains(*r.0) { + rec.set_route_status(rewrite_status); + } + Record::from((*r.0, &rec)) + }) + .collect::>() + } + + pub fn _as_records(&self) -> Vec> { + let record_map = self.acquire_read_guard(); + record_map + .iter() + .map(|r| Record::from((*r.0, r.1))) + .collect::>() + } + + // Returns a vec of records whose keys are not in the supplied bitmap + // index, and whose local Status is set to Active. Used to filter out + // withdrawn routes. + pub fn as_active_records_not_in_bmin( + &self, + bmin: &RoaringBitmap, + ) -> Vec> { + let record_map = self.acquire_read_guard(); + record_map + .iter() + .filter_map(|r| { + if r.1.route_status() == RouteStatus::Active + && !bmin.contains(*r.0) + { + Some(Record::from((*r.0, r.1))) + } else { + None + } + }) + .collect::>() + } + + // Change the local status of the record for this mui to Withdrawn. + pub fn mark_as_withdrawn_for_mui(&self, mui: u32, ltime: u64) { + let mut record_map = self.acquire_read_guard(); + if let Some(rec) = record_map.get_mut(&mui) { + rec.set_route_status(RouteStatus::Withdrawn); + rec.set_logical_time(ltime); + } + } + + // Change the local status of the record for this mui to Active. + pub fn mark_as_active_for_mui(&self, mui: u32, ltime: u64) { + let mut record_map = self.acquire_read_guard(); + if let Some(rec) = record_map.get_mut(&mui) { + rec.set_route_status(RouteStatus::Active); + rec.set_logical_time(ltime); + } + } + + // Insert or replace the PublicRecord in the HashMap for the key of + // record.multi_uniq_id. Returns the number of entries in the HashMap + // after updating it, if it's more than 1. Returns None if this is the + // first entry. 
+ #[allow(clippy::type_complexity)] + pub(crate) fn upsert_record( + &self, + new_rec: Record, + ) -> FatalResult<(Option<(MultiMapValue, usize)>, usize)> { + let (mut record_map, retry_count) = self.acquire_write_lock()?; + let key = new_rec.multi_uniq_id; + + match record_map.contains_key(&key) { + true => { + let old_rec = record_map + .insert(key, MultiMapValue::from(new_rec)) + .map(|r| (r, record_map.len())); + Ok((old_rec, retry_count)) + } + false => { + let new_rec = MultiMapValue::from(new_rec); + let old_rec = record_map.insert(key, new_rec); + assert!(old_rec.is_none()); + Ok((None, retry_count)) + } + } + } +} +#[derive(Clone, Debug)] +pub(crate) struct MultiMapValue { + meta: M, + ltime: u64, + route_status: RouteStatus, +} + +impl MultiMapValue { + pub(crate) fn logical_time(&self) -> u64 { + self.ltime + } + + pub(crate) fn set_logical_time(&mut self, ltime: u64) { + self.ltime = ltime; + } + + pub(crate) fn meta(&self) -> &M { + &self.meta + } + + pub(crate) fn route_status(&self) -> RouteStatus { + self.route_status + } + + pub(crate) fn set_route_status(&mut self, status: RouteStatus) { + self.route_status = status; + } +} + +impl std::fmt::Display for MultiMapValue { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "{} {}", + // self.meta(), + self.logical_time(), + self.route_status() + ) + } +} + +impl From> for MultiMapValue { + fn from(value: Record) -> Self { + Self { + ltime: value.ltime, + route_status: value.status, + meta: value.meta, + } + } +} + +impl From<(u32, &MultiMapValue)> for Record { + fn from(value: (u32, &MultiMapValue)) -> Self { + Self { + multi_uniq_id: value.0, + meta: value.1.meta().clone(), + ltime: value.1.ltime, + status: value.1.route_status, + } + } +} + +impl Clone for MultiMap { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } +} + +// ----------- Prefix related structs --------------------------------------- + +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub 
struct PathSelections { + pub(crate) path_selection_muis: (Option, Option), +} + +impl PathSelections { + pub fn best(&self) -> Option { + self.path_selection_muis.0 + } + + pub fn _backup(&self) -> Option { + self.path_selection_muis.1 + } +} +// ----------- StoredPrefix ------------------------------------------------- +// This is the top-level struct that's linked from the slots in the buckets. +// It contains a super_agg_record that is supposed to hold counters for the +// records that are stored inside it, so that iterators over its linked lists +// don't have to go into them if there's nothing there and could stop early. +#[derive(Debug)] +pub struct StoredPrefix { + // the prefix itself, + pub prefix: PrefixId, + // the aggregated data for this prefix + pub record_map: MultiMap, + // (mui of best path entry, mui of backup path entry) from the record_map + path_selections: Atomic, + // the reference to the next set of records for this prefix, if any. + pub next_bucket: PrefixSet, +} + +impl StoredPrefix { + pub(crate) fn new(pfx_id: PrefixId, level: u8) -> Self { + // start calculation size of next set, it's dependent on the level + // we're in. 
+ // let pfx_id = PrefixId::new(record.net, record.len); + // let this_level = bits_for_len(pfx_id.get_len(), level); + let next_level = nodeset_size(pfx_id.len(), level + 1); + + trace!("next level {}", next_level); + let next_bucket: PrefixSet = if next_level > 0 { + debug!( + "{} store: INSERT with new bucket of size {} at prefix len {}", + std::thread::current().name().unwrap_or("unnamed-thread"), + 1 << next_level, + pfx_id.len() + ); + PrefixSet::init_with_p2_children(next_level as usize) + } else { + debug!( + "{} store: INSERT at LAST LEVEL with empty bucket at prefix len {}", + std::thread::current().name().unwrap_or("unnamed-thread"), + pfx_id.len() + ); + PrefixSet::init_with_p2_children(next_level as usize) + }; + // End of calculation + + let rec_map = HashMap::new(); + + StoredPrefix { + prefix: pfx_id, + path_selections: Atomic::init(PathSelections { + path_selection_muis: (None, None), + }), + record_map: MultiMap::new(rec_map), + next_bucket, + } + } + + pub(crate) fn get_prefix_id(&self) -> PrefixId { + self.prefix + } + + pub fn get_path_selections(&self, guard: &Guard) -> PathSelections { + let path_selections = + self.path_selections.load(Ordering::Acquire, guard); + + unsafe { path_selections.as_ref() }.map_or( + PathSelections { + path_selection_muis: (None, None), + }, + |ps| *ps, + ) + } + + pub(crate) fn set_path_selections( + &self, + path_selections: PathSelections, + guard: &Guard, + ) -> Result<(), PrefixStoreError> { + let current = self.path_selections.load(Ordering::SeqCst, guard); + + if unsafe { current.as_ref() } == Some(&path_selections) { + debug!("unchanged path_selections"); + return Ok(()); + } + + self.path_selections + .compare_exchange( + current, + // Set the tag to indicate we're updated + Owned::new(path_selections).with_tag(0), + Ordering::AcqRel, + Ordering::Acquire, + guard, + ) + .map_err(|_| PrefixStoreError::PathSelectionOutdated)?; + Ok(()) + } + + pub fn set_ps_outdated( + &self, + guard: &Guard, + ) -> 
Result<(), PrefixStoreError> { + self.path_selections + .fetch_update(Ordering::Acquire, Ordering::Acquire, guard, |p| { + Some(p.with_tag(1)) + }) + .map(|_| ()) + .map_err(|_| PrefixStoreError::StoreNotReadyError) + } + + pub fn is_ps_outdated(&self, guard: &Guard) -> bool { + self.path_selections.load(Ordering::Acquire, guard).tag() == 1 + } + + pub fn calculate_and_store_best_backup<'a>( + &'a self, + tbi: &M::TBI, + guard: &'a Guard, + ) -> Result<(Option, Option), PrefixStoreError> { + let path_selection_muis = self.record_map.best_backup(*tbi); + + self.set_path_selections( + PathSelections { + path_selection_muis, + }, + guard, + )?; + + Ok(path_selection_muis) + } +} +//------------ PrefixSet ---------------------------------------------------- + +// The PrefixSet is the ARRAY that holds all the child prefixes in a node. +// Since we are storing these prefixes in the global store in a HashMap that +// is keyed on the tuple (addr_bits, len, serial number) we can get away with +// storing ONLY THE SERIAL NUMBER in the pfx_vec: The addr_bits and len are +// implied in the position in the array a serial number has. A PrefixSet +// doesn't know anything about the node it is contained in, so it needs a base +// address to be able to calculate the complete prefix of a child prefix. + +#[derive(Debug)] +#[repr(align(8))] +pub struct PrefixSet( + pub OnceBoxSlice>, +); + +impl Value for PrefixSet { + fn init_with_p2_children(p2_size: usize) -> Self { + let size = if p2_size == 0 { 0 } else { 1 << p2_size }; + PrefixSet(OnceBoxSlice::new(size)) + } +} + +//------------ PrefixCht ----------------------------------------------------- + +// PrefixCht is a simple wrapper around Cht. It stores the meta-data for +// in-memeory strategies. 
+ +#[derive(Debug)] +pub(crate) struct PrefixCht< + AF: AddressFamily, + M: Meta, + const ROOT_SIZE: usize, +> { + bush: Cht, ROOT_SIZE, 1>, + counters: Counters, +} + +impl + PrefixCht +{ + pub(crate) fn init() -> Self { + Self { + bush: , ROOT_SIZE, 1>>::init(), + counters: Counters::default(), + } + } + + pub(crate) fn get_records_for_prefix( + &self, + prefix: PrefixId, + mui: Option, + include_withdrawn: bool, + bmin: &RoaringBitmap, + ) -> Option>> { + let mut prefix_set = self.bush.root_for_len(prefix.len()); + let mut level: u8 = 0; + let backoff = Backoff::new(); + + loop { + // The index of the prefix in this array (at this len and + // level) is calculated by performing the hash function + // over the prefix. + + // HASHING FUNCTION + let index = Self::hash_prefix_id(prefix, level); + + if let Some(stored_prefix) = prefix_set.0.get(index) { + if prefix == stored_prefix.get_prefix_id() { + if log_enabled!(log::Level::Trace) { + trace!( + "found requested prefix {} ({:?})", + Prefix::from(prefix), + prefix + ); + } + + return stored_prefix.record_map.get_filtered_records( + mui, + include_withdrawn, + bmin, + ); + }; + + // Advance to the next level. + prefix_set = &stored_prefix.next_bucket; + level += 1; + backoff.spin(); + continue; + } + + trace!("no prefix found for {:?}", prefix); + return None; + } + } + + pub(crate) fn upsert_prefix( + &self, + prefix: PrefixId, + record: Record, + update_path_selections: Option, + guard: &Guard, + ) -> Result<(UpsertReport, Option>), PrefixStoreError> + { + let mut prefix_is_new = true; + let mut mui_is_new = true; + + let (mui_count, cas_count) = + match self.non_recursive_retrieve_prefix_mut(prefix) { + // There's no StoredPrefix at this location yet. Create a new + // PrefixRecord and try to store it in the empty slot. 
+ (stored_prefix, false) => { + if log_enabled!(log::Level::Debug) { + debug!( + "{} store: Create new prefix record", + std::thread::current() + .name() + .unwrap_or("unnamed-thread") + ); + } + + let (mui_count, retry_count) = stored_prefix + .record_map + .upsert_record(record) + .map_err(|_| PrefixStoreError::FatalError)?; + + // See if someone beat us to creating the record. + if mui_count.is_some() { + mui_is_new = false; + prefix_is_new = false; + } else { + self.counters.inc_routes_count(); + } + + if prefix_is_new { + self.counters + .inc_prefixes_count(stored_prefix.prefix.len()); + } + (mui_count, retry_count) + } + // There already is a StoredPrefix with a record at this + // location. + (stored_prefix, true) => { + if log_enabled!(log::Level::Debug) { + debug!( + "{} store: Found existing prefix record for {}/{}", + std::thread::current() + .name() + .unwrap_or("unnamed-thread"), + prefix.bits(), + prefix.len() + ); + } + prefix_is_new = false; + + // Update the already existing record_map with our + // caller's record. + stored_prefix.set_ps_outdated(guard)?; + + let (mui_count, retry_count) = stored_prefix + .record_map + .upsert_record(record) + .map_err(|_| PrefixStoreError::FatalError)?; + + // if the mui is new, we didn't overwrite an existing + // route, so that's a new one! + if mui_count.is_none() { + mui_is_new = true; + self.counters.inc_routes_count(); + }; + + if let Some(tbi) = update_path_selections { + stored_prefix + .calculate_and_store_best_backup(&tbi, guard)?; + } + + (mui_count, retry_count) + } + }; + + let count = mui_count.as_ref().map(|m| m.1).unwrap_or(1); + Ok(( + UpsertReport { + prefix_new: prefix_is_new, + cas_count, + mui_new: mui_is_new, + mui_count: count, + }, + mui_count.map(|m| m.0), + )) + } + // This function is used by the upsert_prefix function above. 
+ // + // We're using a Chained Hash Table and this function returns one of: + // - a StoredPrefix that already exists for this search_prefix_id + // - the Last StoredPrefix in the chain. + // - an error, if no StoredPrefix whatsoever can be found in the store. + // + // The error condition really shouldn't happen, because that basically + // means the root node for that particular prefix length doesn't exist. + pub(crate) fn non_recursive_retrieve_prefix_mut( + &self, + search_prefix_id: PrefixId, + ) -> (&StoredPrefix, bool) { + trace!("non_recursive_retrieve_prefix_mut_with_guard"); + let mut prefix_set = self.bush.root_for_len(search_prefix_id.len()); + let mut level: u8 = 0; + + trace!("root prefix_set {:?}", prefix_set); + loop { + // HASHING FUNCTION + let index = Self::hash_prefix_id(search_prefix_id, level); + + // probe the slot with the index that's the result of the hashing. + let stored_prefix = match prefix_set.0.get(index) { + Some(p) => { + trace!("prefix set found."); + (p, true) + } + None => { + // We're at the end of the chain and haven't found our + // search_prefix_id anywhere. Return the end-of-the-chain + // StoredPrefix, so the caller can attach a new one. + trace!( + "no record. returning last found record in level + {}, with index {}.", + level, + index + ); + let index = Self::hash_prefix_id(search_prefix_id, level); + trace!("calculate next index {}", index); + let var_name = ( + prefix_set + .0 + .get_or_init(index, || { + StoredPrefix::new( + PrefixId::new( + search_prefix_id.bits(), + search_prefix_id.len(), + ), + level, + ) + }) + .0, + false, + ); + var_name + } + }; + + if search_prefix_id == stored_prefix.0.prefix { + // GOTCHA! + // Our search-prefix is stored here, so we're returning + // it, so its PrefixRecord can be updated by the caller. 
+ if log_enabled!(log::Level::Trace) { + trace!( + "found requested prefix {} ({:?})", + Prefix::from(search_prefix_id), + search_prefix_id + ); + } + return stored_prefix; + } else { + // A Collision. Follow the chain. + level += 1; + prefix_set = &stored_prefix.0.next_bucket; + continue; + } + } + } + + // This function is used by the match_prefix, and [more|less]_specifics + // public methods on the TreeBitMap (indirectly). + #[allow(clippy::type_complexity)] + // This method can never run out of levels, since it only continues if it + // finds occupied child slots. The indexing can therefore not crash. + #[allow(clippy::indexing_slicing)] + pub(crate) fn non_recursive_retrieve_prefix( + &self, + id: PrefixId, + ) -> ( + Option<&StoredPrefix>, + Option<( + PrefixId, + u8, + &PrefixSet, + [Option<(&PrefixSet, usize)>; 32], + usize, + )>, + ) { + let mut prefix_set = self.bush.root_for_len(id.len()); + let mut parents = [None; 32]; + let mut level: u8 = 0; + let backoff = Backoff::new(); + + loop { + // The index of the prefix in this array (at this len and + // level) is calculated by performing the hash function + // over the prefix. + let index = Self::hash_prefix_id(id, level); + + if let Some(stored_prefix) = prefix_set.0.get(index) { + if id == stored_prefix.get_prefix_id() { + if log_enabled!(log::Level::Trace) { + trace!( + "found requested prefix {} ({:?})", + Prefix::from(id), + id + ); + } + parents[level as usize] = Some((prefix_set, index)); + return ( + Some(stored_prefix), + Some((id, level, prefix_set, parents, index)), + ); + }; + + // Advance to the next level. 
+ prefix_set = &stored_prefix.next_bucket; + level += 1; + backoff.spin(); + continue; + } + + trace!("no prefix found for {:?}", id); + parents[level as usize] = Some((prefix_set, index)); + return (None, Some((id, level, prefix_set, parents, index))); + } + } + + pub(crate) fn prefixes_count(&self) -> usize { + self.counters.prefixes_count().iter().sum() + } + + pub(crate) fn routes_count(&self) -> usize { + self.counters.nodes_count() + } + + fn hash_prefix_id(id: PrefixId, level: u8) -> usize { + let last_level = prev_node_size(id.len(), level); + + // HASHING FUNCTION + let size = nodeset_size(id.len(), level); + ((id.bits() << AF::from_u32(last_level as u32)) + >> AF::from_u8((::BITS - size) % ::BITS)) + .dangerously_truncate_to_u32() as usize + } +} diff --git a/src/prefix_cht/iterators.rs b/src/prefix_cht/iterators.rs new file mode 100644 index 00000000..945dffc0 --- /dev/null +++ b/src/prefix_cht/iterators.rs @@ -0,0 +1,107 @@ +use crate::{ + tree_bitmap::{NodeMoreSpecificChildIter, NodeMoreSpecificsPrefixIter}, + types::{AddressFamily, BitSpan, PrefixId}, + TreeBitMap, +}; +use log::trace; +use roaring::RoaringBitmap; + +// This iterator is unused right now: all iterators go over the in-memory +// treebitmap, and retreive metadata based on the persist_strategy per prefix +// from the relevant tree. +// +// However this tree may ultimately be more efficient for the MemoryOnly +// strategy. +pub(crate) struct _MoreSpecificPrefixIter< + 'a, + AF: AddressFamily, + const ROOT_SIZE: usize, +> { + store: &'a TreeBitMap, + cur_ptr_iter: NodeMoreSpecificChildIter, + cur_pfx_iter: NodeMoreSpecificsPrefixIter, + parent_and_position: Vec>, + // If specified, we're only iterating over records for this mui. + mui: Option, + // This is the tree-wide index of withdrawn muis, used to rewrite the + // statuses of these records, or filter them out. 
+ global_withdrawn_bmin: &'a RoaringBitmap, + // Whether we should filter out the withdrawn records in the search result + include_withdrawn: bool, +} + +#[allow(clippy::unwrap_used)] +impl<'a, AF: AddressFamily + 'a, const ROOT_SIZE: usize> Iterator + for _MoreSpecificPrefixIter<'a, AF, ROOT_SIZE> +{ + type Item = PrefixId; + + fn next(&mut self) -> Option { + trace!("MoreSpecificsPrefixIter"); + + loop { + // first drain the current prefix iterator until empty. + let next_pfx = self.cur_pfx_iter.next(); + + if next_pfx.is_some() { + return next_pfx; + } + + // Our current prefix iterator for this node is done, look for + // the next pfx iterator of the next child node in the current + // ptr iterator. + trace!("resume ptr iterator {:?}", self.cur_ptr_iter); + + let mut next_ptr = self.cur_ptr_iter.next(); + + // Our current ptr iterator is also done, maybe we have a parent + if next_ptr.is_none() { + trace!("try for parent"); + if let Some(cur_ptr_iter) = self.parent_and_position.pop() { + trace!("continue with parent"); + self.cur_ptr_iter = cur_ptr_iter; + next_ptr = self.cur_ptr_iter.next(); + } else { + trace!("no more parents"); + return None; + } + } + + if let Some(next_ptr) = next_ptr { + let node = if self.mui.is_none() { + trace!("let's retriev node {}", next_ptr); + self.store.retrieve_node(next_ptr) + } else { + self.store + .retrieve_node_for_mui(next_ptr, self.mui.unwrap()) + }; + + match node { + Some(next_node) => { + // create new ptr iterator for this node. 
+ self.parent_and_position.push(self.cur_ptr_iter); + let ptr_iter = next_node.more_specific_ptr_iter( + next_ptr, + BitSpan { bits: 0, len: 0 }, + ); + self.cur_ptr_iter = ptr_iter; + + trace!( + "next stride new iterator stride 4 {:?} start \ + bit_span 0 0", + self.cur_ptr_iter, + ); + self.cur_pfx_iter = next_node.more_specific_pfx_iter( + next_ptr, + BitSpan::new(0, 0), + ) + } + None => { + println!("no node here."); + return None; + } + }; + } + } + } +} diff --git a/src/local_array/store/iterators.rs b/src/prefix_cht/iterators_cp.rs similarity index 83% rename from src/local_array/store/iterators.rs rename to src/prefix_cht/iterators_cp.rs index fff93458..b1013e68 100644 --- a/src/local_array/store/iterators.rs +++ b/src/prefix_cht/iterators_cp.rs @@ -1,25 +1,37 @@ // ----------- Store Iterators ---------------------------------------------- // -// This file hosts the iterators for the CustomAllocStorage type and the -// implementations for the methods that start'em. -// Note that these iterators are only the iterators that go over the -// storage (and some over the TreeBitMap nodes, the parent of the store), -// as such all the iterators here are composed of iterators over the -// individual nodes. The Node Iterators live in the node.rs file. -use std::sync::atomic::Ordering; +// This file hosts the iterators for the Rib and implementations for the +// methods that start'em. There are 3 Iterators: +// +// 1. an iterator `PrefixIter` that iterates over ALL of the prefix buckets of +// the CHT backing the TreeBitMap. +// +// 2. a MoreSpecificsIterator that starts from a prefix in the prefix buckets +// for that particular prefix length, but uses the node in the TreeBitMap to +// find its more specifics. +// +// 3. a LessSpecificIterator, that just reduces the prefix size bit-by-bit and +// looks in the prefix buckets for the diminuishing prefix. 
+// +// The Iterators that start from the root node of the TreeBitMap (which +// is the only option for the single-threaded TreeBitMap) live in the +// deprecated_node.rs file. They theoretically should be slower and cause more +// contention, since every lookup has to go through the levels near the root +// in the TreeBitMap. use super::atomic_types::{NodeBuckets, PrefixBuckets, PrefixSet}; -use super::custom_alloc::CustomAllocStorage; -use crate::local_array::store::atomic_types::RouteStatus; -use crate::prefix_record::PublicRecord; +use super::node::{SizedStrideRef, StrideNodeId}; +use super::tree::{Stride3, Stride4, Stride5, TreeBitMap}; +use crate::local_array::types::RouteStatus; +use crate::PublicRecord; use crate::{ af::AddressFamily, local_array::{ bit_span::BitSpan, - node::{ - NodeMoreSpecificChildIter, NodeMoreSpecificsPrefixIter, PrefixId, - SizedStrideRef, Stride3, Stride4, Stride5, StrideNodeId, + in_memory::node::{ + NodeMoreSpecificChildIter, NodeMoreSpecificsPrefixIter, }, + types::PrefixId, }, prefix_record::Meta, }; @@ -75,7 +87,7 @@ impl<'a, AF: AddressFamily + 'a, M: Meta + 'a, PB: PrefixBuckets> // END OF THE LENGTH // This length is done too, go to the next length - trace!("next length {}", self.cur_len + 1); + // trace!("next length {}", self.cur_len + 1); self.cur_len += 1; // a new length, a new life reset the level depth and cursor, @@ -105,7 +117,7 @@ impl<'a, AF: AddressFamily + 'a, M: Meta + 'a, PB: PrefixBuckets> // END OF THE LENGTH // This length is done too, go to the next length - trace!("next length {}", self.cur_len); + // trace!("next length {}", self.cur_len); self.cur_len += 1; // a new length, a new life reset the level depth and @@ -191,7 +203,7 @@ impl<'a, AF: AddressFamily + 'a, M: Meta + 'a, PB: PrefixBuckets> // }) // { return Some(( - s_pfx.get_prefix_id().into_pub(), + s_pfx.get_prefix_id().into(), s_pfx.record_map.as_records(), )); // } else { @@ -214,7 +226,7 @@ impl<'a, AF: AddressFamily + 'a, M: Meta + 'a, PB: 
PrefixBuckets> // { self.cursor += 1; return Some(( - s_pfx.get_prefix_id().into_pub(), + s_pfx.get_prefix_id().into(), s_pfx.record_map.as_records(), )); // } @@ -291,10 +303,10 @@ pub(crate) struct MoreSpecificPrefixIter< NB: NodeBuckets, PB: PrefixBuckets, > { - store: &'a CustomAllocStorage, + store: &'a TreeBitMap, cur_ptr_iter: SizedNodeMoreSpecificIter, cur_pfx_iter: SizedPrefixIter, - start_bit_span: BitSpan, + // start_bit_span: BitSpan, // skip_self: bool, parent_and_position: Vec>, // If specified, we're only iterating over records for this mui. @@ -360,8 +372,9 @@ impl< { return Some((p.prefix, vec![rec])); } - } else if let Some(rec) = - p.record_map.get_record_for_active_mui(mui) + } else if let Some(rec) = p + .record_map + .get_record_for_mui(mui, self.include_withdrawn) { return Some((p.prefix, vec![rec])); } @@ -409,7 +422,8 @@ impl< // Our current prefix iterator for this node is done, look for // the next pfx iterator of the next child node in the current // ptr iterator. 
- trace!("start first ptr_iter"); + trace!("resume ptr iterator {:?}", self.cur_ptr_iter); + let mut next_ptr = self.cur_ptr_iter.next(); // Our current ptr iterator is also done, maybe we have a parent @@ -427,13 +441,11 @@ impl< if let Some(next_ptr) = next_ptr { let node = if self.mui.is_none() { + trace!("let's retriev node {}", next_ptr); self.store.retrieve_node(next_ptr) } else { - self.store.retrieve_node_for_mui( - next_ptr, - self.mui.unwrap(), - // self.guard, - ) + self.store + .retrieve_node_for_mui(next_ptr, self.mui.unwrap()) }; match node { @@ -443,21 +455,20 @@ impl< self.parent_and_position.push(self.cur_ptr_iter); let ptr_iter = next_node.more_specific_ptr_iter( next_ptr, - BitSpan::new(0, 0), + BitSpan { bits: 0, len: 0 }, ); self.cur_ptr_iter = ptr_iter.wrap(); - trace!( - "next stride new iterator stride 3 {:?} start \ - bit_span {:?}", - self.cur_ptr_iter, - self.start_bit_span - ); + // trace!( + // "next stride new iterator stride 3 {:?} start \ + // bit_span {:?}", + // self.cur_ptr_iter, + // self.start_bit_span + // ); self.cur_pfx_iter = next_node .more_specific_pfx_iter( next_ptr, BitSpan::new(0, 0), - false, ) .wrap(); } @@ -466,21 +477,19 @@ impl< self.parent_and_position.push(self.cur_ptr_iter); let ptr_iter = next_node.more_specific_ptr_iter( next_ptr, - BitSpan::new(0, 0), + BitSpan { bits: 0, len: 0 }, ); self.cur_ptr_iter = ptr_iter.wrap(); trace!( "next stride new iterator stride 4 {:?} start \ - bit_span {:?}", + bit_span 0 0", self.cur_ptr_iter, - self.start_bit_span ); self.cur_pfx_iter = next_node .more_specific_pfx_iter( next_ptr, BitSpan::new(0, 0), - false, ) .wrap(); } @@ -489,21 +498,20 @@ impl< self.parent_and_position.push(self.cur_ptr_iter); let ptr_iter = next_node.more_specific_ptr_iter( next_ptr, - BitSpan::new(0, 0), + BitSpan { bits: 0, len: 0 }, ); self.cur_ptr_iter = ptr_iter.wrap(); - trace!( - "next stride new iterator stride 5 {:?} start \ - bit_span {:?}", - self.cur_ptr_iter, - self.start_bit_span - ); + 
// trace!( + // "next stride new iterator stride 5 {:?} start \ + // bit_span {:?}", + // self.cur_ptr_iter, + // self.start_bit_span + // ); self.cur_pfx_iter = next_node .more_specific_pfx_iter( next_ptr, BitSpan::new(0, 0), - false, ) .wrap(); } @@ -590,7 +598,7 @@ impl<'a, AF: AddressFamily + 'a, M: Meta + 'a, PB: PrefixBuckets> if this_level == 0 { // END OF THE LENGTH // This length is done too, go to the next length - trace!("next length {}", self.cur_len + 1); + // trace!("next length {}", self.cur_len + 1); self.cur_len -= 1; // a new length, a new life @@ -630,7 +638,7 @@ impl<'a, AF: AddressFamily + 'a, M: Meta + 'a, PB: PrefixBuckets> } else { stored_prefix .record_map - .get_record_for_active_mui(mui) + .get_record_for_mui(mui, self.include_withdrawn) .into_iter() .collect() } @@ -700,10 +708,10 @@ impl<'a, AF: AddressFamily + 'a, M: Meta + 'a, PB: PrefixBuckets> } } -// ----------- Iterator initialization methods for CustomAllocStorage ------- +// ----------- Iterator initialization methods for Rib ----------------------- // These are only the methods that are starting the iterations. All other -// methods for CustomAllocStorage are in the main custom_alloc.rs file. +// methods for Rib are in the main rib.rs file. impl< 'a, @@ -711,7 +719,7 @@ impl< M: crate::prefix_record::Meta, NB: NodeBuckets, PB: PrefixBuckets, - > CustomAllocStorage + > TreeBitMap { // Iterator over all more-specific prefixes, starting from the given // prefix at the given level and cursor. @@ -721,18 +729,17 @@ impl< mui: Option, include_withdrawn: bool, guard: &'a Guard, - ) -> impl Iterator, Vec>)> + '_ { + ) -> impl Iterator, Vec>)> + 'a { trace!("more specifics for {:?}", start_prefix_id); - // A v4 /32 or a v4 /128 doesn't have more specific prefixes 🤓. + // A v4 /32 or a v6 /128 doesn't have more specific prefixes 🤓. if start_prefix_id.get_len() >= AF::BITS { None } else { // calculate the node start_prefix_id lives in. 
- let (start_node_id, start_bit_span) = + let (start_node_id, start_bs) = self.get_node_id_for_prefix(&start_prefix_id); trace!("start node {}", start_node_id); - trace!( "start prefix id {:032b} (len {})", start_prefix_id.get_net(), @@ -745,10 +752,18 @@ impl< start_node_id.get_len() ); trace!( - "start bit span {:032b} {}", - start_bit_span, - start_bit_span.bits + "start pfx bit span {:08b} {} len {}", + start_bs.bits, + start_bs.bits, + start_bs.len + ); + trace!( + "start ptr bit span {:08b} {} len {}", + start_bs.bits, + start_bs.bits, + start_bs.len ); + let cur_pfx_iter: SizedPrefixIter; let cur_ptr_iter: SizedNodeMoreSpecificIter; @@ -762,62 +777,39 @@ impl< match node { SizedStrideRef::Stride3(n) => { cur_pfx_iter = SizedPrefixIter::Stride3( - n.more_specific_pfx_iter( - start_node_id, - start_bit_span, - true, - ), + n.more_specific_pfx_iter(start_node_id, start_bs), ); cur_ptr_iter = SizedNodeMoreSpecificIter::Stride3( - n.more_specific_ptr_iter( - start_node_id, - start_bit_span, - ), + n.more_specific_ptr_iter(start_node_id, start_bs), ); } SizedStrideRef::Stride4(n) => { cur_pfx_iter = SizedPrefixIter::Stride4( - n.more_specific_pfx_iter( - start_node_id, - start_bit_span, - true, - ), + n.more_specific_pfx_iter(start_node_id, start_bs), ); + trace!("---------------------"); + trace!("start iterating nodes"); cur_ptr_iter = SizedNodeMoreSpecificIter::Stride4( - n.more_specific_ptr_iter( - start_node_id, - start_bit_span, - ), + n.more_specific_ptr_iter(start_node_id, start_bs), ); } SizedStrideRef::Stride5(n) => { cur_pfx_iter = SizedPrefixIter::Stride5( - n.more_specific_pfx_iter( - start_node_id, - start_bit_span, - true, - ), + n.more_specific_pfx_iter(start_node_id, start_bs), ); cur_ptr_iter = SizedNodeMoreSpecificIter::Stride5( - n.more_specific_ptr_iter( - start_node_id, - start_bit_span, - ), + n.more_specific_ptr_iter(start_node_id, start_bs), ); } }; - let global_withdrawn_bmin = unsafe { - self.withdrawn_muis_bmin - 
.load(Ordering::Acquire, guard) - .deref() - }; + let global_withdrawn_bmin = self.withdrawn_muis_bmin(guard); Some(MoreSpecificPrefixIter { store: self, cur_pfx_iter, cur_ptr_iter, - start_bit_span, + // start_bit_span, parent_and_position: vec![], global_withdrawn_bmin, include_withdrawn, @@ -841,7 +833,7 @@ impl< mui: Option, include_withdrawn: bool, guard: &'a Guard, - ) -> impl Iterator, Vec>)> + '_ { + ) -> impl Iterator, Vec>)> + 'a { trace!("less specifics for {:?}", start_prefix_id); trace!("level {}, len {}", 0, start_prefix_id.get_len()); @@ -853,15 +845,11 @@ impl< None } else { let cur_len = start_prefix_id.get_len() - 1; - let cur_bucket = self.prefixes.get_root_prefix_set(cur_len); - let global_withdrawn_bmin = unsafe { - self.withdrawn_muis_bmin - .load(Ordering::Acquire, guard) - .deref() - }; + let cur_bucket = self.prefix_buckets.get_root_prefix_set(cur_len); + let global_withdrawn_bmin = self.withdrawn_muis_bmin(guard); Some(LessSpecificPrefixIter { - prefixes: &self.prefixes, + prefixes: &self.prefix_buckets, cur_len, cur_bucket, cur_level: 0, @@ -875,13 +863,13 @@ impl< .flatten() } - // Iterator over all the prefixes in the storage. + // Iterator over all the prefixes in the in_memory store. 
pub fn prefixes_iter( &'a self, ) -> impl Iterator>)> + 'a { PrefixIter { - prefixes: &self.prefixes, - cur_bucket: self.prefixes.get_root_prefix_set(0), + prefixes: &self.prefix_buckets, + cur_bucket: self.prefix_buckets.get_root_prefix_set(0), cur_len: 0, cur_level: 0, cursor: 0, @@ -889,39 +877,3 @@ impl< } } } - -// ----------- InternalPrefixRecord -> RecordSet (public) ------------------- - -// impl<'a, AF: AddressFamily, Meta: crate::prefix_record::Meta> -// std::iter::FromIterator> -// for routecore::bgp::RecordSet<'a, Meta> -// { -// fn from_iter>>( -// iter: I, -// ) -> Self { -// let mut v4 = vec![]; -// let mut v6 = vec![]; -// for pfx in iter { -// let addr = pfx.net.into_ipaddr(); -// match addr { -// std::net::IpAddr::V4(_) => { -// v4.push( -// routecore::bgp::PrefixRecord::new_with_local_meta( -// Prefix::new(addr, pfx.len).unwrap(), -// pfx.meta, -// ), -// ); -// } -// std::net::IpAddr::V6(_) => { -// v6.push( -// routecore::bgp::PrefixRecord::new_with_local_meta( -// Prefix::new(addr, pfx.len).unwrap(), -// pfx.meta, -// ), -// ); -// } -// } -// } -// Self { v4, v6 } -// } -// } diff --git a/src/prefix_cht/mod.rs b/src/prefix_cht/mod.rs new file mode 100644 index 00000000..dbb286d0 --- /dev/null +++ b/src/prefix_cht/mod.rs @@ -0,0 +1,2 @@ +pub(crate) mod cht; +pub(crate) mod iterators; diff --git a/src/prefix_record.rs b/src/prefix_record.rs deleted file mode 100644 index 890cb41d..00000000 --- a/src/prefix_record.rs +++ /dev/null @@ -1,757 +0,0 @@ -use std::fmt; -use std::fmt::Debug; -use std::{cmp::Ordering, sync::Arc}; - -use crate::local_array::store::atomic_types::{MultiMapValue, RouteStatus}; -use crate::{af::AddressFamily, local_array::node::PrefixId}; -use inetnum::addr::Prefix; - -//------------ InternalPrefixRecord ----------------------------------------- - -// This struct is used for the SingleThreadedStore only. 
-#[derive(Clone, Copy)] -pub struct InternalPrefixRecord -where - M: Meta, - AF: AddressFamily, -{ - pub net: AF, - pub len: u8, - pub meta: M, -} - -impl InternalPrefixRecord -where - M: Meta, - AF: AddressFamily, -{ - pub fn new_with_meta( - net: AF, - len: u8, - meta: M, - ) -> InternalPrefixRecord { - Self { net, len, meta } - } - - // This should never fail, since there shouldn't be a invalid prefix in - // this record in the first place. - pub fn prefix_into_pub(&self) -> Prefix { - Prefix::new(self.net.into_ipaddr(), self.len) - .unwrap_or_else(|p| panic!("can't convert {:?} into prefix.", p)) - } - - pub fn get_prefix_id(&self) -> PrefixId { - PrefixId::new(self.net, self.len) - } - - pub fn get_meta(&self) -> &M { - &self.meta - } -} - -impl std::fmt::Display for InternalPrefixRecord -where - M: Meta, - AF: AddressFamily, -{ - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}/{} {}", - AddressFamily::fmt_net(self.net), - self.len, - self.meta - ) - } -} - -impl Ord for InternalPrefixRecord -where - M: Meta, - AF: AddressFamily, -{ - fn cmp(&self, other: &Self) -> Ordering { - (self.net >> (AF::BITS - self.len)) - .cmp(&(other.net >> ((AF::BITS - other.len) % 32))) - } -} - -impl PartialEq for InternalPrefixRecord -where - M: Meta, - AF: AddressFamily, -{ - fn eq(&self, other: &Self) -> bool { - self.net >> (AF::BITS - self.len) - == other.net >> ((AF::BITS - other.len) % 32) - } -} - -impl PartialOrd for InternalPrefixRecord -where - M: Meta, - AF: AddressFamily, -{ - fn partial_cmp(&self, other: &Self) -> Option { - Some( - (self.net >> (AF::BITS - self.len)) - .cmp(&(other.net >> ((AF::BITS - other.len) % 32))), - ) - } -} - -impl Eq for InternalPrefixRecord -where - M: Meta, - AF: AddressFamily, -{ -} - -impl Debug for InternalPrefixRecord -where - AF: AddressFamily, - T: Meta, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.write_fmt(format_args!( - "{}/{} with {:?}", - 
AddressFamily::fmt_net(self.net), - self.len, - self.meta - )) - } -} - -// impl std::hash::Hash for InternalPrefixRecord -// where -// AF: AddressFamily + PrimInt + Debug, -// T: Meta, -// { -// fn hash(&self, state: &mut H) { -// self.net.hash(state); -// self.len.hash(state); -// } -// } - -impl From> for PrefixId -where - AF: AddressFamily, - M: Meta, -{ - fn from(record: InternalPrefixRecord) -> Self { - Self::new(record.net, record.len) - } -} - -impl From<&InternalPrefixRecord> for PrefixId -where - AF: AddressFamily, - T: Meta, -{ - fn from(record: &InternalPrefixRecord) -> Self { - Self::new(record.net, record.len) - } -} - -impl From> - for InternalPrefixRecord -{ - fn from(record: PublicPrefixSingleRecord) -> Self { - Self { - net: crate::IPv4::from_ipaddr(record.prefix.addr()), - len: record.prefix.len(), - meta: record.meta, - } - } -} - -impl From> - for InternalPrefixRecord -{ - fn from(record: PublicPrefixSingleRecord) -> Self { - Self { - net: crate::IPv6::from_ipaddr(record.prefix.addr()), - len: record.prefix.len(), - meta: record.meta, - } - } -} - -//------------ PublicPrefixSingleRecord -------------------------------------- - -#[derive(Clone, Debug)] -pub struct PublicPrefixSingleRecord { - pub prefix: Prefix, - pub meta: M, -} - -impl PublicPrefixSingleRecord { - pub fn new(prefix: Prefix, meta: M) -> Self { - Self { prefix, meta } - } - - pub fn new_from_record( - record: InternalPrefixRecord, - ) -> Self { - Self { - prefix: record.prefix_into_pub(), - meta: record.meta, - } - } -} - -impl From<(PrefixId, Arc)> for PublicPrefixSingleRecord -where - AF: AddressFamily, - M: Meta, -{ - fn from(record: (PrefixId, Arc)) -> Self { - Self { - prefix: record.0.into_pub(), - meta: (*record.1).clone(), - } - } -} - -impl std::fmt::Display for PublicPrefixSingleRecord { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{} :{:?}", self.prefix, self.meta) - } -} - -impl From<(Prefix, M)> for PublicPrefixSingleRecord { - fn 
from((prefix, meta): (Prefix, M)) -> Self { - Self { prefix, meta } - } -} - - -//------------ PublicRecord ------------------------------------------- - -#[derive(Clone, Debug)] -pub struct PublicRecord { - pub multi_uniq_id: u32, - pub ltime: u64, - pub status: RouteStatus, - pub meta: M, -} - -impl PublicRecord { - pub fn new(multi_uniq_id: u32, ltime: u64, status: RouteStatus, meta: M) -> Self { - Self { meta, multi_uniq_id, ltime, status } - } -} - -impl std::fmt::Display for PublicRecord { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{{ mui: {}, ltime: {}, status: {}, meta: {} }}", - self.multi_uniq_id, - self.ltime, - self.status, - self.meta - ) - } -} - -impl From<(u32, MultiMapValue)> for PublicRecord { - fn from(value: (u32, MultiMapValue)) -> Self { - Self { - multi_uniq_id: value.0, - meta: value.1.meta, - ltime: value.1.ltime, - status: value.1.status, - } - } -} - - -//------------ PublicPrefixRecord ------------------------------------------- - -#[derive(Clone, Debug)] -pub struct PublicPrefixRecord { - pub prefix: Prefix, - pub meta: Vec>, -} - -impl PublicPrefixRecord { - pub fn new(prefix: Prefix, meta: Vec>) -> Self { - Self { prefix, meta } - } - - pub fn get_record_for_mui(&self, mui: u32) -> Option<&PublicRecord> { - self.meta.iter().find(|r| r.multi_uniq_id == mui) - } -} - -impl From<(PrefixId, Vec>)> for PublicPrefixRecord -where - AF: AddressFamily, - M: Meta, -{ - fn from(record: (PrefixId, Vec>)) -> Self { - Self { - prefix: record.0.into_pub(), - meta: record.1, - } - } -} - -impl std::fmt::Display for PublicPrefixRecord { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{}: [", self.prefix)?; - for rec in &self.meta { - write!(f, "{},", rec)?; - } - write!(f, "]") - } -} - -impl From<(Prefix, Vec>)> for PublicPrefixRecord { - fn from((prefix, meta): (Prefix, Vec>)) -> Self { - Self { prefix, meta } - } -} - - -//------------ RecordSingleSet 
----------------------------------------------- - -#[derive(Clone, Debug)] -pub struct RecordSingleSet { - pub v4: Vec>, - pub v6: Vec>, -} - -impl RecordSingleSet { - pub fn new() -> Self { - Self { - v4: Default::default(), - v6: Default::default(), - } - } - - pub fn push(&mut self, prefix: Prefix, meta: M) { - match prefix.addr() { - std::net::IpAddr::V4(_) => &mut self.v4, - std::net::IpAddr::V6(_) => &mut self.v6, - }.push(PublicPrefixSingleRecord::new(prefix, meta)); - } - - pub fn is_empty(&self) -> bool { - self.v4.is_empty() && self.v6.is_empty() - } - - pub fn iter(&self) -> RecordSetSingleIter { - RecordSetSingleIter { - v4: if self.v4.is_empty() { - None - } else { - Some(self.v4.iter()) - }, - v6: self.v6.iter(), - } - } - - #[must_use] - pub fn reverse(mut self) -> RecordSingleSet { - self.v4.reverse(); - self.v6.reverse(); - self - } - - pub fn len(&self) -> usize { - self.v4.len() + self.v6.len() - } -} - -impl Default for RecordSingleSet { - fn default() -> Self { - Self::new() - } -} - -impl fmt::Display for RecordSingleSet { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let arr_str_v4 = - self.v4.iter().fold("".to_string(), |pfx_arr, pfx| { - format!("{} {}", pfx_arr, *pfx) - }); - let arr_str_v6 = - self.v6.iter().fold("".to_string(), |pfx_arr, pfx| { - format!("{} {}", pfx_arr, *pfx) - }); - - write!(f, "V4: [{}], V6: [{}]", arr_str_v4, arr_str_v6) - } -} - -impl - From<(Vec>, Vec>)> - for RecordSingleSet -{ - fn from( - (v4, v6): (Vec>, Vec>), - ) -> Self { - Self { v4, v6 } - } -} - -impl<'a, M: Meta + 'a> std::iter::FromIterator>> - for RecordSingleSet -{ - fn from_iter>>>( - iter: I, - ) -> Self { - let mut v4 = vec![]; - let mut v6 = vec![]; - for pfx in iter { - let u_pfx = pfx.prefix; - match u_pfx.addr() { - std::net::IpAddr::V4(_) => { - v4.push(PublicPrefixSingleRecord::new(u_pfx, pfx.meta.clone())); - } - std::net::IpAddr::V6(_) => { - v6.push(PublicPrefixSingleRecord::new(u_pfx, pfx.meta.clone())); - } - } - } - Self { 
v4, v6 } - } -} - -impl<'a, AF: AddressFamily, M: Meta + 'a> - std::iter::FromIterator<(PrefixId, Arc)> - for RecordSingleSet -{ - fn from_iter, Arc)>>( - iter: I, - ) -> Self { - let mut v4 = vec![]; - let mut v6 = vec![]; - for pfx in iter { - let u_pfx = pfx.0.into_pub(); - match u_pfx.addr() { - std::net::IpAddr::V4(_) => { - v4.push(PublicPrefixSingleRecord::new(u_pfx, (*pfx.1).clone())); - } - std::net::IpAddr::V6(_) => { - v6.push(PublicPrefixSingleRecord::new(u_pfx, (*pfx.1).clone())); - } - } - } - Self { v4, v6 } - } -} - -impl<'a, AF: AddressFamily, M: Meta + 'a> - std::iter::FromIterator<&'a InternalPrefixRecord> - for RecordSingleSet -{ - fn from_iter>>( - iter: I, - ) -> Self { - let mut v4 = vec![]; - let mut v6 = vec![]; - for pfx in iter { - let u_pfx = (*pfx).prefix_into_pub(); - match u_pfx.addr() { - std::net::IpAddr::V4(_) => { - v4.push(PublicPrefixSingleRecord::new(u_pfx, pfx.meta.clone())); - } - std::net::IpAddr::V6(_) => { - v6.push(PublicPrefixSingleRecord::new(u_pfx, pfx.meta.clone())); - } - } - } - Self { v4, v6 } - } -} - -impl std::ops::Index for RecordSingleSet { - type Output = PublicPrefixSingleRecord; - - fn index(&self, index: usize) -> &Self::Output { - if index < self.v4.len() { - &self.v4[index] - } else { - &self.v6[index - self.v4.len()] - } - } -} - -//------------ RecordSet ---------------------------------------------------- - -#[derive(Clone, Debug)] -pub struct RecordSet { - pub v4: Vec>, - pub v6: Vec>, -} - -impl RecordSet { - pub fn new() -> Self { - Self { - v4: Default::default(), - v6: Default::default(), - } - } - - pub fn push(&mut self, prefix: Prefix, meta: Vec>) { - match prefix.addr() { - std::net::IpAddr::V4(_) => &mut self.v4, - std::net::IpAddr::V6(_) => &mut self.v6, - }.push(PublicPrefixRecord::new(prefix, meta)); - } - - pub fn is_empty(&self) -> bool { - self.v4.is_empty() && self.v6.is_empty() - } - - pub fn iter(&self) -> RecordSetIter { - RecordSetIter { - v4: if self.v4.is_empty() { - None - } 
else { - Some(self.v4.iter()) - }, - v6: self.v6.iter(), - } - } - - #[must_use] - pub fn reverse(mut self) -> RecordSet { - self.v4.reverse(); - self.v6.reverse(); - self - } - - pub fn len(&self) -> usize { - self.v4.len() + self.v6.len() - } -} - -impl Default for RecordSet { - fn default() -> Self { - Self::new() - } -} - -impl fmt::Display for RecordSet { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let arr_str_v4 = - self.v4.iter().fold("".to_string(), |pfx_arr, pfx| { - format!("{} {}", pfx_arr, *pfx) - }); - let arr_str_v6 = - self.v6.iter().fold("".to_string(), |pfx_arr, pfx| { - format!("{} {}", pfx_arr, *pfx) - }); - - write!(f, "V4: [{}], V6: [{}]", arr_str_v4, arr_str_v6) - } -} - -impl - From<(Vec>, Vec>)> - for RecordSet -{ - fn from( - (v4, v6): (Vec>, Vec>), - ) -> Self { - Self { v4, v6 } - } -} - -impl<'a, M: Meta + 'a> - std::iter::FromIterator> - for RecordSet -{ - fn from_iter>>( - iter: I, - ) -> Self { - let mut v4 = vec![]; - let mut v6 = vec![]; - for pfx in iter { - let u_pfx = pfx.prefix; - match u_pfx.addr() { - std::net::IpAddr::V4(_) => { - v4.push(PublicPrefixRecord::new(u_pfx, pfx.meta)); - } - std::net::IpAddr::V6(_) => { - v6.push(PublicPrefixRecord::new(u_pfx, pfx.meta)); - } - } - } - Self { v4, v6 } - } -} - -impl<'a, AF: AddressFamily, M: Meta + 'a> - std::iter::FromIterator<(PrefixId, Vec>)> - for RecordSet -{ - fn from_iter, Vec>)>>( - iter: I, - ) -> Self { - let mut v4 = vec![]; - let mut v6 = vec![]; - for pfx in iter { - let u_pfx = pfx.0.into_pub(); - match u_pfx.addr() { - std::net::IpAddr::V4(_) => { - v4.push(PublicPrefixRecord::new(u_pfx, pfx.1)); - } - std::net::IpAddr::V6(_) => { - v6.push(PublicPrefixRecord::new(u_pfx, pfx.1)); - } - } - } - Self { v4, v6 } - } -} - -impl<'a, M: Meta + 'a> - std::iter::FromIterator<&'a PublicPrefixRecord> - for RecordSet -{ - fn from_iter>>( - iter: I, - ) -> Self { - let mut v4 = vec![]; - let mut v6 = vec![]; - for pfx in iter { - let u_pfx = pfx.prefix; - match 
u_pfx.addr() { - std::net::IpAddr::V4(_) => { - v4.push(PublicPrefixRecord::new(u_pfx, pfx.meta.clone())); - } - std::net::IpAddr::V6(_) => { - v6.push(PublicPrefixRecord::new(u_pfx, pfx.meta.clone())); - } - } - } - Self { v4, v6 } - } -} - -impl std::ops::Index for RecordSet { - type Output = PublicPrefixRecord; - - fn index(&self, index: usize) -> &Self::Output { - if index < self.v4.len() { - &self.v4[index] - } else { - &self.v6[index - self.v4.len()] - } - } -} - - -//------------ RecordSetSingleIter ------------------------------------------- - -#[derive(Clone, Debug)] -pub struct RecordSetSingleIter<'a, M: Meta> { - v4: Option>>, - v6: std::slice::Iter<'a, PublicPrefixSingleRecord>, -} - -impl<'a, M: Meta> Iterator for RecordSetSingleIter<'a, M> { - type Item = PublicPrefixSingleRecord; - - fn next(&mut self) -> Option { - if self.v4.is_none() { - return self.v6.next().map(|res| res.to_owned()); - } - - if let Some(res) = self.v4.as_mut().and_then(|v4| v4.next()) { - return Some(res.to_owned()); - } - self.v4 = None; - self.next() - } -} - -//------------ RecordSetIter ------------------------------------------- - -#[derive(Clone, Debug)] -pub struct RecordSetIter<'a, M: Meta> { - v4: Option>>, - v6: std::slice::Iter<'a, PublicPrefixRecord>, -} - -impl<'a, M: Meta> Iterator for RecordSetIter<'a, M> { - type Item = PublicPrefixRecord; - - fn next(&mut self) -> Option { - if self.v4.is_none() { - return self.v6.next().map(|res| res.to_owned()); - } - - if let Some(res) = self.v4.as_mut().and_then(|v4| v4.next()) { - return Some(res.to_owned()); - } - self.v4 = None; - self.next() - } -} - - -//----------------------- meta-data traits/types----------------------------- - -/// Trait that describes how an existing record gets merged -/// -/// MergeUpdate must be implemented by a type that implements Meta if it -/// wants to be able to be stored. 
It should describe how the metadata for an -/// existing record should be merged with newly arriving records for the same -/// key. -// pub trait MergeUpdate: Send + Sync { -// /// User-defined data to be passed in to the merge implementation. -// type UserDataIn: Debug + Sync + Send; - -// /// User-defined data returned by the users implementation of the merge -// /// operations. Set to () if not needed. -// /// TODO: Define () as the default when the 'associated_type_defaults' -// /// Rust feature is stabilized. See: -// /// https://github.com/rust-lang/rust/issues/29661 -// type UserDataOut; - -// fn merge_update( -// &mut self, -// update_meta: Self, -// user_data: Option<&Self::UserDataIn>, -// ) -> Result>; - -// // This is part of the Read-Copy-Update pattern for updating a record -// // concurrently. The Read part should be done by the caller and then -// // the result should be passed in into this function together with -// // the new meta-data that updates it. This function will then create -// // a copy (in the pattern lingo, but in Rust that would be a Clone, -// // since we're not requiring Copy for Meta) and update that with a -// // copy of the new meta-data. It then returns the result of that merge. -// // The caller should then proceed to insert that as a new entry -// // in the global store. 
-// fn clone_merge_update( -// &self, -// update_meta: &Self, -// user_data: Option<&Self::UserDataIn>, -// ) -> Result<(Self, Self::UserDataOut), Box> -// where -// Self: std::marker::Sized; -// } - -/// Trait for types that can be used as metadata of a record -pub trait Meta -where - Self: fmt::Debug + fmt::Display + Clone + Sized + Send + Sync { - type Orderable<'a>: Ord where Self: 'a; - type TBI: Copy; - - fn as_orderable(&self, tbi: Self::TBI) -> Self::Orderable<'_>; - } - -impl Meta for inetnum::asn::Asn { - type Orderable<'a> = inetnum::asn::Asn; - type TBI = (); - - fn as_orderable(&self, _tbi: Self::TBI) -> inetnum::asn::Asn { - *self - } -} \ No newline at end of file diff --git a/src/prelude/mod.rs b/src/prelude/mod.rs deleted file mode 100644 index f51300a1..00000000 --- a/src/prelude/mod.rs +++ /dev/null @@ -1,31 +0,0 @@ -pub use crate::{AddressFamily, IPv4, IPv6}; - -pub use crate::prefix_record::{Meta, PublicPrefixRecord as PrefixRecord}; -pub use crate::stride::{Stride3, Stride4, Stride5}; -pub use crate::{MatchOptions, MatchType, QueryResult}; -pub use inetnum::addr::Prefix; - -pub mod multi { - pub use crate::MultiThreadedStore; - pub use std::sync::atomic::Ordering; - - pub use rotonda_macros::create_store; - pub use rotonda_macros::stride_sizes; - - pub use crossbeam_epoch::{self as epoch, Guard}; - - pub use crate::local_array::store::atomic_types::RouteStatus; - pub use crate::local_array::store::atomic_types::{ - NodeBuckets, NodeSet, PrefixBuckets, PrefixSet, - }; - pub use crate::local_array::store::errors::PrefixStoreError; - pub use crate::local_array::tree::{PrefixId, StrideNodeId, TreeBitMap}; - pub use crate::prefix_record::PublicRecord as Record; - - pub use crate::custom_alloc::CustomAllocStorage; - pub use crate::custom_alloc::{ - Counters, StoreStats, Upsert, UpsertReport, - }; - - pub use routecore::bgp::path_selection::TiebreakerInfo; -} diff --git a/src/rib/config.rs b/src/rib/config.rs new file mode 100644 index 
00000000..c0f87833 --- /dev/null +++ b/src/rib/config.rs @@ -0,0 +1,175 @@ +//------------ Config -------------------------------------------------------- + +//! Configuration options for a RIB for AFI/SAFIs [IPv4, IPv6] with [Unicast, +//! Multicast]. +//! +//! A Configuration is created by picking one of the `*Config` structs in +//! this module, instantiate it, set some fields on it, and pass it in as an +//! argument to [new_with_config](super::StarCastRib::new_with_config). +//! +//! ``` +//! use rotonda_store::test_types::PrefixAs; +//! use rotonda_store::rib::StarCastRib; +//! use rotonda_store::rib::config::PersistOnlyConfig; +//! +//! let config = PersistOnlyConfig::default(); +//! let tree_bitmap = StarCastRib::::new_with_config(config); +//! ``` + +/// Defines where records are stored: in-memory and/or persisted (to disk), +/// and, whether new records for a unique (prefix, mui) pair are overwritten +/// or persisted ("historical records"). +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum PersistStrategy { + /// Current records are stored both in-memory and persisted. Historical + /// records are persisted. + WriteAhead, + /// Current records are stored in-memory, historical records are + /// persisted. + PersistHistory, + /// Current records are stored in-memory, historical records are discarded + /// when newer records appear. + MemoryOnly, + /// Current records are persisted immediately. No records are stored in + /// memory. Historical records are discarded when newer records appear. 
+ PersistOnly, +} + +pub trait Config: Clone + Default + std::fmt::Debug { + /// Returns the chosen persist strategy for this configuration + fn persist_strategy(&self) -> PersistStrategy; + /// Returns the path to the directory that is used to store persisted + /// records + fn persist_path(&self) -> Option; + /// Set the path to the directory that will be used to persist records to + fn set_persist_path(&mut self, path: String); +} + +//------------ MemoryOnlyConfig ---------------------------------------------- + +/// A configuration that sets persist strategy to +/// `PersistStrategy::MemoryOnly`: Records for unique `(prefix, mui)` pairs +/// are kept in memory, newer records will overwrite existing records. In +/// other words, no historical records are preserved. +#[derive(Copy, Clone, Debug)] +pub struct MemoryOnlyConfig; + +impl Config for MemoryOnlyConfig { + fn persist_strategy(&self) -> PersistStrategy { + PersistStrategy::MemoryOnly + } + + fn persist_path(&self) -> Option { + None + } + + fn set_persist_path(&mut self, _: String) { + unimplemented!() + } +} + +impl Default for MemoryOnlyConfig { + fn default() -> Self { + Self + } +} + +//------------ PersistOnlyConfig --------------------------------------------- + +/// A configuration that sets the persist strategy to +/// `PersistStrategy::PersistOnly`: Records for unique `(prefix, mui)` pairs +/// are persisted to disk, newer records will overwrite existing records. In +/// other words, no historical records are preserved. 
+#[derive(Clone, Debug)] +pub struct PersistOnlyConfig { + persist_path: String, +} + +impl Config for PersistOnlyConfig { + fn persist_strategy(&self) -> PersistStrategy { + PersistStrategy::PersistOnly + } + + fn persist_path(&self) -> Option { + Some(self.persist_path.clone()) + } + + fn set_persist_path(&mut self, path: String) { + self.persist_path = path; + } +} + +impl Default for PersistOnlyConfig { + fn default() -> Self { + Self { + persist_path: "/tmp/rotonda/".to_string(), + } + } +} + +//------------ WriteAheadConfig ---------------------------------------------- + +/// A configuration that sets the persist strategy to +///`PersistStrategy::WriteAhead`: Records for unique `(prefix, mui)` pairs +///are both kept in memory and persisted to disk, newer records will overwrite +///existing records in memory, but all records will be kept persisted on disk. +///In other words, historical records are kept on disk. +#[derive(Clone, Debug)] +pub struct WriteAheadConfig { + persist_path: String, +} + +impl Config for WriteAheadConfig { + fn persist_strategy(&self) -> PersistStrategy { + PersistStrategy::WriteAhead + } + + fn persist_path(&self) -> Option { + Some(self.persist_path.clone()) + } + + fn set_persist_path(&mut self, path: String) { + self.persist_path = path; + } +} + +impl Default for WriteAheadConfig { + fn default() -> Self { + Self { + persist_path: "/tmp/rotonda/".to_string(), + } + } +} + +//------------ PersistHistoryConfig ------------------------------------------ + +/// A configuration that sets the persist strategy to +///`PersistStrategy::PersistHistory`: Records for unique `(prefix, mui)` pairs +///are kept in memory,newer records will replace existing records, but the +///existing records will be persisted to disk. 
+#[derive(Clone, Debug)] +pub struct PersistHistoryConfig { + persist_path: String, +} + +impl Config for PersistHistoryConfig { + fn persist_strategy(&self) -> PersistStrategy { + PersistStrategy::PersistHistory + } + + fn persist_path(&self) -> Option { + Some(self.persist_path.clone()) + } + + fn set_persist_path(&mut self, path: String) { + self.persist_path = path; + } +} + +impl Default for PersistHistoryConfig { + fn default() -> Self { + Self { + persist_path: "/tmp/rotonda/".to_string(), + } + } +} diff --git a/src/rib/mod.rs b/src/rib/mod.rs new file mode 100644 index 00000000..319943f0 --- /dev/null +++ b/src/rib/mod.rs @@ -0,0 +1,9 @@ +pub mod config; +pub(crate) mod starcast; +pub(crate) mod starcast_af; +pub(crate) mod starcast_af_query; + +pub(crate) use starcast::BIT_SPAN_SIZE; +pub(crate) use starcast::STRIDE_SIZE; + +pub use starcast::StarCastRib; diff --git a/src/rib/starcast.rs b/src/rib/starcast.rs new file mode 100644 index 00000000..c843b7ff --- /dev/null +++ b/src/rib/starcast.rs @@ -0,0 +1,977 @@ +use crossbeam_epoch::Guard; +use inetnum::addr::Prefix; +use rand::prelude::*; + +use crate::{ + epoch, + errors::{FatalError, FatalResult}, + match_options::{MatchOptions, QueryResult}, + prefix_record::{Meta, PrefixRecord, Record}, + rib::config::Config, + types::{errors::PrefixStoreError, PrefixId}, + AddressFamily, IPv4, IPv6, +}; + +use super::starcast_af::StarCastAfRib; +use crate::rib::config::PersistStrategy; +use crate::stats::{StoreStats, UpsertCounters, UpsertReport}; + +pub const STRIDE_SIZE: u8 = 4; +pub const BIT_SPAN_SIZE: u8 = 32; + +/// A RIB that stores routes (and/or other data) for [`IPv4`, +/// `IPv6`]/[`Unicast`, `Multicast`], i.e. AFI/SAFI types `{1,2}/{1,2}`. +/// +/// Routes can be kept in memory, persisted to disk, or both. Also, historical +/// records can be persisted. +/// +/// A RIB stores "route-like" data. A `route` according to RFC4271 would be +/// specified as an IP prefix and a set of path attributes. 
Our StarCastRib, +/// on the other hand, does not really care whether it stores path attributes, +/// or any other type of record, for a given IP prefix. +/// +/// In order to be able to store multiple records for a `(prefix, record)` +/// pair, however, the store needs to given an extra piece of information in +/// the key. We are calling this piece of data a `multi uniq id` (called "mui" +/// throughout this repo), and uses an `u32` as its underlying data type. +/// This `mui` is completely user-defined, and has no additional semantics +/// for the store beyond establishing the uniqueness of the key. The `mui` +/// was specifically designed for use cases where Rotonda wants to store RIBs +/// that it receives from multiple peers in one StarCastRib, so that every +/// peer that Rotonda knows of gets its own, unique `mui`, and our StarCastRib +/// would store them all without overwriting already stored `(prefix, +/// record)` pairs. In other words, multiple values can be stored per unique +/// `(prefix, record)` pair. +/// +/// Next to creating `(prefix, record)` entries for `mui`, the [RouteStatus]( crate::prefix_record::RouteStatus) of a `mui` can be globally set to +/// `Withdrawn`or `Active`. A global status of `Withdrawn` overrides the +/// local status of a prefix for that `mui`. In that case, the local status +/// can still be changed and will take effect when the global status is set +/// (back) to `Active`. +/// +/// The RIB can be conceptually thought of as a MultiMap - a HashMap that can +/// store multiple values for a given key - that is keyed on `prefix`, and +/// will store multiple values for a prefix, based on the specified `mui`. +/// Furthermore, a [persist strategy](crate::rib::config::PersistStrategy), +/// chosen by the user, for a `StarCastRib` determines what happens with key +/// collisions in this multi map. 
+pub struct StarCastRib { + v4: StarCastAfRib, + v6: StarCastAfRib, + config: C, +} + +impl<'a, M: Meta, C: Config> StarCastRib { + /// Create a new RIB with a default configuration. + /// + /// The default configuration uses the `MemoryOnly` persistence strategy. + /// + /// This method is really infallible, but we return a result anyway to be + /// in line with the `new_with_config` method. + pub fn try_default() -> Result { + let config = C::default(); + Self::new_with_config(config) + .map_err(|_| PrefixStoreError::StoreNotReadyError) + } + + /// Create a new RIB with the specified [configuration]( + /// crate::rib::config). + /// + /// Creation may fail for all strategies that persist to disk, e.g. + /// the persistence path does not exist, it doesn't have the correct + /// permissions, etc. + pub fn new_with_config( + config: C, + ) -> Result> { + let rng = rand::rng(); + let uuid: String = rng + .sample_iter(rand::distr::Alphanumeric) + .take(12) + .map(char::from) + .collect(); + let mut config_v4 = config.clone(); + let mut config_v6 = config.clone(); + + if let Some(path) = config_v4.persist_path() { + let pp = format!("{}/{}/ipv4/", path, uuid); + config_v4.set_persist_path(pp); + }; + + if let Some(path) = config_v6.persist_path() { + config_v6.set_persist_path(format!("{}/{}/ipv6/", path, uuid)); + } + + Ok(Self { + v4: StarCastAfRib::new(config_v4)?, + v6: StarCastAfRib::new(config_v6)?, + config, + }) + } + + /// Query the RIB for a matching prefix with options. + /// + /// A reference to a [Guard](crate::Guard) must be passed in to + /// assure that the resulting prefixes are time consistent. The guard can + /// be re-used for multiple matches to assure time consistency between + /// the matches. + /// + /// Returns a [QueryResult] that may contain one or more prefixes, with or + /// without their associated records. 
+ pub fn match_prefix( + &'a self, + search_pfx: &Prefix, + options: &MatchOptions, + guard: &'a Guard, + ) -> FatalResult> { + match search_pfx.addr() { + std::net::IpAddr::V4(addr) => self.v4.match_prefix( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + options, + guard, + ), + std::net::IpAddr::V6(addr) => self.v6.match_prefix( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + options, + guard, + ), + } + } + + /// Search the RIB for a prefix. + /// + /// Returns a bool indicating whether the prefix was found. Regardless of the chosen persist strategy + pub fn contains(&'a self, prefix: &Prefix, mui: Option) -> bool { + match prefix.addr() { + std::net::IpAddr::V4(_addr) => { + self.v4.contains(PrefixId::::from(*prefix), mui) + } + std::net::IpAddr::V6(_addr) => { + self.v6.contains(PrefixId::::from(*prefix), mui) + } + } + } + + /// Return a previously calculated best path for a prefix, if any. + /// + /// Returns `None` if the prefix was not found + ///in the RIB. Returns a [BestPathNotFound]( + /// crate::errors::PrefixStoreError::BestPathNotFound) error if the + /// best path was never calculated, or returns a [StoreNotReadyError]( + /// crate::errors::PrefixStoreError::StoreNotReadyError) if there is no + /// record for the prefix (but the prefix does exist). + pub fn best_path( + &'a self, + search_pfx: &Prefix, + guard: &Guard, + ) -> Option, PrefixStoreError>> { + match search_pfx.addr() { + std::net::IpAddr::V4(addr) => self.v4.best_path( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + guard, + ), + std::net::IpAddr::V6(addr) => self.v6.best_path( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + guard, + ), + } + } + + /// Calculate the best path for a prefix. + /// + /// This method takes all the records for a prefix, i.e. all the records + /// for different values of `mui` for this prefix, and calculates the best + /// path for them. 
+ /// + /// Returns the values of `mui` for the best path, and the backup path, + /// respectively. + /// Returns `None` if the prefix does not exist. Returns a [StoreNotReady]() + /// crate::errors::PrefixStoreError::StoreNotReadyError) if there is no + /// record for the prefix (but the prefix does exist). + pub fn calculate_and_store_best_and_backup_path( + &self, + search_pfx: &Prefix, + tbi: &::TBI, + guard: &Guard, + ) -> Result<(Option, Option), PrefixStoreError> { + match search_pfx.addr() { + std::net::IpAddr::V4(addr) => { + self.v4.calculate_and_store_best_and_backup_path( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + tbi, + guard, + ) + } + std::net::IpAddr::V6(addr) => { + self.v6.calculate_and_store_best_and_backup_path( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + tbi, + guard, + ) + } + } + } + + /// Determine if a best path selection is based on stale records. + /// + /// Returns `Ok(true)` if the records have been updated since the last + /// best path selection was performed. + /// Returns a [StoreNotReady](crate::errors::PrefixStoreError) if the + /// prefix cannot be found in the RIB. + pub fn is_ps_outdated( + &self, + search_pfx: &Prefix, + guard: &Guard, + ) -> Result { + match search_pfx.addr() { + std::net::IpAddr::V4(addr) => self.v4.is_ps_outdated( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + guard, + ), + std::net::IpAddr::V6(addr) => self.v6.is_ps_outdated( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + guard, + ), + } + } + + /// Request all more specific prefixes in the RIB for a certain + /// prefix, including the prefix itself. + /// + /// If a `mui` is specified only prefixes for that particular `mui` + /// are returned. If `None` is specified all more specific prefixes, + /// regardless of their `mui` will be included in the returned result. 
+ /// + /// if `include_withdrawn` is set to `true`, all more prefixes that have a + /// status of `Withdrawn` will included in the returned result. + /// + /// Returns a [QueryResult](crate::match_options::QueryResult). + pub fn more_specifics_from( + &'a self, + search_pfx: &Prefix, + mui: Option, + include_withdrawn: bool, + guard: &'a Guard, + ) -> FatalResult> { + match search_pfx.addr() { + std::net::IpAddr::V4(addr) => self.v4.more_specifics_from( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + mui, + include_withdrawn, + guard, + ), + std::net::IpAddr::V6(addr) => self.v6.more_specifics_from( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + mui, + include_withdrawn, + guard, + ), + } + } + + /// Request all less specific prefixes in the RIB for a certain + /// prefix, including the prefix itself. + /// + /// If a `mui` is specified only prefixes for that particular `mui` + /// are returned. If `None` is specified all less specific prefixes, + /// regardless of their `mui` will be included in the returned result. + /// + /// if `include_withdrawn` is set to `true`, all more prefixes that have a + /// status of `Withdrawn` will included in the returned result. + /// + /// Returns a [QueryResult](crate::match_options::QueryResult). + pub fn less_specifics_from( + &'a self, + search_pfx: &Prefix, + mui: Option, + include_withdrawn: bool, + guard: &'a Guard, + ) -> FatalResult> { + match search_pfx.addr() { + std::net::IpAddr::V4(addr) => self.v4.less_specifics_from( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + mui, + include_withdrawn, + guard, + ), + std::net::IpAddr::V6(addr) => self.v6.less_specifics_from( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + mui, + include_withdrawn, + guard, + ), + } + } + + /// Request an iterator over all less specific prefixes in the RIB for a + /// certain prefix, including the prefix itself. 
+ /// + /// If a `mui` is specified only prefixes for that particular `mui` + /// are returned. If `None` is specified all less specific prefixes, + /// regardless of their `mui` will be included in the returned result. + /// + /// if `include_withdrawn` is set to `true`, all more prefixes that have a + /// status of `Withdrawn` will included in the returned result. + /// + /// Returns an iterator over [PrefixRecord]. + pub fn less_specifics_iter_from( + &'a self, + search_pfx: &Prefix, + mui: Option, + include_withdrawn: bool, + guard: &'a Guard, + ) -> impl Iterator>> + 'a { + let (left, right) = match search_pfx.addr() { + std::net::IpAddr::V4(addr) => ( + Some( + self.v4 + .less_specifics_iter_from( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + mui, + include_withdrawn, + guard, + ) + .map(|r| r.map(PrefixRecord::from)), + ), + None, + ), + std::net::IpAddr::V6(addr) => ( + None, + Some( + self.v6 + .less_specifics_iter_from( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + mui, + include_withdrawn, + guard, + ) + .map(|r| r.map(PrefixRecord::from)), + ), + ), + }; + + left.into_iter() + .flatten() + .chain(right.into_iter().flatten()) + } + + /// Request an iterator over all more specific prefixes in the RIB for a + /// certain prefix, including the prefix itself. + /// + /// If a `mui` is specified only prefixes for that particular `mui` + /// are returned. If `None` is specified all more specific prefixes, + /// regardless of their `mui` will be included in the returned result. + /// + /// if `include_withdrawn` is set to `true`, all more prefixes that have a + /// status of `Withdrawn` will included in the returned result. + /// + /// Returns an iterator over [PrefixRecord]. 
+ pub fn more_specifics_iter_from( + &'a self, + search_pfx: &Prefix, + mui: Option, + include_withdrawn: bool, + guard: &'a Guard, + ) -> impl Iterator>> + 'a { + let (left, right) = match search_pfx.addr() { + std::net::IpAddr::V4(addr) => ( + Some( + self.v4 + .more_specifics_iter_from( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + mui, + include_withdrawn, + guard, + ) + .map(|r| r.map(PrefixRecord::from)), + ), + None, + ), + std::net::IpAddr::V6(addr) => ( + None, + Some( + self.v6 + .more_specifics_iter_from( + PrefixId::::new( + ::from_ipaddr(addr), + search_pfx.len(), + ), + mui, + include_withdrawn, + guard, + ) + .map(|r| r.map(PrefixRecord::from)), + ), + ), + }; + + left.into_iter() + .flatten() + .chain(right.into_iter().flatten()) + } + + /// Request an iterator over all IPv4 prefixes in the RIB for a certain + /// `mui`. + /// + /// if `include_withdrawn` is set to `true`, all prefixes that have a + /// status of `Withdrawn` will included in the returned result. + /// + /// Returns an iterator over [PrefixRecord]. + pub fn iter_records_for_mui_v4( + &'a self, + mui: u32, + include_withdrawn: bool, + guard: &'a Guard, + ) -> impl Iterator>> + 'a { + if self.v4.mui_is_withdrawn(mui, guard) && !include_withdrawn { + None + } else { + Some( + self.v4 + .more_specifics_iter_from( + PrefixId::::new( + ::zero(), + 0, + ), + Some(mui), + include_withdrawn, + guard, + ) + .map(|r| r.map(PrefixRecord::from)), + ) + } + .into_iter() + .flatten() + } + + /// Request an iterator over all IPv6 prefixes in the RIB for a certain + /// `mui`. + /// + /// if `include_withdrawn` is set to `true`, all prefixes that have a + /// status of `Withdrawn` will included in the returned result. + /// + /// Returns an iterator over [PrefixRecord]. 
+ pub fn iter_records_for_mui_v6( + &'a self, + mui: u32, + include_withdrawn: bool, + guard: &'a Guard, + ) -> impl Iterator>> + 'a { + if self.v6.mui_is_withdrawn(mui, guard) && !include_withdrawn { + None + } else { + Some( + self.v6 + .more_specifics_iter_from( + PrefixId::::new( + ::zero(), + 0, + ), + Some(mui), + include_withdrawn, + guard, + ) + .map(|r| r.map(PrefixRecord::from)), + ) + } + .into_iter() + .flatten() + } + + /// Insert a Prefix with a [Record](crate::prefix_record::Record) into + /// the RIB. + /// + /// If `update_path_selections` is passed in with the tie breaker info + /// then perform a best path selection. + /// + /// Returns an iterator over [PrefixRecord]. + pub fn insert( + &self, + prefix: &Prefix, + record: Record, + update_path_selections: Option, + ) -> Result { + match prefix.addr() { + std::net::IpAddr::V4(_addr) => self.v4.insert( + PrefixId::::from(*prefix), + record, + update_path_selections, + ), + std::net::IpAddr::V6(_addr) => self.v6.insert( + PrefixId::::from(*prefix), + record, + update_path_selections, + ), + } + } + + /// Request an iterator over all prefixes in the RIB. + /// + /// Returns an iterator over [PrefixRecord]. + pub fn prefixes_iter( + &'a self, + guard: &'a Guard, + ) -> impl Iterator>> + 'a { + self.v4 + .prefixes_iter(guard) + .map(|r| r.map(PrefixRecord::from)) + .chain( + self.v6 + .prefixes_iter(guard) + .map(|r| r.map(PrefixRecord::from)), + ) + } + + /// Request an iterator over all IPv4 prefixes in the RIB. + /// + /// Returns an iterator over [PrefixRecord]. + pub fn prefixes_iter_v4( + &'a self, + guard: &'a Guard, + ) -> impl Iterator>> + 'a { + self.v4 + .prefixes_iter(guard) + .map(|r| r.map(PrefixRecord::from)) + } + + /// Request an iterator over all IPv6 prefixes in the RIB. + /// + /// Returns an iterator over [PrefixRecord]. 
+ pub fn prefixes_iter_v6( + &'a self, + guard: &'a Guard, + ) -> impl Iterator>> + 'a { + self.v6 + .prefixes_iter(guard) + .map(|r| r.map(PrefixRecord::from)) + } + + /// Request an iterator over all persisted prefixes. + /// + /// Returns an over [PrefixRecord]. + pub fn persist_prefixes_iter( + &'a self, + ) -> impl Iterator, FatalError>> + 'a { + self.v4 + .persist_prefixes_iter() + .map(|rr| rr.map(PrefixRecord::from)) + .chain( + self.v6 + .persist_prefixes_iter() + .map(|rr| rr.map(PrefixRecord::from)), + ) + } + + /// Request an iterator over all persisted IPv4 prefixes. + /// + /// Returns an iterator over [PrefixRecord]( + /// crate::prefix_record::PrefixRecord). + pub fn persist_prefixes_iter_v4( + &'a self, + ) -> impl Iterator, FatalError>> + 'a { + self.v4 + .persist_prefixes_iter() + .map(|rr| rr.map(PrefixRecord::from)) + } + + /// Request an iterator over all persisted IPv6 prefixes. + /// + /// Returns an iterator over [PrefixRecord]. + pub fn persist_prefixes_iter_v6( + &'a self, + ) -> impl Iterator, FatalError>> + 'a { + self.v6 + .persist_prefixes_iter() + .map(|rr| rr.map(PrefixRecord::from)) + } + + /// Request whether the global status of a `mui` is set to `Active` for + ///both IPv4 and IPv6 prefixes. + pub fn is_mui_active(&self, mui: u32) -> bool { + let guard = &epoch::pin(); + self.v4.is_mui_active(mui, guard) || self.v6.is_mui_active(mui, guard) + } + + /// Request whether the global status of a `mui` is set to `Active` for + ///IPv4 prefixes. + pub fn is_mui_active_v4(&self, mui: u32) -> bool { + let guard = &epoch::pin(); + self.v4.is_mui_active(mui, guard) + } + + /// Request whether the global status of a `mui` is set to `Active` for + /// IPv6 prefixes. + pub fn is_mui_active_v6(&self, mui: u32) -> bool { + let guard = &epoch::pin(); + self.v6.is_mui_active(mui, guard) + } + + /// Change the local status of the record for the combination of + /// (prefix, multi_uniq_id) to Withdrawn. 
Note that by default the + /// global `Withdrawn` status for a mui overrides the local status + /// of a record. + pub fn mark_mui_as_withdrawn_for_prefix( + &self, + prefix: &Prefix, + mui: u32, + ltime: u64, + ) -> Result<(), PrefixStoreError> { + match prefix.addr() { + std::net::IpAddr::V4(_addr) => { + self.v4.mark_mui_as_withdrawn_for_prefix( + PrefixId::::from(*prefix), + mui, + ltime, + ) + } + std::net::IpAddr::V6(_addr) => { + self.v6.mark_mui_as_withdrawn_for_prefix( + PrefixId::::from(*prefix), + mui, + ltime, + ) + } + } + } + + /// Change the local status of the record for the combination of + /// (prefix, multi_uniq_id) to Active. Note that by default the + /// global `Withdrawn` status for a mui overrides the local status + /// of a record. + pub fn mark_mui_as_active_for_prefix( + &self, + prefix: &Prefix, + mui: u32, + ltime: u64, + ) -> FatalResult<()> { + match prefix.addr() { + std::net::IpAddr::V4(_addr) => { + self.v4.mark_mui_as_active_for_prefix( + PrefixId::::from(*prefix), + mui, + ltime, + ) + } + std::net::IpAddr::V6(_addr) => { + self.v6.mark_mui_as_active_for_prefix( + PrefixId::::from(*prefix), + mui, + ltime, + ) + } + } + } + + /// Change the status of all records for IPv4 prefixes for this + /// `multi_uniq_id` globally to Active. Note that the global + /// `Active` status will be overridden by the local status of the + /// record. + pub fn mark_mui_as_active_v4( + &self, + mui: u32, + ) -> Result<(), PrefixStoreError> { + let guard = &epoch::pin(); + + self.v4.mark_mui_as_active(mui, guard) + } + + /// Change the status of all records for IPv4 prefixes for this + /// `multi_uniq_id` globally to Withdrawn. A global `Withdrawn` + /// status for a `multi_uniq_id` overrides the local status of + /// prefixes for this mui. However the local status can still be + /// modified. This modification will take effect if the global + /// status is changed to `Active`. 
+ pub fn mark_mui_as_withdrawn_v4( + &self, + mui: u32, + ) -> Result<(), PrefixStoreError> { + let guard = &epoch::pin(); + + self.v4.mark_mui_as_withdrawn(mui, guard) + } + + /// Change the status of all records for IPv6 prefixes for this + /// `multi_uniq_id` globally to Active. + /// + /// Note that the global `Active` status will be overridden by the local + /// status of the record. + pub fn mark_mui_as_active_v6( + &self, + mui: u32, + ) -> Result<(), PrefixStoreError> { + let guard = &epoch::pin(); + + self.v6.mark_mui_as_active(mui, guard) + } + + /// Change the status of all records for IPv6 prefixes for this + /// `multi_uniq_id` globally to Withdrawn. + /// + /// A global `Withdrawn` status for a `multi_uniq_id` overrides the local + /// status of prefixes for this mui. However the local status can still be + /// modified. This modification will take effect if the global status is + /// changed to `Active`. + pub fn mark_mui_as_withdrawn_v6( + &self, + mui: u32, + ) -> Result<(), PrefixStoreError> { + let guard = &epoch::pin(); + + self.v6.mark_mui_as_withdrawn(mui, guard) + } + + /// Change the status of all records for this `multi_uniq_id` to + /// Withdrawn. + /// + /// This method tries to mark all records: first the IPv4 records, + /// then the IPv6 records. If marking of the IPv4 records fails, + /// the method continues and tries to mark the IPv6 records. If + /// either or both fail, an error is returned. + pub fn mark_mui_as_withdrawn( + &self, + mui: u32, + ) -> Result<(), PrefixStoreError> { + let guard = &epoch::pin(); + + let res_v4 = self.v4.mark_mui_as_withdrawn(mui, guard); + let res_v6 = self.v6.mark_mui_as_withdrawn(mui, guard); + + res_v4.and(res_v6) + } + + /// Request whether the global status for IPv4 prefixes and the specified + /// `multi_uniq_id` is set to `Withdrawn`. 
+ pub fn mui_is_withdrawn_v4(&self, mui: u32) -> bool { + let guard = &epoch::pin(); + + self.v4.mui_is_withdrawn(mui, guard) + } + + /// Request whether the global status for IPv6 prefixes and the specified + /// `multi_uniq_id` is set to `Active`. + pub fn mui_is_withdrawn_v6(&self, mui: u32) -> bool { + let guard = &epoch::pin(); + + self.v6.mui_is_withdrawn(mui, guard) + } + + /// Request the number of all prefixes in the store. + pub fn prefixes_count(&self) -> UpsertCounters { + self.v4.prefixes_count() + self.v6.prefixes_count() + } + + /// Request the number of all IPv4 prefixes in the store. + /// + /// Note that this counter may be lower than the actual + /// number in the store, due to contention at the time of + /// reading the value. + pub fn prefixes_v4_count(&self) -> UpsertCounters { + self.v4.prefixes_count() + } + + /// Request the number of all IPv4 prefixes with the + /// supplied prefix length in the store. + /// + /// Note that this counter may be lower than the actual + /// number in the store, due to contention at the time of + /// reading the value. + pub fn prefixes_v4_count_for_len( + &self, + len: u8, + ) -> Result { + self.v4.prefixes_count_for_len(len) + } + + /// Request the number of all IPv6 prefixes in the store. + /// + /// Note that this counter may be lower than the actual + /// number in the store, due to contention at the time of + /// reading the value. + pub fn prefixes_v6_count(&self) -> UpsertCounters { + self.v6.prefixes_count() + } + + /// Returns the number of all IPv6 prefixes with the + /// supplied prefix length in the store. + /// + /// Note that this counter may be lower than the actual + /// number in the store, due to contention at the time of + /// reading the value. + pub fn prefixes_v6_count_for_len( + &self, + len: u8, + ) -> Result { + self.v6.prefixes_count_for_len(len) + } + + /// Request the number of all routes in the store. 
+ pub fn routes_count(&self) -> UpsertCounters { + self.v4.routes_count() + self.v6.routes_count() + } + + /// Request the number of all IPv4 routes in the store. + pub fn routes_count_v4(&self) -> UpsertCounters { + self.v4.routes_count() + } + + /// Request the number of all IPv6 routes in the store. + pub fn routes_count_v6(&self) -> UpsertCounters { + self.v6.routes_count() + } + + /// Request the number of nodes in the store. + /// + /// Note that this counter may be lower than the actual + /// number in the store, due to contention at the time of + /// reading the value. + pub fn nodes_count(&self) -> usize { + self.v4.get_nodes_count() + self.v6.get_nodes_count() + } + + /// Request the number of IPv4 nodes in the store. + /// + /// Note that this counter may be lower than the actual + /// number in the store, due to contention at the time of + /// reading the value. + pub fn nodes_v4_count(&self) -> usize { + self.v4.get_nodes_count() + } + + /// Request the number of IPv6 nodes in the store. + /// + /// Note that this counter may be lower than the actual + /// number in the store, due to contention at the time of + /// reading the value. + pub fn nodes_v6_count(&self) -> usize { + self.v6.get_nodes_count() + } + + /// Print the store statistics to the standard output. + #[cfg(feature = "cli")] + pub fn print_funky_stats(&self) { + println!("\nStats for IPv4 multi-threaded store\n"); + println!("{}", self.v4.tree_bitmap); + println!("Stats for IPv6 multi-threaded store\n"); + println!("{}", self.v6.tree_bitmap); + } + + // The Store statistics. + pub fn stats(&self) -> StoreStats { + StoreStats { + v4: self.v4.counters.prefix_stats(), + v6: self.v6.counters.prefix_stats(), + } + } + + // Disk Persistence + + /// Request the persist strategy as set in the [configuration]( + /// crate::rib::config) for this RIB. + pub fn persist_strategy(&self) -> PersistStrategy { + self.config.persist_strategy() + } + + /// Request all records for a prefix. 
+ /// + /// If `mui` is specified, only the record for that specific `mui` will + /// be returned. + /// + /// if `include_withdrawn` is passed in as `true` records with status + /// `Withdrawn` will be returned, as well as records with status `Active`. + pub fn get_records_for_prefix( + &self, + prefix: &Prefix, + mui: Option, + include_withdrawn: bool, + ) -> FatalResult>>> { + let guard = &epoch::pin(); + + match prefix.is_v4() { + true => self.v4.get_value( + PrefixId::::from(*prefix), + mui, + include_withdrawn, + guard, + ), + false => self.v6.get_value( + PrefixId::::from(*prefix), + mui, + include_withdrawn, + guard, + ), + } + } + + /// Persist all relevant RIB entries to disk. + /// + /// Records that are marked for persistence are first cached in memory, + /// and only written to disk when this method is called. + //// + /// The specific behaviour is depended on the chosen [persists strategy]( + /// crate::rib::config::PersistStrategy). + pub fn flush_to_disk(&self) -> Result<(), PrefixStoreError> { + self.v4.flush_to_disk()?; + self.v6.flush_to_disk()?; + + Ok(()) + } + + /// Request the approximate number of items that are persisted + /// to disk, for IPv4 and IPv6 respectively. + pub fn approx_persisted_items(&self) -> (usize, usize) { + ( + self.v4.approx_persisted_items(), + self.v6.approx_persisted_items(), + ) + } + + /// Request an estimation of the disk space currently used by the + /// store in bytes. 
+ pub fn disk_space(&self) -> u64 { + self.v4.disk_space() + self.v6.disk_space() + } +} diff --git a/src/rib/starcast_af.rs b/src/rib/starcast_af.rs new file mode 100644 index 00000000..84f826e2 --- /dev/null +++ b/src/rib/starcast_af.rs @@ -0,0 +1,560 @@ +use std::path::Path; + +use inetnum::addr::Prefix; +use log::{info, trace}; + +use crate::prefix_record::Meta; +use crate::rib::config::PersistStrategy; +use crate::stats::{Counters, UpsertCounters, UpsertReport}; +use crate::{epoch, Guard}; + +use crate::errors::{FatalError, FatalResult}; +use crate::prefix_cht::cht::PrefixCht; +use crate::types::prefix_record::{ValueHeader, ZeroCopyRecord}; +use crate::types::{PrefixId, RouteStatus}; +use crate::TreeBitMap; +use crate::{lsm_tree::LongKey, LsmTree}; +use crate::{types::errors::PrefixStoreError, types::prefix_record::Record}; + +use crate::{IPv4, IPv6}; + +use crate::AddressFamily; + +use super::config::Config; + +//------------ StarCastAfRib ------------------------------------------------- + +// A Routing Information Base that consists of multiple different trees for +// in-memory and on-disk (persisted storage) for one address family. Most of +// the methods on this struct are meant to be publicly available, however they +// are all behind the StarCastRib interface, that abstracts over the address +// family. +#[derive(Debug)] +pub(crate) struct StarCastAfRib< + AF: AddressFamily, + // The type that stores the route-like data + M: Meta, + // The number of root nodes for the tree bitmap (one for each 4 prefix + // lengths, so that's 9 for IPv4, 33 for IPv6) + const N_ROOT_SIZE: usize, + // The number of root nodes for the prefix CHT (one for each prefix length + // that can exists, so that's 33 for IPv4, and 129 for IPv6). + const P_ROOT_SIZE: usize, + // The configuration, each persistence strategy implements its own type. + C: Config, + // The size of the key in the persistence store, this varies per address + // family. 
This is 18 for IPv4 (1 octet prefix length, 4 octets address + // part prefix, 4 octets mui, 8 octets ltime, 1 octet RouteStatus). This + // corresponds to the `LongKey` struct. It's 30 for IPv6. + const KEY_SIZE: usize, +> { + pub config: C, + pub(crate) tree_bitmap: TreeBitMap, + pub(crate) prefix_cht: PrefixCht, + pub(crate) persist_tree: Option, KEY_SIZE>>, + pub counters: Counters, +} + +impl< + AF: AddressFamily, + M: Meta, + const P_ROOT_SIZE: usize, + const N_ROOT_SIZE: usize, + C: Config, + const KEY_SIZE: usize, + > StarCastAfRib +{ + pub(crate) fn new( + config: C, + ) -> Result< + StarCastAfRib, + Box, + > { + StarCastAfRib::::init( + config, + ) + } + + fn init(config: C) -> Result> { + info!("store: initialize store {}", AF::BITS); + + let persist_tree = match config.persist_strategy() { + PersistStrategy::MemoryOnly => None, + _ => { + let persist_path = if let Some(pp) = config.persist_path() { + pp + } else { + return Err(std::io::Error::new( + std::io::ErrorKind::Other, + "Missing persistence path".to_string(), + ) + .into()); + }; + let pp_ref = &Path::new(&persist_path); + Some(LsmTree::new(pp_ref).map_err(|_| { + std::io::Error::new( + std::io::ErrorKind::Other, + "Cannot create persistence store", + ) + })?) 
+ } + }; + + let store = StarCastAfRib { + config, + tree_bitmap: TreeBitMap::::new()?, + persist_tree, + counters: Counters::default(), + prefix_cht: PrefixCht::::init(), + }; + + Ok(store) + } + + pub(crate) fn insert( + &self, + prefix: PrefixId, + record: Record, + update_path_selections: Option, + ) -> Result { + trace!("try insertingf {:?}", prefix); + let guard = &epoch::pin(); + self.tree_bitmap + .set_prefix_exists(prefix, record.multi_uniq_id) + .and_then(|(retry_count, exists)| { + trace!("exists, upsert it"); + self.upsert_prefix( + prefix, + record, + update_path_selections, + guard, + ) + .map(|mut report| { + if report.mui_new { + self.counters.inc_routes_count(); + } + report.cas_count += retry_count as usize; + if !exists { + self.counters.inc_prefixes_count(prefix.len()); + report.prefix_new = true; + } + report + }) + }) + } + + fn upsert_prefix( + &self, + prefix: PrefixId, + record: Record, + update_path_selections: Option, + guard: &Guard, + ) -> Result { + let mui = record.multi_uniq_id; + match self.config.persist_strategy() { + PersistStrategy::WriteAhead => { + if let Some(persist_tree) = &self.persist_tree { + persist_tree.persist_record_w_long_key(prefix, &record); + + self.prefix_cht + .upsert_prefix( + prefix, + record, + update_path_selections, + guard, + ) + .map(|(report, _old_rec)| report) + } else { + Err(PrefixStoreError::StoreNotReadyError) + } + } + PersistStrategy::PersistHistory => self + .prefix_cht + .upsert_prefix(prefix, record, update_path_selections, guard) + .map(|(report, old_rec)| { + if let Some(rec) = old_rec { + if let Some(persist_tree) = &self.persist_tree { + persist_tree.persist_record_w_long_key( + prefix, + &Record::from((mui, &rec)), + ); + } + } + report + }), + PersistStrategy::MemoryOnly => self + .prefix_cht + .upsert_prefix(prefix, record, update_path_selections, guard) + .map(|(report, _)| report), + PersistStrategy::PersistOnly => { + if let Some(persist_tree) = &self.persist_tree { + let 
(retry_count, exists) = self + .tree_bitmap + .set_prefix_exists(prefix, record.multi_uniq_id)?; + persist_tree.persist_record_w_short_key(prefix, &record); + Ok(UpsertReport { + cas_count: retry_count as usize, + prefix_new: exists, + mui_new: true, + mui_count: 0, + }) + } else { + Err(PrefixStoreError::PersistFailed) + } + } + } + } + + pub fn contains(&self, prefix: PrefixId, mui: Option) -> bool { + if let Some(mui) = mui { + self.tree_bitmap.prefix_exists_for_mui(prefix, mui) + } else { + self.tree_bitmap.prefix_exists(prefix) + } + } + + pub fn get_nodes_count(&self) -> usize { + self.tree_bitmap.nodes_count() + } + + // Change the status of the record for the specified (prefix, mui) + // combination to Withdrawn. + pub fn mark_mui_as_withdrawn_for_prefix( + &self, + prefix: PrefixId, + mui: u32, + ltime: u64, + ) -> Result<(), PrefixStoreError> { + match self.persist_strategy() { + PersistStrategy::WriteAhead | PersistStrategy::MemoryOnly => { + let (stored_prefix, exists) = + self.prefix_cht.non_recursive_retrieve_prefix_mut(prefix); + + if !exists { + return Err(PrefixStoreError::PrefixNotFound); + } + stored_prefix + .record_map + .mark_as_withdrawn_for_mui(mui, ltime); + } + PersistStrategy::PersistOnly => { + println!( + "mark as wd in persist tree {:?} for mui {:?}", + prefix, mui + ); + if let Some(p_tree) = self.persist_tree.as_ref() { + let stored_prefixes = + p_tree.records_with_keys_for_prefix_mui(prefix, mui); + + for rkv in stored_prefixes { + if let Ok(r) = rkv { + let header = ValueHeader { + ltime, + status: RouteStatus::Withdrawn, + }; + p_tree + .rewrite_header_for_record(header, &r) + .map_err(|_| { + PrefixStoreError::StoreNotReadyError + })?; + } else { + return Err(PrefixStoreError::StoreNotReadyError); + } + } + } else { + return Err(PrefixStoreError::StoreNotReadyError); + } + } + PersistStrategy::PersistHistory => { + // First do the in-memory part + let (stored_prefix, exists) = + 
self.prefix_cht.non_recursive_retrieve_prefix_mut(prefix); + + if !exists { + return Err(PrefixStoreError::StoreNotReadyError); + } + stored_prefix + .record_map + .mark_as_withdrawn_for_mui(mui, ltime); + + // Use the record from the in-memory RIB to persist. + if let Some(_record) = + stored_prefix.record_map.get_record_for_mui(mui, true) + { + let p_tree = + if let Some(p_tree) = self.persist_tree.as_ref() { + p_tree + } else { + return Err(PrefixStoreError::StoreNotReadyError); + }; + + p_tree.insert_empty_record(prefix, mui, ltime); + } + } + } + + Ok(()) + } + + // Change the status of the record for the specified (prefix, mui) + // combination to Active. + pub fn mark_mui_as_active_for_prefix( + &self, + prefix: PrefixId, + mui: u32, + ltime: u64, + ) -> FatalResult<()> { + match self.persist_strategy() { + PersistStrategy::WriteAhead | PersistStrategy::MemoryOnly => { + let (stored_prefix, exists) = + self.prefix_cht.non_recursive_retrieve_prefix_mut(prefix); + + if !exists { + return Err(FatalError); + } + stored_prefix.record_map.mark_as_active_for_mui(mui, ltime); + } + PersistStrategy::PersistOnly => { + if let Some(p_tree) = self.persist_tree.as_ref() { + if let Ok(Some(record_b)) = + p_tree.most_recent_record_for_prefix_mui(prefix, mui) + { + let header = ValueHeader { + ltime, + status: RouteStatus::Active, + }; + p_tree + .rewrite_header_for_record(header, &record_b)?; + } + } else { + return Err(FatalError); + } + } + PersistStrategy::PersistHistory => { + // First do the in-memory part + let (stored_prefix, exists) = + self.prefix_cht.non_recursive_retrieve_prefix_mut(prefix); + + if !exists { + return Err(FatalError); + } + stored_prefix.record_map.mark_as_active_for_mui(mui, ltime); + + // Use the record from the in-memory RIB to persist. 
+ if let Some(_record) = + stored_prefix.record_map.get_record_for_mui(mui, true) + { + let p_tree = + if let Some(p_tree) = self.persist_tree.as_ref() { + p_tree + } else { + return Err(FatalError); + }; + + // Here we are keeping persisted history, so no removal of + // old (prefix, mui) records. + // We are inserting an empty record, since this is a + // withdrawal. + p_tree.insert_empty_record(prefix, mui, ltime); + } + } + } + + Ok(()) + } + + // Change the status of the mui globally to Withdrawn. Iterators and match + // functions will by default not return any records for this mui. + pub fn mark_mui_as_withdrawn( + &self, + mui: u32, + guard: &Guard, + ) -> Result<(), PrefixStoreError> { + self.tree_bitmap.mark_mui_as_withdrawn(mui, guard) + } + + // Change the status of the mui globally to Active. Iterators and match + // functions will default to the status on the record itself. + pub fn mark_mui_as_active( + &self, + mui: u32, + guard: &Guard, + ) -> Result<(), PrefixStoreError> { + self.tree_bitmap.mark_mui_as_active(mui, guard) + } + + // Whether this mui is globally withdrawn. Note that this overrules + // (by default) any (prefix, mui) combination in iterators and match + // functions. + pub fn mui_is_withdrawn(&self, mui: u32, guard: &Guard) -> bool { + // unsafe { + self.tree_bitmap.withdrawn_muis_bmin(guard).contains(mui) + } + + // Whether this mui is globally active. Note that the local statuses of + // records (prefix, mui) may be set to withdrawn in iterators and match + // functions. 
+ pub(crate) fn is_mui_active(&self, mui: u32, guard: &Guard) -> bool { + // !unsafe { + !self.tree_bitmap.withdrawn_muis_bmin(guard).contains(mui) + } + + pub(crate) fn prefixes_count(&self) -> UpsertCounters { + UpsertCounters { + in_memory_count: self.prefix_cht.prefixes_count(), + persisted_count: self + .persist_tree + .as_ref() + .map_or(0, |p| p.prefixes_count()), + total_count: self.counters.prefixes_count().iter().sum(), + } + } + + pub(crate) fn routes_count(&self) -> UpsertCounters { + UpsertCounters { + in_memory_count: self.prefix_cht.routes_count(), + persisted_count: self + .persist_tree + .as_ref() + .map_or(0, |p| p.routes_count()), + total_count: self.counters.routes_count(), + } + } + + // the len check does it all. + #[allow(clippy::indexing_slicing, clippy::unwrap_used)] + pub fn prefixes_count_for_len( + &self, + len: u8, + ) -> Result { + if len <= AF::BITS { + Ok(UpsertCounters { + in_memory_count: self + .tree_bitmap + .prefixes_count_for_len(len)?, + persisted_count: self + .persist_tree + .as_ref() + .map_or(0, |p| p.prefixes_count_for_len(len).unwrap()), + total_count: self.counters.prefixes_count()[len as usize], + }) + } else { + Err(PrefixStoreError::PrefixLengthInvalid) + } + } + + pub fn prefixes_iter<'a>( + &'a self, + guard: &'a Guard, + ) -> impl Iterator>)>> + 'a + { + self.tree_bitmap.prefixes_iter().map(|p| { + if let Ok(r) = self.get_value(p.into(), None, true, guard) { + Ok((p, r.unwrap_or_default())) + } else { + Err(FatalError) + } + }) + } + + //-------- Persistence --------------------------------------------------- + + pub fn persist_strategy(&self) -> PersistStrategy { + self.config.persist_strategy() + } + + pub(crate) fn persist_prefixes_iter( + &self, + ) -> impl Iterator>)>> + '_ + { + self.persist_tree + .as_ref() + .map(|tree| { + tree.prefixes_iter().map(|recs| { + if let Some(Ok(first_rec)) = recs.first() { + if let Ok(pfx) = + ZeroCopyRecord::::from_bytes(first_rec) + { + let mut rec_vec: Vec> = vec![]; + for 
res_rec in recs.iter() { + if let Ok(rec) = res_rec { + if let Ok(rec) = + ZeroCopyRecord::::from_bytes(rec) + { + rec_vec.push(Record { + multi_uniq_id: rec.multi_uniq_id, + ltime: rec.ltime, + status: rec.status, + meta: rec.meta.to_vec().into(), + }); + } + } else { + return Err(FatalError); + } + } + Ok((Prefix::from(pfx.prefix), rec_vec)) + } else { + Err(FatalError) + } + } else { + Err(FatalError) + } + }) + }) + .into_iter() + .flatten() + } + + pub(crate) fn flush_to_disk(&self) -> Result<(), PrefixStoreError> { + if let Some(p) = &self.persist_tree { + p.flush_to_disk() + .map_err(|_| PrefixStoreError::PersistFailed) + } else { + Err(PrefixStoreError::PersistFailed) + } + } + + pub fn approx_persisted_items(&self) -> usize { + if let Some(p) = &self.persist_tree { + p.approximate_len() + } else { + 0 + } + } + + pub fn disk_space(&self) -> u64 { + if let Some(p) = &self.persist_tree { + p.disk_space() + } else { + 0 + } + } +} + +impl< + M: Meta, + const N_ROOT_SIZE: usize, + const P_ROOT_SIZE: usize, + C: Config, + const KEY_SIZE: usize, + > std::fmt::Display + for StarCastAfRib +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "Rib", std::any::type_name::()) + } +} + +impl< + M: Meta, + const N_ROOT_SIZE: usize, + const P_ROOT_SIZE: usize, + C: Config, + const KEY_SIZE: usize, + > std::fmt::Display + for StarCastAfRib +{ + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "Rib", std::any::type_name::()) + } +} diff --git a/src/rib/starcast_af_query.rs b/src/rib/starcast_af_query.rs new file mode 100644 index 00000000..4db2a1cb --- /dev/null +++ b/src/rib/starcast_af_query.rs @@ -0,0 +1,402 @@ +use crossbeam_epoch::{self as epoch}; +use epoch::Guard; +use log::trace; +use zerocopy::TryFromBytes; + +use crate::errors::{FatalError, FatalResult}; +use crate::match_options::{MatchOptions, MatchType, QueryResult}; +use crate::prefix_record::RecordSet; +use crate::types::prefix_record::ZeroCopyRecord; 
+use crate::types::Record; +use crate::AddressFamily; +use crate::{prefix_record::Meta, rib::starcast_af::StarCastAfRib}; +use inetnum::addr::Prefix; + +use crate::types::errors::PrefixStoreError; +use crate::types::PrefixId; + +use super::config::{Config, PersistStrategy}; + +//------------ Prefix Matching ---------------------------------------------- + +impl< + 'a, + AF: AddressFamily, + M: Meta, + const N_ROOT_SIZE: usize, + const P_ROOT_SIZE: usize, + C: Config, + const KEY_SIZE: usize, + > StarCastAfRib +{ + pub(crate) fn get_value( + &'a self, + prefix_id: PrefixId, + mui: Option, + include_withdrawn: bool, + guard: &'a Guard, + ) -> FatalResult>>> { + match self.persist_strategy() { + PersistStrategy::PersistOnly => { + trace!("get value from persist_store for {:?}", prefix_id); + self.persist_tree + .as_ref() + .and_then(|tree| { + tree.records_for_prefix( + prefix_id, + mui, + include_withdrawn, + self.tree_bitmap.withdrawn_muis_bmin(guard), + ) + .map(|v| { + v.iter() + .map(|bytes| { + if let Ok(b) = bytes.as_ref() { + let record: &ZeroCopyRecord = + ZeroCopyRecord::try_ref_from_bytes(b) + .map_err(|_| FatalError)?; + Ok(Record:: { + multi_uniq_id: record + .multi_uniq_id, + ltime: record.ltime, + status: record.status, + meta: >::from( + record.meta.as_ref(), + ) + .into(), + }) + } else { + Err(FatalError) + } + }) + .collect::>>() + }) + }) + .transpose() + } + _ => Ok(self.prefix_cht.get_records_for_prefix( + prefix_id, + mui, + include_withdrawn, + self.tree_bitmap.withdrawn_muis_bmin(guard), + )), + } + } + + pub(crate) fn more_specifics_from( + &'a self, + prefix_id: PrefixId, + mui: Option, + include_withdrawn: bool, + guard: &'a Guard, + ) -> FatalResult> { + let prefix = if !self.contains(prefix_id, mui) { + Some(Prefix::from(prefix_id)) + } else { + None + }; + + let records = self + .get_value(prefix_id, mui, include_withdrawn, guard)? 
+ .unwrap_or_default(); + + let more_specifics = self + .tree_bitmap + .more_specific_prefix_iter_from(prefix_id) + .map(|p| { + self.get_value(prefix_id, mui, include_withdrawn, guard) + .map(|res| res.map(|v| (p, v))) + }) + .collect::>>>()?; + + Ok(QueryResult { + prefix, + records, + match_type: MatchType::EmptyMatch, + less_specifics: None, + more_specifics, + }) + } + + pub(crate) fn less_specifics_from( + &'a self, + prefix_id: PrefixId, + mui: Option, + include_withdrawn: bool, + guard: &'a Guard, + ) -> FatalResult> { + let prefix = if !self.contains(prefix_id, mui) { + Some(Prefix::from(prefix_id)) + } else { + None + }; + let prefix_meta = self + .get_value(prefix_id, mui, include_withdrawn, guard)? + .unwrap_or_default(); + + let less_specifics = self + .tree_bitmap + .less_specific_prefix_iter(prefix_id) + .map(|p| { + self.get_value(prefix_id, mui, include_withdrawn, guard) + .map(|res| res.map(|v| (p, v))) + }) + .collect::>>>()?; + + Ok(QueryResult { + prefix, + records: prefix_meta, + match_type: MatchType::EmptyMatch, + less_specifics, + more_specifics: None, + }) + } + + pub(crate) fn more_specifics_iter_from( + &'a self, + prefix_id: PrefixId, + mui: Option, + include_withdrawn: bool, + guard: &'a Guard, + ) -> impl Iterator, Vec>)>> + 'a + { + println!("more_specifics_iter_from fn"); + // If the user wanted a specific mui and not withdrawn prefixes, we + // may return early if the mui is globally withdrawn. 
+ (if mui.is_some_and(|m| { + !include_withdrawn && self.mui_is_withdrawn(m, guard) + }) { + None + } else { + Some( + self.tree_bitmap + .more_specific_prefix_iter_from(prefix_id) + .filter_map(move |p| { + self.get_value(p, mui, include_withdrawn, guard) + .map(|res| res.map(|v| (p, v))) + .transpose() + }), + ) + }) + .into_iter() + .flatten() + } + + pub(crate) fn less_specifics_iter_from( + &'a self, + prefix_id: PrefixId, + mui: Option, + include_withdrawn: bool, + guard: &'a Guard, + ) -> impl Iterator, Vec>)>> + 'a + { + self.tree_bitmap + .less_specific_prefix_iter(prefix_id) + .filter_map(move |p| { + self.get_value(p, mui, include_withdrawn, guard) + .map(|res| res.map(|v| (p, v))) + .transpose() + }) + } + + pub(crate) fn match_prefix( + &'a self, + search_pfx: PrefixId, + options: &MatchOptions, + guard: &'a Guard, + ) -> FatalResult> { + trace!("match_prefix rib {:?} {:?}", search_pfx, options); + let res = self.tree_bitmap.match_prefix(search_pfx, options); + + trace!("res {:?}", res); + let mut res = QueryResult::from(res); + + if let Some(Ok(Some(m))) = res.prefix.map(|p| { + self.get_value( + p.into(), + options.mui, + options.include_withdrawn, + guard, + ) + .map(|res| { + res.and_then(|v| if v.is_empty() { None } else { Some(v) }) + }) + }) { + res.records = m; + } else { + res.prefix = None; + res.match_type = MatchType::EmptyMatch; + } + + if options.include_more_specifics { + res.more_specifics = res + .more_specifics + .map(|p| { + p.iter() + .filter_map(|mut r| { + if let Ok(mm) = self.get_value( + r.prefix.into(), + options.mui, + options.include_withdrawn, + guard, + ) { + if let Some(m) = mm { + r.meta = m; + Some(Ok(r)) + } else { + None + } + } else { + Some(Err(FatalError)) + } + }) + .collect::>>() + }) + .transpose()?; + } + if options.include_less_specifics { + res.less_specifics = res + .less_specifics + .map(|p| { + p.iter() + .filter_map(|mut r| { + if let Ok(mm) = self.get_value( + r.prefix.into(), + options.mui, + 
options.include_withdrawn, + guard, + ) { + if let Some(m) = mm { + r.meta = m; + Some(Ok(r)) + } else { + None + } + } else { + Some(Err(FatalError)) + } + }) + .collect::>>() + }) + .transpose()?; + } + + Ok(res) + } + + pub(crate) fn best_path( + &'a self, + search_pfx: PrefixId, + guard: &Guard, + ) -> Option, PrefixStoreError>> { + self.prefix_cht + .non_recursive_retrieve_prefix(search_pfx) + .0 + .map(|p_rec| { + p_rec.get_path_selections(guard).best().map_or_else( + || Err(PrefixStoreError::BestPathNotFound), + |mui| { + p_rec + .record_map + .get_record_for_mui(mui, false) + .ok_or(PrefixStoreError::StoreNotReadyError) + }, + ) + }) + } + + pub(crate) fn calculate_and_store_best_and_backup_path( + &self, + search_pfx: PrefixId, + tbi: &::TBI, + guard: &Guard, + ) -> Result<(Option, Option), PrefixStoreError> { + self.prefix_cht + .non_recursive_retrieve_prefix(search_pfx) + .0 + .map_or(Err(PrefixStoreError::StoreNotReadyError), |p_rec| { + p_rec.calculate_and_store_best_backup(tbi, guard) + }) + } + + pub(crate) fn is_ps_outdated( + &self, + search_pfx: PrefixId, + guard: &Guard, + ) -> Result { + self.prefix_cht + .non_recursive_retrieve_prefix(search_pfx) + .0 + .map_or(Err(PrefixStoreError::StoreNotReadyError), |p| { + Ok(p.is_ps_outdated(guard)) + }) + } +} + +#[derive(Debug)] +pub(crate) struct TreeQueryResult { + pub match_type: MatchType, + pub prefix: Option>, + pub less_specifics: Option>>, + pub more_specifics: Option>>, +} + +impl From> + for QueryResult +{ + fn from(value: TreeQueryResult) -> Self { + Self { + match_type: value.match_type, + prefix: value.prefix.map(|p| p.into()), + records: vec![], + less_specifics: value + .less_specifics + .map(|ls| ls.into_iter().map(|p| (p, vec![])).collect()), + more_specifics: value + .more_specifics + .map(|ms| ms.into_iter().map(|p| (p, vec![])).collect()), + } + } +} + +impl From> + for FamilyQueryResult +{ + fn from(value: TreeQueryResult) -> Self { + Self { + match_type: value.match_type, + prefix: 
value.prefix, + prefix_meta: vec![], + less_specifics: None, + more_specifics: None, + } + } +} + +pub(crate) type FamilyRecord = Vec<(PrefixId, Vec>)>; + +pub(crate) struct FamilyQueryResult { + pub match_type: MatchType, + pub prefix: Option>, + pub prefix_meta: Vec>, + pub less_specifics: Option>, + pub more_specifics: Option>, +} + +impl From> + for QueryResult +{ + fn from(value: FamilyQueryResult) -> Self { + QueryResult { + match_type: value.match_type, + prefix: value.prefix.map(|p| p.into()), + records: value.prefix_meta, + less_specifics: value + .less_specifics + .map(|ls| ls.into_iter().collect()), + more_specifics: value + .more_specifics + .map(|ms| ms.into_iter().collect()), + } + } +} diff --git a/src/rotonda_store.rs b/src/rotonda_store.rs deleted file mode 100644 index 36ed0d13..00000000 --- a/src/rotonda_store.rs +++ /dev/null @@ -1,202 +0,0 @@ -use std::{fmt, slice}; - -use crate::prefix_record::{PublicRecord, RecordSet}; -pub use crate::prefix_record::{PublicPrefixSingleRecord, Meta, RecordSingleSet}; -use crate::{prefix_record::InternalPrefixRecord, stats::StrideStats}; - -use inetnum::addr::Prefix; - -pub use crate::af::{AddressFamily, IPv4, IPv6}; - -pub use crate::local_array::store::custom_alloc; - -pub const RECORDS_MAX_NUM: usize = 3; - -//------------ The publicly available Rotonda Stores ------------------------ - -pub use crate::local_array::store::DefaultStore as MultiThreadedStore; -pub use crate::local_vec::store::Store as SingleThreadedStore; - -//------------ Types for strides displaying/monitoring ---------------------- - -type AfStrideStats = Vec; - -pub struct Stats<'a> { - pub v4: &'a AfStrideStats, - pub v6: &'a AfStrideStats, -} - -impl<'a> std::fmt::Display for Stats<'a> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - writeln!(f, "v4 ")?; - for s in self.v4.iter() { - writeln!(f, "{} ", s)?; - } - writeln!(f, "v6 ")?; - for s in self.v6.iter() { - writeln!(f, "{} ", s)?; - } - Ok(()) - } -} - 
-pub struct Strides<'a> { - pub v4: &'a Vec, - pub v6: &'a Vec, -} - -impl<'a> std::fmt::Debug for Strides<'a> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "v4 ")?; - for s in self.v4.iter() { - write!(f, "{} ", s)?; - } - writeln!(f, "v5 ")?; - for s in self.v6.iter() { - write!(f, "{} ", s)?; - } - Ok(()) - } -} - -//------------ MatchOptions / MatchType ------------------------------------- - -/// Options for the `match_prefix` method -/// -/// The `MatchOptions` struct is used to specify the options for the -/// `match_prefix` method on the store. -/// -/// Note that the `match_type` field may be different from the actual -/// `MatchType` returned from the result. -/// -/// See [MultiThreadedStore::match_prefix] for more details. -#[derive(Debug, Clone)] -pub struct MatchOptions { - /// The requested [MatchType] - pub match_type: MatchType, - /// Unused - pub include_withdrawn: bool, - /// Whether to include all less-specific records in the query result - pub include_less_specifics: bool, - // Whether to include all more-specific records in the query result - pub include_more_specifics: bool, - /// Whether to return records for a specific multi_uniq_id, None indicates - /// all records. - pub mui: Option -} - -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum MatchType { - ExactMatch, - LongestMatch, - EmptyMatch, -} - -impl MatchType { - pub fn is_empty(&self) -> bool { - matches!(self, Self::EmptyMatch) - } -} - -impl std::fmt::Display for MatchType { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - MatchType::ExactMatch => write!(f, "exact-match"), - MatchType::LongestMatch => write!(f, "longest-match"), - MatchType::EmptyMatch => write!(f, "empty-match"), - } - } -} - - -//------------ PrefixRecordIter --------------------------------------------- - -// Converts from the InternalPrefixRecord to the (public) PrefixRecord -// while iterating. 
-#[derive(Clone, Debug)] -pub struct PrefixSingleRecordIter<'a, M: Meta> { - pub(crate) v4: Option>>, - pub(crate) v6: slice::Iter<'a, InternalPrefixRecord>, -} - -impl<'a, M: Meta> Iterator - for PrefixSingleRecordIter<'a, M> -{ - type Item = PublicPrefixSingleRecord; - - fn next(&mut self) -> Option { - // V4 is already done. - if self.v4.is_none() { - return self.v6.next().map(|res| { - PublicPrefixSingleRecord::new( - Prefix::new(res.net.into_ipaddr(), res.len).unwrap(), - res.meta.clone(), - ) - }); - } - - if let Some(res) = self.v4.as_mut().and_then(|v4| v4.next()) { - return Some(PublicPrefixSingleRecord::new( - Prefix::new(res.net.into_ipaddr(), res.len).unwrap(), - res.meta.clone(), - )); - } - self.v4 = None; - self.next() - } -} - - -//------------- QueryResult ------------------------------------------------- - -/// The type that is returned by a query. -/// -/// This is the result type of a query. It contains the prefix record that was -/// found in the store, as well as less- or more-specifics as requested. -/// -/// See [MultiThreadedStore::match_prefix] for more details. 
- - -#[derive(Clone, Debug)] -pub struct QueryResult { - /// The match type of the resulting prefix - pub match_type: MatchType, - /// The resulting prefix record - pub prefix: Option, - /// The meta data associated with the resulting prefix record - pub prefix_meta: Vec>, - /// The less-specifics of the resulting prefix together with their meta data - pub less_specifics: Option>, - /// The more-specifics of the resulting prefix together with their meta data - pub more_specifics: Option>, -} - -impl fmt::Display for QueryResult { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let pfx_str = match self.prefix { - Some(pfx) => format!("{}", pfx), - None => "".to_string(), - }; - // let pfx_meta_str = match &self.prefix_meta { - // Some(pfx_meta) => format!("{}", pfx_meta), - // None => "".to_string(), - // }; - writeln!(f, "match_type: {}", self.match_type)?; - writeln!(f, "prefix: {}", pfx_str)?; - write!(f, "meta: [ ")?; - for rec in &self.prefix_meta { - write!(f, "{},", rec)?; - } - writeln!(f, " ]")?; - writeln!(f, "less_specifics: {{ {} }}", if let Some(ls) = self.less_specifics.as_ref() { - format!("{}", ls) - } else { - "".to_string() - })?; - writeln!(f, "more_specifics: {{ {} }}", if let Some(ms) = self.more_specifics.as_ref() { - format!("{}", ms) - } else { - "".to_string() - }) - } -} diff --git a/src/stats.rs b/src/stats.rs deleted file mode 100644 index 8ac46c05..00000000 --- a/src/stats.rs +++ /dev/null @@ -1,154 +0,0 @@ -//------------ Types for Statistics ----------------------------------------- - -use crate::stride::{Stride3, Stride4, Stride5, Stride6, Stride7, Stride8}; -use std::fmt::{Debug, Display}; - -#[derive(Debug, Copy, Clone)] -pub enum SizedStride { - Stride3, - Stride4, - Stride5, - Stride6, - Stride7, - Stride8, -} -pub struct StrideStats { - pub stride_type: SizedStride, - pub stride_size: usize, - pub stride_len: u8, - pub node_size: usize, - pub created_nodes: Vec, - pub prefixes_num: Vec, -} - -impl StrideStats { - pub 
fn new(stride_type: SizedStride, num_depth_levels: u8) -> Self { - match stride_type { - SizedStride::Stride3 => Self { - stride_type: SizedStride::Stride3, - stride_size: 16, - stride_len: 3, - node_size: std::mem::size_of::(), - created_nodes: Self::nodes_vec(num_depth_levels), - prefixes_num: Self::nodes_vec(num_depth_levels), - }, - SizedStride::Stride4 => Self { - stride_type: SizedStride::Stride4, - stride_size: 32, - stride_len: 4, - node_size: std::mem::size_of::(), - created_nodes: Self::nodes_vec(num_depth_levels), - prefixes_num: Self::nodes_vec(num_depth_levels), - }, - SizedStride::Stride5 => Self { - stride_type: SizedStride::Stride5, - stride_size: 64, - stride_len: 5, - node_size: std::mem::size_of::(), - created_nodes: Self::nodes_vec(num_depth_levels), - prefixes_num: Self::nodes_vec(num_depth_levels), - }, - SizedStride::Stride6 => Self { - stride_type: SizedStride::Stride6, - stride_size: 128, - stride_len: 6, - node_size: std::mem::size_of::(), - created_nodes: Self::nodes_vec(num_depth_levels), - prefixes_num: Self::nodes_vec(num_depth_levels), - }, - SizedStride::Stride7 => Self { - stride_type: SizedStride::Stride7, - stride_size: 256, - stride_len: 7, - node_size: std::mem::size_of::(), - created_nodes: Self::nodes_vec(num_depth_levels), - prefixes_num: Self::nodes_vec(num_depth_levels), - }, - SizedStride::Stride8 => Self { - stride_type: SizedStride::Stride8, - stride_size: 512, - stride_len: 8, - node_size: std::mem::size_of::(), - created_nodes: Self::nodes_vec(num_depth_levels), - prefixes_num: Self::nodes_vec(num_depth_levels), - }, - } - } - - pub fn mem_usage(&self) -> usize { - self.stride_size - * self.created_nodes.iter().fold(0, |mut acc, c| { - acc += c.count; - acc - }) - } - - fn nodes_vec(num_depth_levels: u8) -> Vec { - let mut vec: Vec = vec![]; - for n in 0..num_depth_levels { - vec.push(CreatedNodes { - depth_level: n, - count: 0, - }) - } - vec - } - - pub fn inc(&mut self, depth_level: u8) { - 
self.created_nodes[depth_level as usize].count += 1; - } - - pub fn inc_prefix_count(&mut self, depth_level: u8) { - self.prefixes_num[depth_level as usize].count += 1; - } -} - -impl Debug for StrideStats { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{:?}:{:>8?} {:?} ({}k)", - &self.stride_type, - &self.created_nodes.iter().fold(0, |mut a, n| { - a += n.count; - a - }), - &self.created_nodes, - &self.mem_usage() / 1024 - ) - } -} - -impl Display for StrideStats { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{:?}:{:>8?} {:?} ({}k)", - &self.stride_type, - &self.created_nodes.iter().fold(0, |mut a, n| { - a += n.count; - a - }), - &self.created_nodes, - &self.mem_usage() / 1024 - ) - } -} - -#[derive(Copy, Clone)] -pub struct CreatedNodes { - pub depth_level: u8, - pub count: usize, -} - -impl CreatedNodes { - pub fn add(mut self, num: usize) { - self.count += num; - } -} - -impl Debug for CreatedNodes { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - f.write_fmt(format_args!("/{}: {}", &self.depth_level, &self.count)) - } -} diff --git a/src/stride.rs b/src/stride.rs deleted file mode 100644 index e3e4f771..00000000 --- a/src/stride.rs +++ /dev/null @@ -1,262 +0,0 @@ -use crate::impl_primitive_stride; -use crate::synth_int::{U256, U512}; -use std::fmt::{Binary, Debug}; - -pub type Stride3 = u16; -pub type Stride4 = u32; -pub type Stride5 = u64; -pub type Stride6 = u128; -pub type Stride7 = U256; -pub type Stride8 = U512; - -pub trait Stride: - Sized + Debug + Binary + Eq + PartialOrd + PartialEq + Copy -{ - type PtrSize; - const BITS: u8; - const STRIDE_LEN: u8; - - // Get the bit position of the start of the given nibble. - // The nibble is defined as a `len` number of bits set from the right. - // bit_pos always has only one bit set in the complete array. 
- // e.g.: - // len: 4 - // nibble: u16 = 0b0000 0000 0000 0111 - // bit_pos: u16 = 0b0000 0000 0000 1000 - - // `::BITS` - // is the whole length of the bitmap, since we are shifting to the left, - // we have to start at the end of the bitmap. - // `((1 << len) - 1)` - // is the offset for this nibble length in the bitmap. - // `nibble` - // shifts to the right position withing the bit range for this nibble - // length, this follows from the fact that the `nibble` value represents - // *both* the bitmap part, we're considering here *and* the position - // relative to the nibble length offset in the bitmap. - fn get_bit_pos(nibble: u32, len: u8) -> Self; - - // Clear the bitmap to the right of the pointer and count the number of ones. - // This numbder represents the index to the corresponding prefix in the pfx_vec. - - // Clearing is performed by shifting to the right until we have the nibble - // all the way at the right. - - // `(::BITS >> 1)` - // The end of the bitmap (this bitmap is half the size of the pfx bitmap) - - // `nibble` - // The bit position relative to the offset for the nibble length, this index - // is only used at the last (relevant) stride, so the offset is always 0. - fn get_pfx_index(bitmap: Self, nibble: u32, len: u8) -> usize; - - // Clear the bitmap to the right of the pointer and count the number of ones. - // This number represents the index to the corresponding child node in the ptr_vec. - - // Clearing is performed by shifting to the right until we have the nibble - // all the way at the right. - - // For ptrbitarr the only index we want is the one for a full-length nibble - // (stride length) at the last stride, so we don't need the length of the nibble - - // `(::BITS >> 1)` - // The end of the bitmap (this bitmap is half the size of the pfx bitmap), - // ::BITS is the size of the pfx bitmap. 
- - // `nibble` - // The bit position relative to the offset for the nibble length, this index - // is only used at the last (relevant) stride, so the offset is always 0. - fn get_ptr_index(bitmap: Self::PtrSize, nibble: u32) -> usize; - - // Convert a ptrbitarr into a pfxbitarr sized bitmap, - // so we can do bitwise operations with a pfxbitarr sized - // bitmap on them. - // Since the last bit in the pfxbitarr isn't used, but the - // full ptrbitarr *is* used, the prtbitarr should be shifted - // one bit to the left. - #[allow(clippy::wrong_self_convention)] - fn into_stride_size(bitmap: Self::PtrSize) -> Self; - - // Convert a pfxbitarr sized bitmap into a ptrbitarr sized - // Note that bitwise operators align bits of unsigend types with different - // sizes to the right, so we don't have to do anything to pad the smaller sized - // type. We do have to shift one bit to the left, to accomodate the unused pfxbitarr's - // last bit. - fn into_ptrbitarr_size(bitmap: Self) -> Self::PtrSize; - - fn leading_zeros(self) -> u32; -} - -impl_primitive_stride![3; 16; u16; u8]; -impl_primitive_stride![4; 32; u32; u16]; -impl_primitive_stride![5; 64; u64; u32]; -impl_primitive_stride![6; 128; u128; u64]; - -impl Stride for Stride7 { - type PtrSize = u128; - const BITS: u8 = 255; - const STRIDE_LEN: u8 = 7; - - fn get_bit_pos(nibble: u32, len: u8) -> Self { - match 256 - ((1 << len) - 1) as u16 - nibble as u16 - 1 { - n if n < 128 => U256(0, 1 << n), - n => U256(1 << (n - 128), 0), - } - } - - fn get_pfx_index(bitmap: Self, nibble: u32, len: u8) -> usize { - let n = 256 - ((1 << len) - 1) as u16 - nibble as u16 - 1; - match n { - // if we move less than 128 bits to the right, - // all of bitmap.0 and a part of bitmap.1 will be used for counting zeros - // ex. - // ...1011_1010... >> 2 => ...0010_111010... 
- // ____ ==== -- --==== - n if n < 128 => { - bitmap.0.count_ones() as usize - + (bitmap.1 >> n).count_ones() as usize - - 1 - } - // if we move more than 128 bits to the right, - // all of bitmap.1 wil be shifted out of sight, - // so we only have to count bitmap.0 zeroes than (after shifting of course). - n => (bitmap.0 >> (n - 128)).count_ones() as usize - 1, - } - } - - fn get_ptr_index(bitmap: Self::PtrSize, nibble: u32) -> usize { - (bitmap >> ((256 >> 1) - nibble as u16 - 1) as usize).count_ones() - as usize - - 1 - } - - fn into_stride_size(bitmap: Self::PtrSize) -> Self { - // One bit needs to move into the self.0 u128, - // since the last bit of the *whole* bitmap isn't used. - U256(bitmap >> 127, bitmap << 1) - } - - fn into_ptrbitarr_size(bitmap: Self) -> Self::PtrSize { - // TODO expand: - // self.ptrbitarr = - // S::into_ptrbitarr_size(bit_pos | S::into_stride_size(self.ptrbitarr)); - bitmap.0 << 127 | bitmap.1 >> 1 - } - - #[inline] - fn leading_zeros(self) -> u32 { - let lz = self.0.leading_zeros(); - if lz == 128 { - lz + self.1.leading_zeros() - } else { - lz - } - } -} - -impl Stride for Stride8 { - type PtrSize = U256; - const BITS: u8 = 255; // bogus - const STRIDE_LEN: u8 = 8; - - fn get_bit_pos(nibble: u32, len: u8) -> Self { - match 512 - ((1 << len) - 1) as u16 - nibble as u16 - 1 { - n if n < 128 => U512(0, 0, 0, 1 << n), - n if n < 256 => U512(0, 0, 1 << (n - 128), 0), - n if n < 384 => U512(0, 1 << (n - 256), 0, 0), - n => U512(1 << (n - 384), 0, 0, 0), - } - } - - fn get_pfx_index(bitmap: Self, nibble: u32, len: u8) -> usize { - let n = 512 - ((1 << len) - 1) as u16 - nibble as u16 - 1; - match n { - // if we move less than 128 bits to the right, all of bitmap.2 - // and a part of bitmap.3 will be used for counting zeros. - // ex. - // ...1011_1010... >> 2 => ...0010_111010... 
- // ____ ==== -- --==== - n if n < 128 => { - bitmap.0.count_ones() as usize - + bitmap.1.count_ones() as usize - + bitmap.2.count_ones() as usize - + (bitmap.3 >> n).count_ones() as usize - - 1 - } - - n if n < 256 => { - bitmap.0.count_ones() as usize - + bitmap.1.count_ones() as usize - + (bitmap.2 >> (n - 128)).count_ones() as usize - - 1 - } - - n if n < 384 => { - bitmap.0.count_ones() as usize - + (bitmap.1 >> (n - 256)).count_ones() as usize - - 1 - } - - // if we move more than 384 bits to the right, all of bitmap. - // [1,2,3] will be shifted out of sight, so we only have to count - // bitmap.0 zeroes then (after shifting of course). - n => (bitmap.0 >> (n - 384)).count_ones() as usize - 1, - } - } - - fn get_ptr_index(bitmap: Self::PtrSize, nibble: u32) -> usize { - let n = (512 >> 1) - nibble as u16 - 1; - match n { - // if we move less than 256 bits to the right, all of bitmap.0 - // and a part of bitmap.1 will be used for counting zeros - // ex. - // ...1011_1010... >> 2 => ...0010_111010... - // ____ ==== -- --==== - n if n < 128 => { - bitmap.0.count_ones() as usize - + (bitmap.1 >> n).count_ones() as usize - - 1 - } - // if we move more than 256 bits to the right, all of bitmap.1 - // wil be shifted out of sight, so we only have to count bitmap.0 - // zeroes than (after) shifting of course). - n => (bitmap.0 >> (n - 128)).count_ones() as usize - 1, - } - } - - fn into_stride_size(bitmap: Self::PtrSize) -> Self { - // One bit needs to move into the self.0 u128, - // since the last bit of the *whole* bitmap isn't used. 
- U512( - 0, - bitmap.0 >> 127, - (bitmap.0 << 1) | (bitmap.1 >> 127), - bitmap.1 << 1, - ) - } - - fn into_ptrbitarr_size(bitmap: Self) -> Self::PtrSize { - // TODO expand: - // self.ptrbitarr = - // S::into_ptrbitarr_size(bit_pos | S::into_stride_size(self.ptrbitarr)); - U256( - bitmap.1 << 127 | bitmap.2 >> 1, - bitmap.2 << 127 | bitmap.3 >> 1, - ) - } - - #[inline] - fn leading_zeros(self) -> u32 { - let mut lz = self.0.leading_zeros(); - if lz == 128 { - lz += self.1.leading_zeros(); - if lz == 256 { - lz += self.2.leading_zeros(); - if lz == 384 { - lz += self.3.leading_zeros(); - } - } - } - lz - } -} diff --git a/src/synth_int.rs b/src/synth_int.rs deleted file mode 100644 index b8601fbc..00000000 --- a/src/synth_int.rs +++ /dev/null @@ -1,444 +0,0 @@ -use std::cmp::Ordering; -use std::convert::TryInto; -use std::fmt::{Binary, Debug}; -use std::sync::atomic::AtomicU64; - -use crate::af::Zero; - -//------------ U256 synthetic integer type ---------------------------------- - -#[derive(Copy, Clone)] -pub struct U256(pub u128, pub u128); - -impl U256 { - pub fn to_be_bytes(self) -> [u8; 32] { - [self.0.to_be_bytes(), self.1.to_be_bytes()] - .concat() - .try_into() - .expect("U256 with incorrect length.") - } - - pub fn from_bytes(bytes: &[u8]) -> U256 { - let nibble1: u128 = u128::from_be_bytes([ - bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], - bytes[6], bytes[7], bytes[8], bytes[9], bytes[10], bytes[11], - bytes[12], bytes[13], bytes[14], bytes[15], - ]); - let nibble2: u128 = u128::from_be_bytes([ - bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21], - bytes[22], bytes[23], bytes[24], bytes[25], bytes[26], bytes[27], - bytes[28], bytes[29], bytes[30], bytes[31], - ]); - U256(nibble1, nibble2) - } -} - -impl Debug for U256 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_fmt(format_args!( - "{:0128b}\n {:0128b}", - self.0, self.1 - )) - } -} - -impl Eq for U256 {} - -impl std::ops::Add for U256 { 
- type Output = Self; - - fn add(self, other: Self) -> Self { - U256(self.0.wrapping_add(other.0), self.1.wrapping_add(other.1)) - } -} - -impl Zero for U256 { - fn zero() -> Self { - U256(0_u128, 0_u128) - } - fn is_zero(&self) -> bool { - self.0 == 0_u128 && self.1 == 0_u128 - } -} - -impl Binary for U256 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - std::fmt::Binary::fmt(&self, f) - } -} - -impl PartialOrd for U256 { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl Ord for U256 { - fn cmp(&self, other: &Self) -> Ordering { - match (self.0, &other.0) { - (a, b) if &a > b => self.0.cmp(&other.0), - _ => self.1.cmp(&other.1), - } - } -} - -impl std::ops::BitOr for U256 { - type Output = Self; - fn bitor(self, rhs: Self) -> Self::Output { - Self(self.0 | rhs.0, self.1 | rhs.1) - } -} - -impl std::ops::BitAnd for U256 { - type Output = Self; - fn bitand(self, rhs: Self) -> Self::Output - where - Self: Eq, - { - Self(self.0 & rhs.0, self.1 & rhs.1) - } -} - -impl PartialEq for U256 { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 && self.1 == other.1 - } -} - -//------------ U512 Synthetic Integer Type ---------------------------------- - -#[derive(Debug, Copy, Clone)] -pub struct U512(pub u128, pub u128, pub u128, pub u128); - -impl U512 { - pub fn to_be_bytes(self) -> [u8; 64] { - [ - self.0.to_be_bytes(), - self.1.to_be_bytes(), - self.2.to_be_bytes(), - self.3.to_be_bytes(), - ] - .concat() - .try_into() - .expect("U512 with incorrect length.") - } - - pub fn from_bytes(bytes: &[u8]) -> U512 { - let nibble1: u128 = u128::from_be_bytes([ - bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], - bytes[6], bytes[7], bytes[8], bytes[9], bytes[10], bytes[11], - bytes[12], bytes[13], bytes[14], bytes[15], - ]); - let nibble2: u128 = u128::from_be_bytes([ - bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21], - bytes[22], bytes[23], bytes[24], bytes[25], bytes[26], 
bytes[27], - bytes[28], bytes[29], bytes[30], bytes[31], - ]); - let nibble3: u128 = u128::from_be_bytes([ - bytes[32], bytes[33], bytes[34], bytes[35], bytes[36], bytes[37], - bytes[38], bytes[39], bytes[40], bytes[41], bytes[42], bytes[43], - bytes[44], bytes[45], bytes[46], bytes[47], - ]); - let nibble4: u128 = u128::from_be_bytes([ - bytes[48], bytes[49], bytes[50], bytes[51], bytes[52], bytes[53], - bytes[54], bytes[55], bytes[56], bytes[57], bytes[58], bytes[59], - bytes[60], bytes[61], bytes[62], bytes[63], - ]); - U512(nibble1, nibble2, nibble3, nibble4) - } -} - -impl PartialOrd for U512 { - fn partial_cmp(&self, other: &Self) -> Option { - match (self.0, &other.0) { - (a, b) if &a > b => Some(self.0.cmp(&other.0)), - _ => match (self.1, &other.1) { - (a, b) if &a > b => Some(self.1.cmp(&other.1)), - _ => match (self.2, &other.2) { - (a, b) if &a > b => Some(self.2.cmp(&other.2)), - _ => Some(self.3.cmp(&other.3)), - }, - }, - } - } -} - -impl PartialEq for U512 { - fn eq(&self, other: &Self) -> bool { - self.0 == other.0 - && self.1 == other.1 - && self.2 == other.2 - && self.3 == other.3 - } -} - -impl Eq for U512 {} - -impl std::ops::Add for U512 { - type Output = Self; - fn add(self, rhs: Self) -> Self::Output { - Self( - self.0 + rhs.0, - self.1 + rhs.1, - self.2 + rhs.2, - self.3 + rhs.3, - ) - } -} - -impl Zero for U512 { - fn zero() -> Self { - U512(0_u128, 0_u128, 0_u128, 0_u128) - } - fn is_zero(&self) -> bool { - self.0 == 0_u128 - && self.1 == 0_u128 - && self.2 == 0_u128 - && self.3 == 0_u128 - } -} - -impl Binary for U512 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - std::fmt::Binary::fmt(&self, f) - } -} - -impl std::ops::BitOr for U512 { - type Output = Self; - fn bitor(self, rhs: Self) -> Self::Output { - Self( - self.0 | rhs.0, - self.1 | rhs.1, - self.2 | rhs.2, - self.3 | rhs.3, - ) - } -} - -impl std::ops::BitAnd for U512 { - type Output = Self; - fn bitand(self, rhs: Self) -> Self::Output - where - Self: 
Eq, - { - Self( - self.0 & rhs.0, - self.1 & rhs.1, - self.2 & rhs.2, - self.3 & rhs.3, - ) - } -} - -//------------ Atomic U128 Synthetic Integer Type ------------------------------------- - -#[allow(dead_code)] -pub struct AtomicU128(pub AtomicU64, pub AtomicU64); - -#[allow(dead_code)] -impl AtomicU128 { - pub fn new(value: u128) -> Self { - let (hi, lo) = - (((value << 64) >> 64) as u64, ((value >> 64) << 64) as u64); - AtomicU128(AtomicU64::new(hi), AtomicU64::new(lo)) - } - - pub fn into_be_bytes(self) -> [u8; 16] { - [ - self.0.into_inner().to_be_bytes(), - self.1.into_inner().to_be_bytes(), - ] - .concat() - .try_into() - .expect("AtomicU128 with incorrect length.") - } - - pub fn from_bytes(bytes: &[u8]) -> Self { - let hi = u64::from_be_bytes([ - bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], - bytes[6], bytes[7], - ]); - let lo = u64::from_be_bytes([ - bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], - bytes[14], bytes[15], - ]); - AtomicU128(AtomicU64::new(hi), AtomicU64::new(lo)) - } -} - -impl Debug for AtomicU128 { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.write_fmt(format_args!( - "{:016x}\n{:016x}", - self.0.load(std::sync::atomic::Ordering::SeqCst), - self.1.load(std::sync::atomic::Ordering::SeqCst) - )) - } -} - -//------------ Atomic U256 Synthetic Integer Type ------------------------------------- - -#[allow(dead_code)] -pub struct AtomicU256( - pub AtomicU64, - pub AtomicU64, - pub AtomicU64, - pub AtomicU64, -); - -#[allow(dead_code)] -impl AtomicU256 { - pub fn new(value: U256) -> Self { - let (hihi, hilo, lohi, lolo) = ( - ((value.0 << 64) >> 64) as u64, - ((value.0 >> 64) << 64) as u64, - ((value.1 << 64) >> 64) as u64, - ((value.1 >> 64) << 64) as u64, - ); - AtomicU256( - AtomicU64::new(hihi), - AtomicU64::new(hilo), - AtomicU64::new(lohi), - AtomicU64::new(lolo), - ) - } - - pub fn into_be_bytes(self) -> [u8; 32] { - [ - self.0.into_inner().to_be_bytes(), - 
self.1.into_inner().to_be_bytes(), - self.2.into_inner().to_be_bytes(), - self.3.into_inner().to_be_bytes(), - ] - .concat() - .try_into() - .expect("AtomicU256 with incorrect length.") - } - - pub fn from_bytes(bytes: &[u8]) -> Self { - let hihi = u64::from_be_bytes([ - bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], - bytes[6], bytes[7], - ]); - let hilo = u64::from_be_bytes([ - bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], - bytes[14], bytes[15], - ]); - let lohi = u64::from_be_bytes([ - bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21], - bytes[22], bytes[23], - ]); - let lolo = u64::from_be_bytes([ - bytes[24], bytes[25], bytes[26], bytes[27], bytes[28], bytes[29], - bytes[30], bytes[31], - ]); - AtomicU256( - AtomicU64::new(hihi), - AtomicU64::new(hilo), - AtomicU64::new(lohi), - AtomicU64::new(lolo), - ) - } -} - -//------------ Atomic U512 Synthetic Integer Type ------------------------------------- - -#[allow(dead_code)] -pub struct AtomicU512( - pub AtomicU64, - pub AtomicU64, - pub AtomicU64, - pub AtomicU64, - pub AtomicU64, - pub AtomicU64, - pub AtomicU64, - pub AtomicU64, -); - -#[allow(dead_code)] -impl AtomicU512 { - pub fn new(value: U512) -> Self { - let (hihihi, hihilo, hilohi, hilolo, lohihi, lohilo, lolohi, lololo) = ( - ((value.0 << 64) >> 64) as u64, - ((value.0 >> 64) << 64) as u64, - ((value.1 << 64) >> 64) as u64, - ((value.1 >> 64) << 64) as u64, - ((value.2 << 64) >> 64) as u64, - ((value.2 >> 64) << 64) as u64, - ((value.3 << 64) >> 64) as u64, - ((value.3 >> 64) << 64) as u64, - ); - AtomicU512( - AtomicU64::new(hihihi), - AtomicU64::new(hihilo), - AtomicU64::new(hilohi), - AtomicU64::new(hilolo), - AtomicU64::new(lohihi), - AtomicU64::new(lohilo), - AtomicU64::new(lolohi), - AtomicU64::new(lololo), - ) - } - - pub fn into_be_bytes(self) -> [u8; 64] { - [ - self.0.into_inner().to_be_bytes(), - self.1.into_inner().to_be_bytes(), - self.2.into_inner().to_be_bytes(), - 
self.3.into_inner().to_be_bytes(), - self.4.into_inner().to_be_bytes(), - self.5.into_inner().to_be_bytes(), - self.6.into_inner().to_be_bytes(), - self.7.into_inner().to_be_bytes(), - ] - .concat() - .try_into() - .expect("AtomicU512 with incorrect length.") - } - - pub fn from_bytes(bytes: &[u8]) -> Self { - let hihihi = u64::from_be_bytes([ - bytes[0], bytes[1], bytes[2], bytes[3], bytes[4], bytes[5], - bytes[6], bytes[7], - ]); - let hihilo = u64::from_be_bytes([ - bytes[8], bytes[9], bytes[10], bytes[11], bytes[12], bytes[13], - bytes[14], bytes[15], - ]); - let hilohi = u64::from_be_bytes([ - bytes[16], bytes[17], bytes[18], bytes[19], bytes[20], bytes[21], - bytes[22], bytes[23], - ]); - let hilolo = u64::from_be_bytes([ - bytes[24], bytes[25], bytes[26], bytes[27], bytes[28], bytes[29], - bytes[30], bytes[31], - ]); - let lohihi = u64::from_be_bytes([ - bytes[32], bytes[33], bytes[34], bytes[35], bytes[36], bytes[37], - bytes[38], bytes[39], - ]); - let lohilo = u64::from_be_bytes([ - bytes[40], bytes[41], bytes[42], bytes[43], bytes[44], bytes[45], - bytes[46], bytes[47], - ]); - let lolohi = u64::from_be_bytes([ - bytes[48], bytes[49], bytes[50], bytes[51], bytes[52], bytes[53], - bytes[54], bytes[55], - ]); - let lololo = u64::from_be_bytes([ - bytes[56], bytes[57], bytes[58], bytes[59], bytes[60], bytes[61], - bytes[62], bytes[63], - ]); - AtomicU512( - AtomicU64::new(hihihi), - AtomicU64::new(hihilo), - AtomicU64::new(hilohi), - AtomicU64::new(hilolo), - AtomicU64::new(lohihi), - AtomicU64::new(lohilo), - AtomicU64::new(lolohi), - AtomicU64::new(lololo), - ) - } -} diff --git a/src/tree_bitmap/atomic_bitmap.rs b/src/tree_bitmap/atomic_bitmap.rs new file mode 100644 index 00000000..27071ca3 --- /dev/null +++ b/src/tree_bitmap/atomic_bitmap.rs @@ -0,0 +1,130 @@ +use parking_lot_core::SpinWait; +use std::fmt::{Binary, Debug}; +use std::sync::atomic::{fence, AtomicU16, AtomicU32, Ordering}; + +use crate::types::BitSpan; + +use super::tree_bitmap_node; + 
+pub struct AtomicPtrBitArr(pub AtomicU16); +pub struct AtomicPfxBitArr(pub AtomicU32); + +pub struct CasResult(pub Result); + +pub(crate) trait AtomicBitmap +where + Self: From, +{ + type InnerType: Binary + + Copy + + Debug + + PartialOrd + + std::ops::BitAnd + + std::ops::BitOr + + std::ops::BitXor + + num_traits::PrimInt; + + fn new() -> Self; + fn compare_exchange( + &self, + current: Self::InnerType, + new: Self::InnerType, + ) -> CasResult; + fn load(&self) -> Self::InnerType; + fn merge_with(&self, node: Self::InnerType) { + let mut spinwait = SpinWait::new(); + let current = self.load(); + + fence(Ordering::Acquire); + + let mut new = current | node; + loop { + match self.compare_exchange(current, new) { + CasResult(Ok(_)) => { + return; + } + CasResult(Err(current)) => { + new = current | node; + } + } + spinwait.spin_no_yield(); + } + } +} + +impl AtomicPtrBitArr { + pub(crate) fn ptr_range(&self, bs: BitSpan) -> (u16, u8) { + let ptrbitarr = self.load(); + tree_bitmap_node::ptr_range(ptrbitarr, bs) + } + + pub(crate) fn as_stride_size(&self) -> u32 { + (self.load() as u32) << 1 + } +} + +impl AtomicBitmap for AtomicPtrBitArr { + type InnerType = u16; + + fn new() -> Self { + AtomicPtrBitArr(AtomicU16::new(0)) + } + fn compare_exchange( + &self, + current: Self::InnerType, + new: Self::InnerType, + ) -> CasResult { + CasResult(self.0.compare_exchange( + current, + new, + Ordering::Acquire, + Ordering::Relaxed, + )) + } + + fn load(&self) -> Self::InnerType { + self.0.load(Ordering::Relaxed) + } +} + +impl From for AtomicPtrBitArr { + fn from(value: u16) -> Self { + Self(AtomicU16::new(value)) + } +} + +impl AtomicPfxBitArr { + pub(crate) fn ms_pfx_mask(&self, bs: BitSpan) -> u32 { + let pfxbitarr = self.load(); + tree_bitmap_node::ms_prefix_mask_arr(bs) & pfxbitarr + } +} + +impl AtomicBitmap for AtomicPfxBitArr { + type InnerType = u32; + + fn new() -> Self { + AtomicPfxBitArr(AtomicU32::new(0)) + } + fn compare_exchange( + &self, + current: 
Self::InnerType, + new: Self::InnerType, + ) -> CasResult { + CasResult(self.0.compare_exchange( + current, + new, + Ordering::Acquire, + Ordering::Relaxed, + )) + } + fn load(&self) -> Self::InnerType { + self.0.load(Ordering::Relaxed) + } +} + +impl From for AtomicPfxBitArr { + fn from(value: u32) -> Self { + Self(AtomicU32::new(value)) + } +} diff --git a/src/tree_bitmap/mod.rs b/src/tree_bitmap/mod.rs new file mode 100644 index 00000000..5cfa6931 --- /dev/null +++ b/src/tree_bitmap/mod.rs @@ -0,0 +1,1154 @@ +mod atomic_bitmap; +mod node_cht; + +mod tree_bitmap_iterators; +mod tree_bitmap_node; +mod tree_bitmap_query; + +pub(crate) use tree_bitmap_node::{ + NodeId, NodeMoreSpecificChildIter, NodeMoreSpecificsPrefixIter, + TreeBitMapNode, +}; +use zerocopy::FromZeros; + +// ----------- Dev Log for the RIB ------------------------------------------- +// +// The StarCastAfRib provides in-memory storage for the TreeBitMapNodes +// and for prefixes and their meta-data. The storage for node is on the +// `buckets` field, and the prefixes are stored in, well, the `prefixes` +// field. They are both organised in the same way, as chained hash tables, +// one per (prefix|node)-length. The hashing function (that is detailed +// lower down in this file), basically takes the address part of the +// node|prefix and uses `(node|prefix)-address part % bucket size` +// as its index. +// +// Both the prefixes and the buckets field have one bucket per (prefix|node) +// -length that start out with a fixed-size array. The size of the arrays is +// set in the rotonda_macros/maps.rs file. +// +// For lower (prefix|node)-lengths the number of elements in the array is +// equal to the number of prefixes in that length, so there's exactly one +// element per (prefix|node). 
For greater lengths there will be collisions, +// in that case the stored (prefix|node) will have a reference to another +// bucket (also of a fixed size), that holds a (prefix|node) that collided +// with the one that was already stored. A (node|prefix) lookup will have to +// go over all (node|prefix) buckets until it matches the requested (node| +// prefix) or it reaches the end of the chain. +// +// The chained (node|prefixes) are occupied at a first-come, first-serve +// basis, and are not re-ordered on new insertions of (node|prefixes). This +// may change in the future, since it prevents iterators from being ordered. +// +// One of the nice things of having one table per (node|prefix)-length is that +// a search can start directly at the prefix-length table it wishes, and go +// go up and down into other tables if it needs to (e.g., because more- or +// less-specifics were asked for). In contrast if you do a lookup by +// traversing the tree of nodes, we would always have to go through the root- +// node first and then go up the tree to the requested node. The lower nodes +// of the tree (close to the root) would be a formidable bottle-neck then. +// +// Previously, the meta-data was an atomically stored value, that was required +// to implement the `Meta` and the `Clone` trait. New meta-data instances were +// stored atomically without further ado, but updates to a piece of meta-data +// were done by merging the previous meta-data with the new meta-data, through +// use of the `MergeUpdate` trait. +// +// The `upsert_prefix` methods were used to retrieve only the most recent +// insert for a prefix (for now). 
+// +// Prefix example +// +// (level 0 arrays) prefixes bucket +// /len size +// ┌──┐ +// len /0 │ 0│ 1 1 ■ +// └──┘ │ +// ┌──┬──┐ │ +// len /1 │00│01│ 2 2 │ +// └──┴──┘ perfect +// ┌──┬──┬──┬──┐ hash +// len /2 │ │ │ │ │ 4 4 │ +// └──┴──┴──┴──┘ │ +// ┌──┬──┬──┬──┬──┬──┬──┬──┐ │ +// len /3 │ │ │ │ │ │ │ │ │ 8 8 ■ +// └──┴──┴──┴──┴──┴──┴──┴──┘ +// ┌──┬──┬──┬──┬──┬──┬──┬──┐ ┌────────────┐ +// len /4 │ │ │ │ │ │ │ │ │ 8 16 ◀────────│ collision │ +// └──┴──┴──┴┬─┴──┴──┴──┴──┘ └────────────┘ +// └───┐ +// │ ┌─collision─────────┐ +// ┌───▼───┐ │ │ +// │ │ ◀────────│ 0x0100 and 0x0101 │ +// │ 0x010 │ └───────────────────┘ +// │ │ +// ├───────┴──────────────┬──┬──┐ +// │ StoredPrefix 0x0101 │ │ │ +// └──────────────────────┴─┬┴─┬┘ +// │ │ +// ┌────────────────────┘ └──┐ +// ┌──────────▼──────────┬──┐ ┌─▼┬──┐ +// ┌─▶│ metadata (current) │ │ │ 0│ 1│ (level 1 array) +// │ └─────────────────────┴──┘ └──┴──┘ +// merge└─┐ │ │ +// update │ ┌────────────┘ │ +// │┌──────────▼──────────┬──┐ ┌───▼───┐ +// ┌─▶│ metadata (previous) │ │ │ │ +// │ └─────────────────────┴──┘ │ 0x0 │ +// merge└─┐ │ │ │ +// update │ ┌────────────┘ ├───────┴──────────────┬──┐ +// │┌──────────▼──────────┬──┐ │ StoredPrefix 0x0110 │ │ +// │ metadata (oldest) │ │ └──────────────────────┴──┘ +// └─────────────────────┴──┘ │ +// ┌─────────────┘ +// ┌──────────▼──────────────┐ +// │ metadata (current) │ +// └─────────────────────────┘ + +// Note about the memory usage of the data-structures of the Buckets +// +// As said, the prefixes and nodes are stored in buckets. A bucket right now +// is of type `[MaybeUnit>]`, this has the advantage +// that the length can be variable, based on the stride size for that level. +// It saves us to have to implement a generic something. +// Another advantage is the fixed place in which an atomic StoredPrefix +// lives: this makes compare-and-swapping it relatively straight forward. 
+// Each accessing thread would try to read/write the exact same entry in the
+// array, so there shouldn't be any 'rug pulling' on the whole array.
+//
+// A disadvantage is that this is a fixed size, sparse array the moment it
+// is created. Theoretically, an `Atomic`
+// would not have this disadvantage. Manipulating the whole vec atomically
+// though is very tricky (we would have to atomically compare-and-swap the
+// whole vec each time the prefix meta-data is changed) and inefficient,
+// since we would have to either keep the vec sorted on `PrefixId` at all
+// times, or, we would have to inspect each value in the vec on *every* read
+// or write to find the StoredPrefix (this is a challenge in itself, since the
+// StoredPrefix needs to be read atomically to retrieve the PrefixId).
+// Compare-and-swapping a whole vec most probably would need a hash over the
+// vec to determine whether it was changed. I gave up on this approach.
+//
+// Another approach to try to limit the memory use is to try to use other
+// indexes in the same array on collision (the array mentioned above), before
+// heading off and following the reference to the next bucket. This would
+// limit the amount of (sparse) arrays being created for a typical prefix
+// treebitmap, at the cost of longer average search times. Two
+// implementations of this approach are Cuckoo hashing[^1], and Skip Lists.
+// Skip lists[^2] are a probabilistic data-structure, famously used by Redis,
+// (and by TiKv). I haven't tried either of these. Crossbeam has a SkipList
+// implementation, that wasn't ready at the time I wrote this. Cuckoo
+// hashing has the advantage of being easier to understand/implement. Maybe
+// Cuckoo hashing can also be combined with Fibonacci hashing[^3]. Note that
+// Robin Hood hashing may be faster than Cuckoo hashing for reads, but it
+// requires shifting around existing entries, which is rather costly to do
+// atomically (and complex). 
+
+// [^1]: [https://en.wikipedia.org/wiki/Cuckoo_hashing]
+// [^2]: [https://docs.rs/crossbeam-skiplist/0.1.1/crossbeam_skiplist/]
+// [^3]: [https://probablydance.com/2018/06/16/fibonacci-hashing-
+// the-optimization-that-the-world-forgot-or-a-better-alternative-
+// to-integer-modulo/]
+
+// Notes on memory leaks in Rotonda-store
+//
+// Both valgrind and miri report memory leaks on the multi-threaded prefix
+// store. Valgrind only reports it when a binary stops using the tree,
+// while still keeping it around. An interrupted use of the mt-prefix-store
+// does not report any memory leaks. Miri is persistent in reporting memory
+// leaks in the mt-prefix-store. They both report the memory leaks in the same
+// location: the init method of the node- and prefix-buckets.
+//
+// I have reasons to believe these reported memory leaks aren't real, or that
+// crossbeam-epoch leaks a bit of memory when creating a new `Atomic`
+// instance. Since neither prefix nor node buckets can or should be dropped
+// this is not a big issue anyway, it just means that an `Atomic` occupies
+// more memory than it could in an optimal situation. Since we're not storing
+// the actual meta-data in an `Atomic` (it is stored in a `flurry Map`), this
+// means memory usage won't grow on updating the meta-data on a prefix,
+// (unless the meta-data itself grows of course, but that's up to the user).
+//
+// To get a better understanding on the nature of the reported memory leaks I
+// have created a branch (`vec_set`) that replaces the dynamically sized array
+// with a (equally sparse) Vec, that is not filled with `Atomic::null()`, but
+// with `Option= 0.4
+
+// The above scheme is outdated! After a few days of benchmarking, it was
+// found that storing the meta-data in `RwLock` structures actually
+// performs better in both time, and space. Also the overall performance
+// is way more predictable and somewhat linearly related to the busyness of
+// the whole system. Furthermore it was found that using RwLock around the
+// HashMaps, instead of mutexes (from std) was around 2% slower at insert
+// time, while we believe (we haven't tested this), that read performance will
+// be superior to mutex. In terms of usability `RwLock` do not require
+// the user to implement the RCU-style `MergeUpdate` trait (it is removed
+// now).
+
+// Adding the possibility of storing more than one piece of meta-data for a
+// prefix (through the use of the MUI), made the RCU style storing very awkward:
+// all the previous pieces of meta-data (let's call them records), collected
+// in a HashMap, needed to be copied out of the store, modified, and copied back
+// in, while being able to fail, and retried. Locking these HashMaps is way
+// more efficient, both in time (copying costs time), and memory (copying,
+// costs, well, memory). So what we have now, is a hybrid tree, where the
+// "core" consists of RCU-style, lock-free nodes (that can't be deleted!), and
+// locked structures at the "edges" (not leaves, because all nodes can carry
+// meta-data).
+
+use crate::cht::{nodeset_size, prev_node_size, Cht, Value};
+use crate::errors::{FatalError, FatalResult};
+use crate::rib::STRIDE_SIZE;
+use crate::stats::Counters;
+use crate::types::{BitSpan, PrefixId};
+use crossbeam_epoch::{Atomic, Guard, Owned, Shared};
+use log::{debug, error, log_enabled, trace};
+use node_cht::{NodeCht, NodeSet, StoredNode};
+use roaring::RoaringBitmap;
+use tree_bitmap_node::NewNodeOrIndex;
+
+use std::sync::atomic::{AtomicBool, AtomicU16, AtomicU32, Ordering};
+use std::{fmt::Debug, marker::PhantomData};
+
+use crate::types::AddressFamily;
+use atomic_bitmap::{AtomicBitmap, AtomicPfxBitArr, AtomicPtrBitArr};
+
+use crate::types::errors::PrefixStoreError;
+
+#[cfg(feature = "cli")]
+use ansi_term::Colour;
+
+//--------------------- TreeBitMap ------------------------------------------
+
+// The tree that holds the existence information for all prefixes for all
+// strategies. 
This tree is also used to find all less- and more-specifics and +//iterate over them. It also holds a bitmap that contains RIB-wide withdrawn +//muis (peers in most cases). +#[derive(Debug)] +pub(crate) struct TreeBitMap { + // the chained hash table that backs the treebitmap + node_cht: NodeCht, + // the bitmap that holds RIB-wide withdrawn muis (e.g. peers) + withdrawn_muis_bmin: Atomic, + // number of prefixes in the store, etc. + counters: Counters, + // see the rant on update_default_route_prefix_meta + default_route_exists: AtomicBool, +} + +impl TreeBitMap { + pub(crate) fn new() -> Result> { + let tree_bitmap = Self { + node_cht: Cht::init(), + withdrawn_muis_bmin: RoaringBitmap::new().into(), + counters: Counters::default(), + default_route_exists: AtomicBool::new(false), + }; + + let _retry_count = tree_bitmap + .store_node( + NodeId::dangerously_new_with_id_as_is( + ::new_zeroed(), + 0, + ), + 0_u32, + TreeBitMapNode { + ptrbitarr: AtomicPtrBitArr(AtomicU16::new(0)), + pfxbitarr: AtomicPfxBitArr(AtomicU32::new(0)), + _af: PhantomData, + }, + ) + .map_err(|_| "Cannot create root for in memory tree")?; + + Ok(tree_bitmap) + } + + // Sets the bit for the requested prefix to 1 in the corresponding + // pfxbitarr in the tree. + // + // returns a Result over a tuple of (retry_count, existed), where + // `retry_count` is the accumulated number of times all the atomic + // operations involved had to be retried. 
+ pub(crate) fn set_prefix_exists( + &self, + pfx: PrefixId, + mui: u32, + ) -> Result<(u32, bool), PrefixStoreError> { + if pfx.len() == 0 { + let prefix_new = + !self.default_route_exists.swap(true, Ordering::Acquire); + return self + .update_default_route_prefix_meta(mui) + .map(|(rc, _mui_exists)| (rc, !prefix_new)) + .map_err(|_| PrefixStoreError::StoreNotReadyError); + } + + let mut stride_end: u8 = 0; + let mut cur_i = self.get_root_node_id(); + // let mut level: u8 = 0; + let mut acc_retry_count = 0; + + let retry_and_exists = loop { + stride_end += STRIDE_SIZE; + let nibble_len = if pfx.len() < stride_end { + STRIDE_SIZE + pfx.len() - stride_end + } else { + STRIDE_SIZE + }; + let bit_span = AF::into_bit_span( + pfx.bits(), + stride_end - STRIDE_SIZE, + nibble_len, + ); + let is_last_stride = pfx.len() <= stride_end; + let stride_start = stride_end - STRIDE_SIZE; + + let node_result = { + let local_retry_count = 0; + // retrieve_node_mut updates the bitmap index if + // necessary. + if let Some(current_node) = self.retrieve_node_mut(cur_i, mui) + { + match current_node.eval_node_or_prefix_at( + bit_span, + // All the bits of the search prefix, but with + // a length set to the start of the current + // stride. + NodeId::dangerously_new_with_id_as_is( + pfx.bits(), + stride_start, + ), + is_last_stride, + ) { + (NewNodeOrIndex::NewNode(n), retry_count) => { + // Stride3 logs to stats[0], Stride4 logs + // to stats[1], etc. + // $self.stats[$stats_level].inc($level); + + // get a new identifier for the node we're + // going to create. + let new_id = NodeId::new_with_cleaned_id( + pfx.bits(), + stride_start + bit_span.len, + ); + + // store the new node in the in_memory + // part of the RIB. It returns the created + // id and the number of retries before + // success. 
+ match self.store_node(new_id, mui, n) { + Ok((node_id, s_retry_count)) => Ok(( + node_id, + acc_retry_count + + s_retry_count + + retry_count, + )), + Err(err) => Err(err), + } + } + ( + NewNodeOrIndex::ExistingNode(node_id), + retry_count, + ) => { + if log_enabled!(log::Level::Trace) + && local_retry_count > 0 + { + trace!( + "{} contention: Node already exists {}", + std::thread::current() + .name() + .unwrap_or("unnamed-thread"), + node_id + ) + } + Ok(( + node_id, + acc_retry_count + + local_retry_count + + retry_count, + )) + } + (NewNodeOrIndex::NewPrefix, retry_count) => { + break ( + acc_retry_count + + local_retry_count + + retry_count, + false, + ) + } + (NewNodeOrIndex::ExistingPrefix, retry_count) => { + break ( + acc_retry_count + + local_retry_count + + retry_count, + true, + ) + } + } + } else { + return Err(PrefixStoreError::NodeCreationMaxRetryError); + } + }; + + match node_result { + Ok((next_id, retry_count)) => { + cur_i = next_id; + // level += 1; + acc_retry_count += retry_count; + } + Err(err) => { + if log_enabled!(log::Level::Error) { + error!( + "{} failing to store (intermediate) node {}. +Giving up this node. 
This shouldn't happen!", + std::thread::current() + .name() + .unwrap_or("unnamed-thread"), + cur_i, + ); + error!( + "{} {}", + std::thread::current() + .name() + .unwrap_or("unnamed-thread"), + err + ); + } + } + } + }; + + Ok(retry_and_exists) + } + + pub fn prefix_exists(&self, prefix_id: PrefixId) -> bool { + trace!("pe exists {:?}?", prefix_id); + let (node_id, bs) = self.node_id_for_prefix(&prefix_id); + + match self.retrieve_node(node_id) { + Some(n) => { + let pfxbitarr = n.pfxbitarr.load(); + pfxbitarr & bs.into_bit_pos() > 0 + } + None => false, + } + } + + pub fn prefix_exists_for_mui( + &self, + prefix_id: PrefixId, + mui: u32, + ) -> bool { + trace!("pe exists {:?}?", prefix_id); + let (node_id, bs) = self.node_id_for_prefix(&prefix_id); + + match self.retrieve_node_for_mui(node_id, mui) { + Some(n) => { + let pfxbitarr = n.pfxbitarr.load(); + pfxbitarr & bs.into_bit_pos() > 0 + } + None => false, + } + } + + // Yes, we're hating this. But, the root node has no room for a serial of + // the prefix 0/0 (the default route), which doesn't even matter, unless, + // UNLESS, somebody wants to store a default route. So we have to store a + // serial for this prefix. The normal place for a serial of any prefix is + // on the pfxvec of its paren. But, hey, guess what, the + // default-route-prefix lives *on* the root node, and, you know, the root + // node doesn't have a parent. We can: + // - Create a type RootTreeBitmapNode with a ptrbitarr with a size one + // bigger than a "normal" TreeBitMapNod for the first stride size. no we + // have to iterate over the root-node type in all matches on + // stride_size, just because we have exactly one instance of the + // RootTreeBitmapNode. So no. + // - Make the `get_pfx_index` method on the implementations of the + // `Stride` trait check for a length of zero and branch if it is and + // return the serial of the root node. 
Now each and every call to this + // method will have to check a condition for exactly one instance of + // RootTreeBitmapNode. So again, no. + // - The root node only gets used at the beginning of a search query or an + // insert. So if we provide two specialised methods that will now how to + // search for the default-route prefix and now how to set serial for + // that prefix and make sure we start searching/inserting with one of + // those specialized methods we're good to go. + fn update_default_route_prefix_meta( + &self, + mui: u32, + ) -> FatalResult<(u32, bool)> { + trace!("Updating the default route..."); + + if let Some(_root_node) = + self.retrieve_node_mut(self.get_root_node_id(), mui) + { + self.node_cht + .root_for_len(self.get_root_node_id().len()) + .update_rbm_index(mui) + } else { + Err(FatalError) + } + } + + pub(crate) fn withdrawn_muis_bmin<'a>( + &'a self, + guard: &'a Guard, + ) -> &'a RoaringBitmap { + unsafe { + self.withdrawn_muis_bmin + .load(Ordering::Acquire, guard) + .deref() + } + } + + pub fn mark_mui_as_active( + &self, + mui: u32, + guard: &Guard, + ) -> Result<(), PrefixStoreError> { + let current = self.withdrawn_muis_bmin.load(Ordering::Acquire, guard); + + let mut new = unsafe { current.as_ref() } + .ok_or(PrefixStoreError::StoreNotReadyError)? + .clone(); + + new.remove(mui); + self.update_withdrawn_muis_bmin(current, new, guard) + } + + pub fn mark_mui_as_withdrawn( + &self, + mui: u32, + guard: &Guard, + ) -> Result<(), PrefixStoreError> { + let current = self.withdrawn_muis_bmin.load(Ordering::Acquire, guard); + + let mut new = unsafe { current.as_ref() } + .ok_or(PrefixStoreError::StoreNotReadyError)? 
+ .clone(); + + new.insert(mui); + + self.update_withdrawn_muis_bmin(current, new, guard) + } + + pub(crate) fn update_withdrawn_muis_bmin<'a>( + &self, + current: Shared<'a, RoaringBitmap>, + mut new: RoaringBitmap, + guard: &'a Guard, + ) -> Result<(), PrefixStoreError> { + loop { + match self.withdrawn_muis_bmin.compare_exchange( + current, + Owned::new(new), + Ordering::AcqRel, + Ordering::Acquire, + guard, + ) { + Ok(_) => return Ok(()), + Err(updated) => { + new = unsafe { updated.current.as_ref() } + .ok_or(PrefixStoreError::StoreNotReadyError)? + .clone(); + } + } + } + } + + // Store a new node in the tree, or merge the existing node with this + // node. This might fail disastrously, e.g. in case of failed I/O. + fn store_node( + &self, + id: NodeId, + multi_uniq_id: u32, + new_node: TreeBitMapNode, + ) -> FatalResult<(NodeId, u32)> { + if log_enabled!(log::Level::Trace) { + debug!( + "{} store: Store node {}: {:?} mui {}", + std::thread::current().name().unwrap_or("unnamed-thread"), + id, + new_node, + multi_uniq_id + ); + } + self.counters.inc_nodes_count(); + + let mut nodes = self.node_cht.root_for_len(id.len()); + let mut level = 0; + let mut retry_count = 0; + + loop { + // let this_level = bits_for_len(id.len(), level); + + trace!("{:032b}", id.len()); + trace!("id {:?}", id); + trace!("multi_uniq_id {}", multi_uniq_id); + + // HASHING FUNCTION + let index = Self::hash_node_id(id, level); + + match nodes.read().get(index) { + None => { + // No node exists, so we create one here. 
+ let next_level = nodeset_size(id.len(), level + 1); + + if log_enabled!(log::Level::Trace) { + trace!( + "Empty node found,creating new node {} len{} vl{}", + id, + id.len(), + level + 1 + ); + trace!("Next level {}", next_level); + trace!("Creating space for {} nodes", next_level); + } + + trace!("multi uniq id {}", multi_uniq_id); + trace!("next level {}", next_level); + + // A weird trick to create either a NodeSet with 16 nodes, + // or one without any (for the last stride) + let node_set = NodeSet::init_with_p2_children( + // next_level.saturating_sub(this_level) as usize, + next_level as usize, + ); + + let ptrbitarr = new_node.ptrbitarr.load(); + let pfxbitarr = new_node.pfxbitarr.load(); + + let (stored_node, its_us) = + nodes.read().get_or_init(index, || StoredNode { + node_id: id, + node: new_node, + node_set, + }); + + if stored_node.node_id == id { + stored_node + .node_set + .update_rbm_index(multi_uniq_id)?; + + // merge_with herre contains the critical section! + if !its_us && ptrbitarr != 0 { + retry_count += 1; + stored_node.node.ptrbitarr.merge_with(ptrbitarr); + } + + if !its_us && pfxbitarr != 0 { + retry_count += 1; + stored_node.node.pfxbitarr.merge_with(pfxbitarr); + } + } + + return Ok((id, retry_count)); + } + Some(stored_node) => { + // A node exists, might be ours, might be + // another one. + + if log_enabled!(log::Level::Trace) { + trace!( + "{} store: Node here exists {:?}", + std::thread::current() + .name() + .unwrap_or("unnamed-thread"), + stored_node.node_id + ); + trace!("node_id {:?}", stored_node.node_id); + trace!("node_id {:032b}", stored_node.node_id.bits()); + trace!("id {}", id); + trace!(" id {:032b}", id.bits()); + } + + // See if somebody beat us to creating our + // node already, if so, we still need to do + // work: we have to update the bitmap index + // with the multi_uniq_id we've got from the + // caller. 
+ if id == stored_node.node_id { + stored_node + .node_set + .update_rbm_index(multi_uniq_id)?; + + if new_node.ptrbitarr.load() != 0 { + stored_node + .node + .ptrbitarr + .merge_with(new_node.ptrbitarr.load()); + } + if new_node.pfxbitarr.load() != 0 { + stored_node + .node + .pfxbitarr + .merge_with(new_node.pfxbitarr.load()); + } + + return Ok((id, retry_count)); + } else { + // it's not "our" node, make a (recursive) + // call to create it. + level += 1; + trace!( +"Collision with node_id {}, move to next level: {} len{} next_lvl{} index {}", + stored_node.node_id, + id, + id.len(), + level, + index + ); + + match nodeset_size(id.len(), level) { + // on to the next level! + next_bit_shift if next_bit_shift > 0 => { + nodes = &stored_node.node_set; + } + // There's no next level anymore, we ran out of + // the maximum number of levels for this AF. This + // should happen under no circumstance, there's a + // serious logic error here somewhere. + _ => { + return Err(FatalError); + } + } + } + } + } + } + } + + pub fn retrieve_node_mut( + &self, + id: NodeId, + mui: u32, + ) -> Option<&TreeBitMapNode> { + // HASHING FUNCTION + let mut level = 0; + let mut node; + let mut nodes = self.node_cht.root_for_len(id.len()); + + loop { + let index = Self::hash_node_id(id, level); + match nodes.read().get(index) { + // This arm only ever gets called in multi-threaded code + // where our thread (running this code *now*), andgot + // ahead of another thread: After the other thread created + // the TreeBitMapNode first, it was overtaken by our + // thread running this method, so our thread enounters an + // empty node in the store. 
+ None => { + // let this_level = bits_for_len(id.len(), level); + let next_level = nodeset_size(id.len(), level + 1); + let node_set = NodeSet::init_with_p2_children( + next_level as usize, // next_level.saturating_sub(this_level) as usize, + ); + + // See if we can create the node + (node, _) = + nodes.read().get_or_init(index, || StoredNode { + node_id: id, + node: TreeBitMapNode::new(), + node_set, + }); + + // We may have lost, and a different node than we + // intended could live here, if so go a level deeper + if id == node.node_id { + // Nope, its ours or at least the node we need. + let _retry_count = + node.node_set.update_rbm_index(mui).ok(); + + return Some(&node.node); + }; + } + Some(this_node) => { + node = this_node; + if id == this_node.node_id { + // YES, It's the one we're looking for! + + // Update the rbm_index in this node with the + // multi_uniq_id that the caller specified. This + // is the only atomic operation we need to do + // here. The NodeSet that the index is attached + // to, does not need to be written to, it's part + // of a trie, so it just needs to "exist" (and it + // already does). + let retry_count = + this_node.node_set.update_rbm_index(mui).ok(); + + trace!("Retry_count rbm index {:?}", retry_count); + trace!( + "add multi uniq id to bitmap index {} for node {}", + mui, + this_node.node + ); + return Some(&this_node.node); + }; + } + } + // It isn't ours. Move one level deeper. + level += 1; + match nodeset_size(id.len(), level) { + // on to the next level! + next_bit_shift if next_bit_shift > 0 => { + nodes = &node.node_set; + } + // There's no next level, we found nothing. 
+ _ => return None, + } + } + } + + pub fn retrieve_node( + &self, + id: NodeId, + ) -> Option<&TreeBitMapNode> { + // HASHING FUNCTION + let mut level = 0; + let mut node; + let mut nodes = self.node_cht.root_for_len(id.len()); + + loop { + let index = Self::hash_node_id(id, level); + match nodes.read().get(index) { + // This arm only ever gets called in multi-threaded code + // where our thread (running this code *now*), andgot + // ahead of another thread: After the other thread created + // the TreeBitMapNode first, it was overtaken by our + // thread running this method, so our thread enounters an + // empty node in the store. + None => { + return None; + } + Some(this_node) => { + node = this_node; + if id == this_node.node_id { + // YES, It's the one we're looking for! + return Some(&this_node.node); + }; + } + } + // It isn't ours. Move one level deeper. + level += 1; + match nodeset_size(id.len(), level) { + // on to the next level! + next_bit_shift if next_bit_shift > 0 => { + nodes = &node.node_set; + } + // There's no next level, we found nothing. + _ => return None, + } + } + } + + pub(crate) fn retrieve_node_for_mui( + &self, + id: NodeId, + mui: u32, + ) -> Option<&TreeBitMapNode> { + // HASHING FUNCTION + let mut level = 0; + let mut node; + let mut nodes = self.node_cht.root_for_len(id.len()); + + loop { + let index = Self::hash_node_id(id, level); + match nodes.read().get(index) { + // This arm only ever gets called in multi-threaded code + // where our thread (running this code *now*), andgot + // ahead of another thread: After the other thread created + // the TreeBitMapNode first, it was overtaken by our + // thread running this method, so our thread enounters an + // empty node in the store. + None => { + return None; + } + Some(this_node) => { + // early return if the mui is not in the index + // stored in this node, meaning the mui does not + // appear anywhere in the sub-tree formed from + // this node. 
+ node = this_node; + + let bmin = match this_node.node_set.rbm().read() { + Ok(bmin) => bmin, + // if this lock is poisened, we are still going to + // work with the bmin. The data in the bmin may be + // stale, because of the lock poisoning, but this may + // also happen because of delays in other parts of the + // store in normal circumstances. We are counting on a + // future call to a write method to actually propagate + // a FatalError to the user of the store. + Err(bmin) => bmin.into_inner(), + }; + if !bmin.contains(mui) { + return None; + } + + if id == this_node.node_id { + // YES, It's the one we're looking for! + return Some(&this_node.node); + }; + } + } + // It isn't ours. Move one level deeper. + level += 1; + match nodeset_size(id.len(), level) { + // on to the next level! + next_bit_shift if next_bit_shift > 0 => { + nodes = &node.node_set; + } + // There's no next level, we found nothing. + _ => return None, + } + } + } + + pub(crate) fn get_root_node_id(&self) -> NodeId { + NodeId::dangerously_new_with_id_as_is( + ::new_zeroed(), + 0, + ) + } + + pub fn nodes_count(&self) -> usize { + self.counters.nodes_count() + } + + pub fn prefixes_count(&self) -> usize { + self.counters.prefixes_count().iter().sum() + } + + // len checking does it all + #[allow(clippy::indexing_slicing)] + pub fn prefixes_count_for_len( + &self, + len: u8, + ) -> Result { + if len <= AF::BITS { + Ok(self.counters.prefixes_count()[len as usize]) + } else { + Err(PrefixStoreError::PrefixLengthInvalid) + } + } + + // Calculates the id of the node that COULD host a prefix in its + // ptrbitarr. + pub(crate) fn node_id_for_prefix( + &self, + prefix: &PrefixId, + ) -> (NodeId, BitSpan) { + trace!( + "prefix id bits: {:032b} len: {}", + prefix.bits(), + prefix.len() + ); + let mut acc = 0; + loop { + acc += STRIDE_SIZE; + if acc >= prefix.len() { + let node_len = acc - STRIDE_SIZE; + return ( + NodeId::new_with_cleaned_id(prefix.bits(), node_len), + // NOT THE HASHING FUNCTION! 
+ // Do the right shift in a checked manner, for the sake + // of 0/0. A search for 0/0 will perform a 0 << MAX_LEN, + // which will panic in debug mode (undefined behaviour + // in prod). + BitSpan::new( + ((prefix.bits() << AF::from_u8(node_len)) + .checked_shr_or_zero( + (AF::BITS - (prefix.len() - node_len)).into(), + )) + .dangerously_truncate_to_u32(), + prefix.len() - node_len, + ), + ); + } + } + } + + // ------- THE HASHING FUNCTION ----------------------------------------- + + // Ok, so hashing is really hard, but we're keeping it simple, and + // because we're keeping it simple we're having lots of collisions, but + // we don't care! + // + // We're using a part of bitarray representation of the address part of + // a prefix the as the hash. Sounds complicated, but isn't. + // Suppose we have an IPv4 prefix, say 130.24.55.0/24. + // The address part is 130.24.55.0 or as a bitarray that would be: + // + // pos 0 4 8 12 16 20 24 28 + // bit 1000 0010 0001 1000 0011 0111 0000 0000 + // + // First, we're discarding the bits after the length of the prefix, so + // we'll have: + // + // pos 0 4 8 12 16 20 + // bit 1000 0010 0001 1000 0011 0111 + // + // Now we're dividing this bitarray into one or more levels. A level can + // be an arbitrary number of bits between 1 and the length of the prefix, + // but the number of bits summed over all levels should be exactly the + // prefix length. So in our case they should add up to 24. A possible + // division could be: 4, 4, 4, 4, 4, 4. Another one would be: 12, 12. The + // actual division being used is described in the function + // `::get_bits_for_len` in the `rotonda-macros` crate. 
Each level has + // its own hash, so for our example prefix this would be: + // + // pos 0 4 8 12 16 20 + // level 0 1 + // hash 1000 0010 0001 1000 0011 0111 + // + // level 1 hash: 1000 0010 0001 + // level 2 hash: 1000 0011 0011 + // + // The hash is now converted to a usize integer, by shifting it all the + // way to the right in a u32 and then converting to a usize. Why a usize + // you ask? Because the hash is used by the CustomAllocStorage as the + // index to the array for that specific prefix length and level. + // So for our example this means that the hash on level 1 is now 0x821 + // (decimal 2081) and the hash on level 2 is 0x833 (decimal 2099). + // Now, if we only consider the hash on level 1 and that we're going to + // use that as the index to the array that stores all prefixes, you'll + // notice very quickly that all prefixes starting with 130.[16..31] will + // cause a collision: they'll all point to the same array element. These + // collisions are resolved by creating a linked list from each array + // element, where each element in the list has an array of its own that + // uses the hash function with the level incremented. + + pub(crate) fn hash_node_id(id: NodeId, level: u8) -> usize { + // And, this is all of our hashing function. + // let last_level = if level > 0 { + // bits_for_len(id.len(), level - 1) + // } else { + // 0 + // }; + let last_level = prev_node_size(id.len(), level); + // trace!("bits division {}", this_level); + // trace!( + // "calculated index ({} << {}) >> {}", + // id.get_id().0, + // last_level, + // ((::BITS - (this_level - last_level)) % ::BITS) as usize + // ); + // HASHING FUNCTION + let size = nodeset_size(id.len(), level); + ((id.bits() << AF::from_u8(last_level)) + >> AF::from_u8((::BITS - size) % ::BITS)) + .dangerously_truncate_to_u32() as usize + } +} + +// Partition for stride 4 +// +// ptr bits never happen in the first half of the bitmap for the stride-size. 
Consequently the ptrbitarr can be an integer type +// half the size of the pfxbitarr. +// +// ptr bit arr (u16) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 x +// pfx bit arr (u32) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 +// nibble * 0 1 00 01 10 11 000 001 010 011 100 101 110 111 0000 0001 0010 0011 0100 0101 0110 0111 1000 1001 1010 1011 1100 1101 1110 1111 x +// nibble len offset 0 1 2 3 4 +// +// stride 3: 1 + 2 + 4 + 8 = 15 bits. 2^4 - 1 (1 << 4) - 1. ptrbitarr starts at pos 7 (1 << 3) - 1 +// stride 4: 1 + 2 + 4 + 8 + 16 = 31 bits. 2^5 - 1 (1 << 5) - 1. ptrbitarr starts at pos 15 (1 << 4) - 1 +// stride 5: 1 + 2 + 4 + 8 + 16 + 32 + 64 = 63 bits. 2^6 - 1 +// stride 6: 1 + 2 + 4 + 8 + 16 + 32 + 64 = 127 bits. 2^7 - 1 +// stride 7: 1 + 2 + 4 + 8 + 16 + 32 + 64 = 128 = 256 bits. 2^8 - 1126 +// stride 8: 1 + 2 + 4 + 8 + 16 + 32 + 64 + 128 + 256 = 511 bits. 2^9 - 1 +// +// Ex.: +// pfx 65.0.0.252/30 0100_0001_0000_0000_0000_0000_1111_1100 +// +// nibble 1 (pfx << 0) >> 28 0000_0000_0000_0000_0000_0000_0000_0100 +// bit_pos (1 << nibble length) - 1 + nibble 0000_0000_0000_0000_0000_1000_0000_0000 +// +// nibble 2 (pfx << 4) >> 24 0000_0000_0000_0000_0000_0000_0000_0001 +// bit_pos (1 << nibble length) - 1 + nibble 0000_0000_0000_0000_1000_0000_0000_0000 +// ... 
+// nibble 8 (pfx << 28) >> 0 0000_0000_0000_0000_0000_0000_0000_1100 +// bit_pos (1 << nibble length) - 1 + nibble = (1 << 2) - 1 + 2 = 5 0000_0010_0000_0000_0000_0000_0000_0000 +// 5 - 5 - 5 - 4 - 4 - [4] - 5 +// startpos (2 ^ nibble length) - 1 + nibble as usize + +// This implements the funky stats for a tree +#[cfg(feature = "cli")] +impl std::fmt::Display + for TreeBitMap +{ + fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(_f, "{} prefixes created", self.prefixes_count())?; + writeln!(_f, "{} nodes created", self.nodes_count())?; + writeln!(_f)?; + + writeln!( + _f, + "level\t[{}] prefixes-occupied/max-prefixes percentage_occupied", + Colour::Green.paint("prefixes") + )?; + + let bars = ["▏", "▎", "▍", "▌", "▋", "▊", "▉"]; + const SCALE: u32 = 5500; + for crate::stats::CreatedNodes { + depth_level: len, + count: prefix_count, + } in self.counters.prefix_stats() + { + let max_pfx = u128::overflowing_pow(2, len as u32); + let n = (prefix_count as u32 / SCALE) as usize; + + write!(_f, "/{}\t", len)?; + + for _ in 0..n { + write!(_f, "{}", Colour::Green.paint("█"))?; + } + + write!( + _f, + "{}", + Colour::Green.paint( + *bars + .get( + ((prefix_count as u32 % SCALE) / (SCALE / 7)) + as usize + ) + .unwrap_or(&"NaN") + ) // = scale / 7 + )?; + + write!( + _f, + " {}/{} {:.2}%", + prefix_count, + max_pfx.0, + (prefix_count as f64 / max_pfx.0 as f64) * 100.0 + )?; + + writeln!(_f)?; + } + + Ok(()) + } +} diff --git a/src/tree_bitmap/node_cht.rs b/src/tree_bitmap/node_cht.rs new file mode 100644 index 00000000..ca4507f9 --- /dev/null +++ b/src/tree_bitmap/node_cht.rs @@ -0,0 +1,96 @@ +use std::sync::RwLock; + +use log::{debug, log_enabled}; + +use roaring::RoaringBitmap; + +use super::tree_bitmap_node::{NodeId, TreeBitMapNode}; +use crate::cht::{Cht, OnceBoxSlice, Value}; +use crate::errors::{FatalError, FatalResult}; +use crate::types::errors::PrefixStoreError; +use crate::types::AddressFamily; + +pub(crate) type NodeCht = + Cht, 
ROOT_SIZE, 4>; + +#[derive(Debug)] +pub(crate) struct StoredNode +where + Self: Sized, + AF: AddressFamily, +{ + // the id of this node. since we're using linked lists to store nodes in + // first-come-first-served order, we need to store the actual node id. + pub(crate) node_id: NodeId, + // The ptrbitarr and pfxbitarr for this node + pub(crate) node: TreeBitMapNode, + // Child nodes linked from this node + pub(crate) node_set: NodeSet, +} + +#[derive(Debug)] +pub(crate) struct NodeSet( + OnceBoxSlice>, + // A Bitmap index that keeps track of the `multi_uniq_id`s (mui) that are + // present in value collections in the meta-data tree in the child nodes + RwLock, +); + +impl NodeSet { + pub(crate) fn rbm(&self) -> &RwLock { + &self.1 + } + + pub(crate) fn update_rbm_index( + &self, + multi_uniq_id: u32, + ) -> FatalResult<(u32, bool)> + where + AF: crate::types::AddressFamily, + { + let try_count = 0; + let mut rbm = self.1.write().map_err(|_| FatalError)?; + let absent = rbm.insert(multi_uniq_id); + + Ok((try_count, !absent)) + } + + pub(crate) fn _remove_from_rbm_index( + &self, + multi_uniq_id: u32, + _guard: &crate::epoch::Guard, + ) -> Result + where + AF: crate::types::AddressFamily, + { + let try_count = 0; + + let mut rbm = self + .1 + .write() + .map_err(|_| PrefixStoreError::StoreNotReadyError)?; + rbm.remove(multi_uniq_id); + + Ok(try_count) + } + + pub(crate) fn read(&self) -> &OnceBoxSlice> { + &self.0 + } +} + +impl Value for NodeSet { + fn init_with_p2_children(p2_size: usize) -> Self { + if log_enabled!(log::Level::Debug) { + debug!( + "{} store: creating space for {} nodes", + std::thread::current().name().unwrap_or("unnamed-thread"), + 2_usize.pow(p2_size as u32) + ); + } + + let size = if p2_size == 0 { 0 } else { 1 << p2_size }; + + NodeSet(OnceBoxSlice::new(size), RoaringBitmap::new().into()) + } +} diff --git a/src/tree_bitmap/tree_bitmap_iterators.rs b/src/tree_bitmap/tree_bitmap_iterators.rs new file mode 100644 index 00000000..89607992 --- 
/dev/null +++ b/src/tree_bitmap/tree_bitmap_iterators.rs @@ -0,0 +1,361 @@ +// ----------- Store Iterators ---------------------------------------------- +// +// This file hosts the iterators for the Rib and implementations for the +// methods that start'em. There are 3 Iterators: +// +// 1. an iterator `PrefixIter` that iterates over ALL of the prefix buckets of +// the CHT backing the TreeBitMap. +// +// 2. a MoreSpecificsIterator that starts from a prefix in the prefix buckets +// for that particular prefix length, but uses the node in the TreeBitMap to +// find its more specifics. +// +// 3. a LessSpecificIterator, that just reduces the prefix size bit-by-bit and +// looks in the prefix buckets for the diminuishing prefix. +// +// The Iterators that start from the root node of the TreeBitMap (which +// is the only option for the single-threaded TreeBitMap) live in the +// deprecated_node.rs file. They theoretically should be slower and cause more +// contention, since every lookup has to go through the levels near the root +// in the TreeBitMap. + +use crate::TreeBitMap; +use crate::{ + tree_bitmap::tree_bitmap_node::{ + NodeMoreSpecificChildIter, NodeMoreSpecificsPrefixIter, + }, + types::{AddressFamily, BitSpan, PrefixId}, +}; + +use inetnum::addr::Prefix; +use log::{log_enabled, trace}; + +// ----------- MoreSpecificPrefixIter ------------------------------------ + +// A iterator over all the more-specifics for a given prefix. +// +// This iterator is somewhat different from the other *PrefixIterator types, +// since it uses the Nodes to select the more specifics. An Iterator that +// would only use the Prefixes in the store could exist, but iterating over +// those in search of more specifics would be way more expensive. + +// The first iterator it goes over should have a bit_span that is the +// difference between the requested prefix and the node that hosts that +// prefix. 
See the method initializing this iterator (`get_node_for_id_prefix` +// takes care of it in there). The consecutive iterators will all have a +// bit_span of { bits: 0, len: 0 }. Yes, we could also use the PrefixIter +// there (it iterates over all prefixes of a node), but then we would have to +// deal with two different types of iterators. Note that the iterator is +// neither depth- or breadth-first and the results are essentially unordered. + +pub(crate) struct MoreSpecificPrefixIter< + 'a, + AF: AddressFamily, + const ROOT_SIZE: usize, +> { + tree: &'a TreeBitMap, + cur_ptr_iter: NodeMoreSpecificChildIter, + cur_pfx_iter: NodeMoreSpecificsPrefixIter, + parent_and_position: Vec>, +} + +impl<'a, AF: AddressFamily + 'a, const ROOT_SIZE: usize> Iterator + for MoreSpecificPrefixIter<'a, AF, ROOT_SIZE> +{ + type Item = PrefixId; + + fn next(&mut self) -> Option { + trace!("MoreSpecificsPrefixIter"); + + loop { + // first drain the current prefix iterator until empty. + let next_pfx = self.cur_pfx_iter.next(); + + if next_pfx.is_some() { + return next_pfx; + } + + // Our current prefix iterator for this node is done, look for + // the next pfx iterator of the next child node in the current + // ptr iterator. 
+ trace!("resume ptr iterator {:?}", self.cur_ptr_iter); + + let mut next_ptr = self.cur_ptr_iter.next(); + + // Our current ptr iterator is also done, maybe we have a parent + if next_ptr.is_none() { + trace!("try for parent"); + if let Some(cur_ptr_iter) = self.parent_and_position.pop() { + trace!("continue with parent"); + self.cur_ptr_iter = cur_ptr_iter; + next_ptr = self.cur_ptr_iter.next(); + } else { + trace!("no more parents"); + return None; + } + } + + if let Some(next_ptr) = next_ptr { + let node = self.tree.retrieve_node(next_ptr); + + match node { + // Some(next_node) => { + // // copy the current iterator into the parent vec and create + // // a new ptr iterator for this node + // self.parent_and_position.push(self.cur_ptr_iter); + // let ptr_iter = next_node.more_specific_ptr_iter( + // next_ptr, + // BitSpan { bits: 0, len: 0 }, + // ); + // self.cur_ptr_iter = ptr_iter.wrap(); + + // // trace!( + // // "next stride new iterator stride 3 {:?} start \ + // // bit_span {:?}", + // // self.cur_ptr_iter, + // // self.start_bit_span + // // ); + // self.cur_pfx_iter = next_node + // .more_specific_pfx_iter( + // next_ptr, + // BitSpan::new(0, 0), + // ) + // .wrap(); + // } + Some(next_node) => { + // create new ptr iterator for this node. + self.parent_and_position.push(self.cur_ptr_iter); + let ptr_iter = next_node.more_specific_ptr_iter( + next_ptr, + BitSpan { bits: 0, len: 0 }, + ); + self.cur_ptr_iter = ptr_iter; + + trace!( + "next stride new iterator stride 4 {:?} start \ + bit_span 0 0", + self.cur_ptr_iter, + ); + self.cur_pfx_iter = next_node.more_specific_pfx_iter( + next_ptr, + BitSpan::new(0, 0), + ); + } + // Some(SizedStrideRef::Stride5(next_node)) => { + // // create new ptr iterator for this node. 
+ // self.parent_and_position.push(self.cur_ptr_iter); + // let ptr_iter = next_node.more_specific_ptr_iter( + // next_ptr, + // BitSpan { bits: 0, len: 0 }, + // ); + // self.cur_ptr_iter = ptr_iter.wrap(); + + // // trace!( + // // "next stride new iterator stride 5 {:?} start \ + // // bit_span {:?}", + // // self.cur_ptr_iter, + // // self.start_bit_span + // // ); + // self.cur_pfx_iter = next_node + // .more_specific_pfx_iter( + // next_ptr, + // BitSpan::new(0, 0), + // ) + // .wrap(); + // } + None => { + println!("no node here."); + return None; + } + }; + } + } + } +} + +pub(crate) struct LMPrefixIter<'a, AF: AddressFamily, const ROOT_SIZE: usize> +{ + tree: &'a TreeBitMap, + prefix: PrefixId, +} + +impl Iterator + for LMPrefixIter<'_, AF, ROOT_SIZE> +{ + type Item = PrefixId; + fn next(&mut self) -> Option { + trace!("search lm prefix for {:?}", self.prefix); + + loop { + if self.prefix.len() == 0 { + return None; + } + + if self.tree.prefix_exists(self.prefix) { + return Some(self.prefix); + } + + self.prefix = self.prefix.truncate_to_len(self.prefix.len() - 1); + } + } +} + +// ----------- LessSpecificPrefixIter --------------------------------------- + +// This iterator iterates over all the less-specifics for a given prefix. It +// does *not* use the tree, it goes directly into the CustomAllocStorage and +// retrieves the less-specifics by going from len to len, searching for the +// prefixes. + +pub(crate) struct LessSpecificPrefixIter< + 'a, + AF: AddressFamily, + const ROOT_SIZE: usize, +> { + tree: &'a TreeBitMap, + prefix: PrefixId, + cur_level: u8, +} + +impl Iterator + for LessSpecificPrefixIter<'_, AF, ROOT_SIZE> +{ + type Item = PrefixId; + + // This iterator moves down all prefix lengths, starting with the length + // of the (search prefix - 1), looking for shorter prefixes, where the + // its bits are the same as the bits of the search prefix. 
+ fn next(&mut self) -> Option { + trace!("search next less-specific for {:?}", self.prefix); + self.cur_level = self.cur_level.saturating_sub(1); + + loop { + if self.cur_level == 0 { + return None; + } + + let lvl_pfx = self.prefix.truncate_to_len(self.cur_level); + if self.tree.prefix_exists(lvl_pfx) { + return Some(lvl_pfx); + } + + self.cur_level = self.cur_level.saturating_sub(1); + } + } +} + +// ----------- Iterator initialization methods for Rib ----------------------- + +// These are only the methods that are starting the iterations. All other +// methods for Rib are in the main rib.rs file. + +impl<'a, AF: AddressFamily, const ROOT_SIZE: usize> + TreeBitMap +{ + // Iterator over all more-specific prefixes, starting from the given + // prefix at the given level and cursor. + pub fn more_specific_prefix_iter_from( + &'a self, + start_prefix_id: PrefixId, + ) -> impl Iterator> + 'a { + trace!("more specifics for {:?}", start_prefix_id); + + // A v4 /32 or a v6 /128 doesn't have more specific prefixes 🤓. + if start_prefix_id.len() >= AF::BITS { + None + } else { + // calculate the node start_prefix_id lives in. 
+ let (start_node_id, start_bs) = + self.node_id_for_prefix(&start_prefix_id); + trace!("start node {}", start_node_id); + trace!( + "start prefix id {:032b} (len {})", + start_prefix_id.bits(), + start_prefix_id.len() + ); + trace!( + "start node id {:032b} (bits {} len {})", + start_node_id.bits(), + start_node_id.bits(), + start_node_id.len() + ); + trace!( + "start pfx bit span {:08b} {} len {}", + start_bs.bits, + start_bs.bits, + start_bs.len + ); + trace!( + "start ptr bit span {:08b} {} len {}", + start_bs.bits, + start_bs.bits, + start_bs.len + ); + + let cur_pfx_iter: NodeMoreSpecificsPrefixIter; + let cur_ptr_iter: NodeMoreSpecificChildIter; + let node = self.retrieve_node(start_node_id); + + if let Some(node) = node { + let n = node; + { + cur_pfx_iter = + n.more_specific_pfx_iter(start_node_id, start_bs); + trace!("---------------------"); + trace!("start iterating nodes"); + cur_ptr_iter = + n.more_specific_ptr_iter(start_node_id, start_bs); + }; + + Some(MoreSpecificPrefixIter { + tree: self, + cur_pfx_iter, + cur_ptr_iter, + parent_and_position: vec![], + }) + } else { + None + } + } + .into_iter() + .flatten() + } + + // Iterator over all less-specific prefixes, starting from the given + // prefix at the given level and cursor. + pub fn less_specific_prefix_iter( + &'a self, + start_prefix_id: PrefixId, + ) -> impl Iterator> + 'a { + if log_enabled!(log::Level::Trace) { + trace!("less specifics for {}", Prefix::from(start_prefix_id)); + trace!("level {}, len {}", 0, start_prefix_id.len()); + } + + LessSpecificPrefixIter { + tree: self, + prefix: start_prefix_id, + cur_level: start_prefix_id.len(), + } + } + + pub fn longest_matching_prefix( + &'a self, + prefix: PrefixId, + ) -> Option> { + if log_enabled!(log::Level::Trace) { + trace!("lmp for {}", Prefix::from(prefix)); + } + + LMPrefixIter { tree: self, prefix }.next() + } + + // Iterator over all the prefixes in the in_memory store. 
+ pub fn prefixes_iter(&'a self) -> impl Iterator + 'a { + self.more_specific_prefix_iter_from(PrefixId::new( + AF::new(0_u32.into()), + 0, + )) + .map(Prefix::from) + } +} diff --git a/src/tree_bitmap/tree_bitmap_node.rs b/src/tree_bitmap/tree_bitmap_node.rs new file mode 100644 index 00000000..aa707a2f --- /dev/null +++ b/src/tree_bitmap/tree_bitmap_node.rs @@ -0,0 +1,706 @@ +use std::sync::atomic::{AtomicU16, AtomicU32}; +use std::{fmt::Debug, marker::PhantomData}; + +use log::{log_enabled, trace}; +use parking_lot_core::SpinWait; + +use crate::tree_bitmap::atomic_bitmap::{ + AtomicBitmap, AtomicPfxBitArr, AtomicPtrBitArr, CasResult, +}; +use crate::types::BitSpan; + +use crate::rib::{BIT_SPAN_SIZE, STRIDE_SIZE}; +use crate::types::AddressFamily; +use crate::types::PrefixId; + +//------------ TreeBitMap Node ---------------------------------------------- + +// The treebitmap turned into a "trie-bitmap", really. A Node in the +// treebitmap now only holds a ptrbitarr bitmap and a pfxbitarr bitmap, that +// indicate whether a node or a prefix exists in that spot. The corresponding +// node Ids and prefix ids are calculated from their position in the array. +// Nodes do *NOT* have a clue where they are in the tree, so they don't know +// the node id they represent. Instead, the node id is calculated from the +// position in the tree. That's why several methods take a `base_prefix` as a +// an argument: it represents the ID of the node itself. +// +// The elision of both the collection of children nodes and the prefix nodes +// in a treebitmap node is enabled by the storage backend for the +// multi-threaded store, since holds its entries keyed on the [node|prefix] +// id. 
(in contrast with arrays or `vec`s, that have +pub(crate) struct TreeBitMapNode +where + Self: Sized, + AF: AddressFamily, +{ + pub ptrbitarr: AtomicPtrBitArr, + pub pfxbitarr: AtomicPfxBitArr, + pub _af: PhantomData, +} + +impl Debug for TreeBitMapNode +where + AF: AddressFamily, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("TreeBitMapNode") + .field("ptrbitarr", &self.ptrbitarr.load()) + .field("pfxbitarr", &self.pfxbitarr.load()) + .finish() + } +} + +impl std::fmt::Display for TreeBitMapNode +where + AF: AddressFamily, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "TreeBitMapNode {{ ptrbitarr: {:?}, pfxbitarr: {:?} }}", + self.ptrbitarr.load(), + self.pfxbitarr.load(), + ) + } +} + +impl TreeBitMapNode +where + AF: AddressFamily, +{ + pub(crate) fn new() -> Self { + TreeBitMapNode { + ptrbitarr: AtomicPtrBitArr::new(), + pfxbitarr: AtomicPfxBitArr::new(), + _af: PhantomData, + } + } + + // ------- Iterators ---------------------------------------------------- + + // Iterate over the more specific prefixes ids contained in this node + pub(crate) fn more_specific_pfx_iter( + &self, + base_prefix: NodeId, + start_bs: BitSpan, + ) -> NodeMoreSpecificsPrefixIter { + debug_assert!(start_bs.check()); + NodeMoreSpecificsPrefixIter:: { + pfxbitarr: self.pfxbitarr.ms_pfx_mask(start_bs), + base_prefix, + } + } + + // Iterate over the nodes that contain more specifics for the requested + // base_prefix and corresponding bit_span. 
+ pub(crate) fn more_specific_ptr_iter( + &self, + base_prefix: NodeId, + start_bs: BitSpan, + ) -> NodeMoreSpecificChildIter { + debug_assert!(start_bs.check()); + // let ptrbitarr = self.ptrbitarr.load(); + let (bitrange, start_cursor) = self.ptrbitarr.ptr_range(start_bs); + + NodeMoreSpecificChildIter:: { + bitrange, + base_prefix, + start_bs, + start_cursor, + } + } + + // ------- Search by Traversal methods ----------------------------------- + + // Inspects the stride (nibble, nibble_len) to see it there's already a + // child node (if not at the last stride) or a prefix (if it's the last + // stride). + // + // Returns a tuple of which the first element is one of: + // - A newly created child node. + // - The index of the existing child node in the global `nodes` vec + // - A newly created Prefix + // - The index of the existing prefix in the global `prefixes` vec + // and the second element is the number of accumulated retries for the + // compare_exchange of both ptrbitarr and pfxbitarr. + pub(crate) fn eval_node_or_prefix_at( + &self, + bit_span: BitSpan, + // all the bits of the search prefix, but with the length set to + // the length of this stride. So bits are set beyond its length. + base_prefix: NodeId, + // stride_len: u8, + is_last_stride: bool, + ) -> (NewNodeOrIndex, u32) { + // THE CRITICAL SECTION + // + // UPDATING ptrbitarr & pfxbitarr + // + // This section is not as critical as creating/updating a + // a prefix. We need to set one bit only, and if somebody + // beat us to it that's fine, we'll figure that out when + // we try to write the prefix's serial number later on. + // The one thing that can go wrong here is that we are + // using an old ptrbitarr and overwrite bits set in the + // meantime elsewhere in the bitarray. 
+ let mut retry_count = 0; + let ptrbitarr = self.ptrbitarr.load(); + let pfxbitarr = self.pfxbitarr.load(); + let bit_pos = bit_span.into_bit_pos(); + let new_node: TreeBitMapNode; + + // Check that we're not at the last stride (pfx.len <= stride_end), + // Note that next_stride may have a value, but we still don't want to + // continue, because we've exceeded the length of the prefix to + // be inserted. + // Also note that a nibble_len < S::BITS (a smaller than full nibble) + // does indeed indicate the last stride has been reached, but the + // reverse is *not* true, i.e. a full nibble can also be the last + // stride. Hence the `is_last_stride` argument + if !is_last_stride { + // We are not at the last stride + // Check it the ptr bit is already set in this position + if (self.ptrbitarr.as_stride_size() & bit_pos) == 0 { + // Nope, set it and create a child node + new_node = TreeBitMapNode { + ptrbitarr: AtomicPtrBitArr(AtomicU16::new(0)), + pfxbitarr: AtomicPfxBitArr(AtomicU32::new(0)), + _af: PhantomData, + }; + + // THE CRITICAL SECTION + // + // UPDATING pfxbitarr + // + // preventing using an old ptrbitarr and overwrite bits set + // in the meantime elsewhere in the bitarray. + let mut a_ptrbitarr = self.ptrbitarr.compare_exchange( + ptrbitarr, + into_ptrbitarr(bit_pos | into_pfxbitarr(ptrbitarr)), + ); + let mut spinwait = SpinWait::new(); + loop { + match a_ptrbitarr { + CasResult(Ok(_)) => { + break; + } + CasResult(Err(newer_array)) => { + // Someone beat us to it, so we need to use the + // newer array. 
+ retry_count += 1; + a_ptrbitarr = self.ptrbitarr.compare_exchange( + newer_array, + into_ptrbitarr( + bit_pos | into_pfxbitarr(newer_array), + ), + ); + } + }; + spinwait.spin_no_yield(); + } + + return (NewNodeOrIndex::NewNode(new_node), retry_count); + } + } else { + // only at the last stride do we create the bit in the prefix + // bitmap, and only if it doesn't exist already + if pfxbitarr & bit_pos == 0 { + // THE CRITICAL SECTION + // + // UPDATING pfxbitarr + // + // preventing using an old pfxbitarr and overwrite bits set + // in the meantime elsewhere in the bitarray. + let mut a_pfxbitarr = self + .pfxbitarr + .compare_exchange(pfxbitarr, bit_pos | pfxbitarr); + let mut spinwait = SpinWait::new(); + + loop { + match a_pfxbitarr { + CasResult(Ok(_)) => { + break; + } + CasResult(Err(newer_array)) => { + // Someone beat us to it, so we need to use the + // newer array. + retry_count += 1; + a_pfxbitarr = self.pfxbitarr.compare_exchange( + newer_array, + bit_pos | newer_array, + ); + } + }; + spinwait.spin_no_yield(); + } + + return (NewNodeOrIndex::NewPrefix, retry_count); + } + return (NewNodeOrIndex::ExistingPrefix, retry_count); + } + + // Nodes always live at the last length of a stride (i.e. the last + // nibble), so we add the stride length to the length of the + // base_prefix (which is always the start length of the stride). + ( + NewNodeOrIndex::ExistingNode( + base_prefix.add_to_len(STRIDE_SIZE).truncate_to_len(), + ), + retry_count, + ) + } +} + +// ------------ Iterator methods -------------------------------------------- + +// ----------- NodeChildIter ------------------------------------------------ + +// create an iterator over all child nodes id +// +// we don't have a collection of local nodes anymore, since the id of the +// node are deterministically generated, as the prefix+len they represent +// in the treebitmap. This has both the advantage of using less memory, +// and being easier to use in a concurrently updated tree. 
The +// disadvantage is that we have to look up the child nodes on the fly +// when we want to iterate over all children of a node. +// +// ptr child nodes only exist at the last nibble of the stride size +// (`child_len`). Since children in the first nibbles are leaf nodes. +// leaf nodes will only be prefixes. So if we have a first stride of +// size 5, all ptr nodes wil have StrideNodeIds with len = 5. +// +// Ex.: +// +// Stride no. 1 2 3 4 5 6 7 +// StrideSize 5 5 4 3 3 3 3 +// child pfxs len /1-5 /5-10 /10-14 /15-17 /18-20 /21-23 /24-26 +// child Nodes len /5 /10 /14 /17 /20 /23 /26 +// +// Stride no. 8 9 +// StrideSize 3 3 +// child pfxs len /27-29 /30-32 +// child Nodes len /29 /32 + +type PtrBitArr = u16; + +// ----------- NodeMoreSpecificChildIter ------------------------------------ + +// Create an iterator over all the child nodes that hold a more specific +// prefixes of the specified start_bit_span. This basically the same Iterator +// as the ChildNodeIter, except that it stops (potentially) earlier, to avoid +// including nodes with adjacent prefixes. Starting an iterator with a +// `start_bit_span` of { bits: 0, len: 0 } will return all child nodes of +// this node. In that case you could also use the `NodeChildIter` instead. +// +// inputs +// +// `base_prefix` +// This iterator take a `base_prefix` since the nodes themselves have no +// knowledge of their own prefixes, those are inferred by their position in +// the tree (therefore, it's actually a Trie). Note that `base_prefix` + +// `bit_span` define the actual starting prefix for this iterator. +// +// `ptrbitarr` +// is the bitmap that holds the slots that have child nodes. +// +// `start_bit_span` +// is the bit span that is going to be used as a starting point for the +// iterator. +// +// `cursor` +// holds the current cursor offset from the start_bit_span.bits, the sum of +// these describe the current position in the bitmap. Used for re-entry into +// the iterator. 
A new iterator should start with None. +// +// How this works +// +// The iterator starts at the start_bit_span.bits position in the bitmap and +// advances until it reaches either a one in the bitmap, or the maximum +// position for the particular more-specifics for this bit_span. +// +// e.x. +// The stride size is 5 and the starting bit span is {bits: 2, len: 4} (0010) +// The starting point is therefore the bit_array 0010. The iterator will go +// over 0010 0 and 0010 1. The next bits to consider would be 0011 0 which +// would not fit our starting bit_span of 0010. So we have to stop after 2 +// iterations. This means that the number of iterations is determined by the +// difference between the number of bits in the stride size (5) and the the +// number of bits in the start_bit_span (4). The number of iterations in the +// above example is therefore 1 << (5 - 4) = 2. Remember that a ptrbitarr +// holds only one stride size (the largest for its stride size), so we're +// done now. + +#[derive(Debug, Copy, Clone)] +pub(crate) struct NodeMoreSpecificChildIter { + base_prefix: NodeId, + bitrange: PtrBitArr, + start_bs: BitSpan, + start_cursor: u8, +} + +impl std::iter::Iterator + for NodeMoreSpecificChildIter +{ + type Item = NodeId; + fn next(&mut self) -> Option { + if self.bitrange == 0 { + trace!("empty ptrbitarr. This iterator is done."); + return None; + } + + let cursor = self.bitrange.leading_zeros() as u8 + 15; + trace!("LZCNT {}", self.bitrange.leading_zeros()); + + // if self.bitrange.leading_zeros() == 0 { + // trace!("bitrange {:032b}", self.bitrange); + // panic!("empty bitrange. 
This iterator is done."); + // return None; + // } + + trace!( + "base_prefix {}, start bit span {:?} start-stop cursor {}-{}", + self.base_prefix, + self.start_bs, + self.start_cursor, + ::min( + (1 << (STRIDE_SIZE - self.start_bs.len)) + self.start_cursor, + BIT_SPAN_SIZE - 2 + ) + ); + + trace!("bitrange {:032b}", self.bitrange); + + self.bitrange ^= ptr_bit_pos_from_index(cursor); + + trace!("mask {:032b}", ptr_bit_pos_from_index(cursor)); + trace!("next br {:032b}", self.bitrange); + + let bs = BitSpan::from_bit_pos_index(cursor); + if log_enabled!(log::Level::Trace) { + let bit_pos = ptr_bit_pos_from_index(cursor); + trace!( + "{:02}: {:05b} {:032b} bit_span: {:04b} ({:02}) (len: {})", + cursor, + cursor - 1, + bit_pos, + bs.bits, + bs.bits, + bs.len + ); + trace!( + ">> found node with more specific prefixes for + base prefix {:?} bit span {:?} (cursor {})", + self.base_prefix, + bs, + cursor + ); + } + + let pfx = self.base_prefix.add_bit_span(BitSpan { + bits: bs.bits, + len: STRIDE_SIZE, + }); + Some(pfx) + } +} + +// ----------- NodePrefixIter ----------------------------------------------- + +// Create an iterator of all prefix ids hosted by this node. + +// Partition for stride 3 +// +// pfxbitarr (AF::BITS) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 +// bit_span (binary) * 0 1 00 01 10 11 000 001 010 011 100 101 110 111 * +// bit_span (dec.) * 0 1 0 1 2 3 0 1 2 3 4 5 6 7 * +// len 0 1 2 3 +// +// pfxbitarr (example) 1 0 0 0 0 0 1 1 1 0 0 0 0 0 0 0 +// pos (example) 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 +// +// Ex.: +// `pos` describes the bit that is currently under consideration. +// +// `pfxbitarr` is the bitmap that contains the prefixes. Every 1 in the +// bitmap means that the prefix is hosted by this node. Moreover, the +// position in the bitmap describes the address part of the prefix, given +// a `base prefix`. The described prefix is the bits of the `base_prefix` +// bitmap appended by the `bit span` bits. 
+// +// The length of the prefix is +// described by sum of the length of the base_prefix and the `len` +// variable. +// +// The `bit_span` variable starts counting at every new prefix length. +// pub(crate) struct NodePrefixIter { +// base_prefix: StrideNodeId, +// pfxbitarr: <::AtomicPfxSize as AtomicBitmap>::InnerType, +// cursor: u8, +// _af: PhantomData, +// _s: PhantomData, +// } + +#[allow(clippy::indexing_slicing)] +pub const fn ms_prefix_mask_arr(bs: BitSpan) -> u32 { + [ + 0b_01111111111111111111111111111110, // bits = 0, len = 0 + 0b_01011001111000011111111000000000, // bits = 0, len = 1 + 0b_00100110000111100000000111111110, // bits = 1, len = 1 + 0b_00010001100000011110000000000000, // bits = 0, len = 2 + 0b_00001000011000000001111000000000, // bits = 1, len = 2 + 0b_00000100000110000000000111100000, // bits = 2, len = 2 + 0b_00000010000001100000000000011110, // bits = 3, len = 2 + 0b_00000001000000011000000000000000, // bits = 0, len = 3 + 0b_00000000100000000110000000000000, // bits = 1, len = 3 + 0b_00000000010000000001100000000000, // bits = 2, len = 3 + 0b_00000000001000000000011000000000, // bits = 3, len = 3 + 0b_00000000000100000000000110000000, // bits = 4, len = 3 + 0b_00000000000010000000000001100000, // bits = 5, len = 3 + 0b_00000000000001000000000000011000, // bits = 6, len = 3 + 0b_00000000000000100000000000000110, // bits = 7, len = 3 + 0b_00000000000000010000000000000000, // bits = 0, len = 4 + 0b_00000000000000001000000000000000, // bits = 1, len = 4 + 0b_00000000000000000100000000000000, // bits = 2, len = 4 + 0b_00000000000000000010000000000000, // bits = 3, len = 4 + 0b_00000000000000000001000000000000, // bits = 4, len = 4 + 0b_00000000000000000000100000000000, // bits = 5, len = 4 + 0b_00000000000000000000010000000000, // bits = 6, len = 4 + 0b_00000000000000000000001000000000, // bits = 7, len = 4 + 0b_00000000000000000000000100000000, // bits = 8, len = 4 + 0b_00000000000000000000000010000000, // bits = 9, len = 4 + 
0b_00000000000000000000000001000000, // bits =10, len = 4 + 0b_00000000000000000000000000100000, // bits =11, len = 4 + 0b_00000000000000000000000000010000, // bits =12, len = 4 + 0b_00000000000000000000000000001000, // bits =13, len = 4 + 0b_00000000000000000000000000000100, // bits =14, len = 4 + 0b_00000000000000000000000000000010, // bits =15, len = 4 + 0b_00000000000000000000000000000000, // padding + ][(1 << bs.len) - 1 + bs.bits as usize] +} + +fn into_ptrbitarr(bitmap: u32) -> u16 { + (bitmap >> 1) as u16 +} + +fn into_pfxbitarr(bitmap: u16) -> u32 { + (bitmap as u32) << 1 +} + +fn bit_pos_from_index(i: u8) -> u32 { + 1_u32.rotate_right(1) >> i +} + +fn ptr_bit_pos_from_index(i: u8) -> u16 { + // trace!("pfx {} ptr {} strlen {}", + // <$pfxsize>::BITS, <$ptrsize>::BITS, Self::STRIDE_LEN); + trace!("PTR_BIT_POS_FROM_INDEX {i}"); + 1_u16.rotate_right(i as u32 + 2) +} + +pub(crate) fn ptr_range(ptrbitarr: u16, bs: BitSpan) -> (u16, u8) { + let start: u8 = (bs.bits << (4 - bs.len)) as u8; + let stop: u8 = start + (1 << (4 - bs.len)); + let mask: u16 = (((1_u32 << (stop as u32 - start as u32)) - 1) + .rotate_right(stop as u32) + >> 16) as u16; + if log_enabled!(log::Level::Trace) { + trace!("- mask {:032b}", mask); + trace!("- ptrbitarr {:032b}", ptrbitarr); + trace!("- shl bitar {:032b}", ptrbitarr & mask); + } + + (ptrbitarr & mask, start) +} + +// Creates an Iterator that returns all prefixes that exist in a node that +// are a more-specific prefix of the `base_prefix` + `start_bit_span`. +// +// Inputs +// +// `base_prefix` +// This iterator take a `base_prefix` since the nodes themselves have no +// knowledge of their own prefixes, those are inferred by their position in +// the tree (therefore, it's actually a Trie). Note that `base_prefix` + +// `bit_span` define the actual starting prefix for this iterator. +// +// `pfxbitarr` +// is the bitmap that holds the slots that have prefixes. 
+//
+// `start_bit_span`
+// is the bit span that is going to be used as a starting point for the
+// iterator.
+//
+// `cursor`
+// holds the current cursor offset from the start_bit_span.bits, the sum of
+// these describes the current position in the bitmap. Used for re-entry into
+// the iterator. A new iterator should start with None.
+//
+// How this works
+//
+// The iterator starts at the start_bit_span.bits position in the bitmap and
+// advances until it reaches either a one in the bitmap, or the maximum
+// position for the particular more-specifics for this bit_span. When it
+// reaches the maximum position it determines whether there are more stride-
+// sizes available in this bitmap. If there are, it advances to the next
+// stride-size in the first position. If not, it terminates the iterator.
+//
+// e.g.
+// The stride size is 5 and the starting bit span is {bits: 1, len: 3} (001)
+// This means that the stride sizes that we have to consider are 4 and 5. 3
+// being the size of the current bit_span and 5 being the size of the total
+// stride.
+// The starting point is therefore the bit_array 001. The iterator will go
+// over 001 00, 001 01, 001 10 and 001 11. The next bits to consider would be
+// 010 00 which would not fit our starting bit_span of 001. So we have to
+// stop after 4 iterations. This means that the number of iterations is
+// determined by the difference between the number of bits in the stride size
+// (5) and the number of bits in the start_bit_span (3). The number of
+// iterations in the above example is therefore 1 << (5 - 3) = 4.
+// Unlike the MoreSpecificPrefixIter, we will have to consider more lengths
+// than just the bit_span len. We will have to jump a few pfxbitarr bits and
+// move to the next stride size in the bitmap, starting at bit_array 0010, or
+// the bit_span { bits: 2, len: 3 }, a.k.a. 0010 << 1. 
But now we will have +// to go over a different amount of 1 << (5 - 4) = 2 iterations to reap the +// next bit_spans of 0010 0 and 0010 1. +pub(crate) struct NodeMoreSpecificsPrefixIter { + base_prefix: NodeId, + pfxbitarr: u32, +} + +impl std::iter::Iterator + for NodeMoreSpecificsPrefixIter +{ + type Item = PrefixId; + + fn next(&mut self) -> Option { + // Empty bitmap + if self.pfxbitarr == 0 { + trace!("empty pfxbitarr. This iterator is done."); + return None; + } + + let cursor = self.pfxbitarr.leading_zeros() as u8; + let bs = BitSpan::from_bit_pos_index(cursor); + trace!( + "ms prefix iterator start_bs {:?} start cursor {}", + bs, + bs.cursor_from_bit_span() + ); + trace!("pfx {:032b}", self.pfxbitarr); + let bit_pos = bs.into_bit_pos(); + let prefix_id: PrefixId = self + .base_prefix + .add_bit_span(BitSpan::from_bit_pos_index( + bit_pos.leading_zeros() as u8, + )) + .into(); + self.pfxbitarr ^= bit_pos_from_index(cursor); + Some(prefix_id) + } +} + +impl Default for TreeBitMapNode +where + AF: AddressFamily, +{ + fn default() -> Self { + Self { + ptrbitarr: AtomicPtrBitArr::new(), + pfxbitarr: AtomicPfxBitArr::new(), + _af: PhantomData, + } + } +} + +pub(crate) enum NewNodeOrIndex { + NewNode(TreeBitMapNode), + ExistingNode(NodeId), + NewPrefix, + ExistingPrefix, +} + +//--------------------- NodeId ----------------------------------------------- + +// The type that acts as the id for a node in the treebitmap and the node CHT. +// Its data structure is the same as [PrefixId], but its behaviour is subtly +// different from PrefixId, i.e. a NodeId only exists at a stride boundary, +// so it always stores multiples of 4 bits. It cannot be converted to/from +// a Prefix. 
+ +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub(crate) struct NodeId { + bits: AF, + len: u8, +} + +impl NodeId { + pub(crate) fn dangerously_new_with_id_as_is( + addr_bits: AF, + len: u8, + ) -> Self { + Self { + bits: addr_bits, + len, + } + } + + #[inline] + pub(crate) fn new_with_cleaned_id(addr_bits: AF, len: u8) -> Self { + Self { + bits: addr_bits.truncate_to_len(len), + len, + } + } + + pub(crate) fn len(&self) -> u8 { + self.len + } + + pub(crate) fn bits(&self) -> AF { + self.bits + } + + pub(crate) fn add_to_len(mut self, len: u8) -> Self { + self.len += len; + self + } + + #[inline] + pub(crate) fn truncate_to_len(self) -> Self { + NodeId::new_with_cleaned_id(self.bits, self.len) + } + + // clean out all bits that are set beyond the len. This function should + // be used before doing any ORing to add a bitspan. + #[inline] + pub(crate) fn with_cleaned_id(&self) -> (AF, u8) { + (self.bits.truncate_to_len(self.len), self.len) + } + + pub(crate) fn add_bit_span(&self, bs: BitSpan) -> Self { + let (addr_bits, len) = self.with_cleaned_id(); + let res = addr_bits.add_bit_span(len, bs); + res.into() + } +} + +impl std::fmt::Display for NodeId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}-{}", self.bits, self.len) + } +} + +impl std::convert::From> for PrefixId { + fn from(id: NodeId) -> Self { + PrefixId::new(id.bits, id.len) + } +} + +impl From<(AF, u8)> for NodeId { + fn from(value: (AF, u8)) -> Self { + NodeId { + bits: value.0, + len: value.1, + } + } +} diff --git a/src/tree_bitmap/tree_bitmap_query.rs b/src/tree_bitmap/tree_bitmap_query.rs new file mode 100644 index 00000000..ab94c12d --- /dev/null +++ b/src/tree_bitmap/tree_bitmap_query.rs @@ -0,0 +1,651 @@ +use crate::match_options::{MatchOptions, MatchType}; +use crate::types::AddressFamily; + +use crate::rib::starcast_af_query::TreeQueryResult; + +use crate::types::PrefixId; +use crate::TreeBitMap; + +impl TreeBitMap +where + AF: AddressFamily, +{ + 
pub(crate) fn match_prefix( + &self, + search_pfx: PrefixId, + options: &MatchOptions, + ) -> TreeQueryResult { + let mut ls_pfxs = None; + let mut ms_pfxs = None; + let lm_pfx = self.longest_matching_prefix(search_pfx); + + let (prefix, match_type) = match options.match_type { + MatchType::ExactMatch => { + if let Some(p) = lm_pfx { + if p == search_pfx { + (lm_pfx, MatchType::ExactMatch) + } else { + (None, MatchType::EmptyMatch) + } + } else { + (None, MatchType::EmptyMatch) + } + } + _ => (lm_pfx, MatchType::LongestMatch), + }; + + if options.include_less_specifics { + ls_pfxs = Some( + self.less_specific_prefix_iter(search_pfx) + .collect::>(), + ) + } + + if options.include_more_specifics { + ms_pfxs = Some( + self.more_specific_prefix_iter_from(search_pfx) + .filter(|p| p != &search_pfx) + .collect::>(), + ); + } + + TreeQueryResult { + match_type, + prefix, + less_specifics: ls_pfxs, + more_specifics: ms_pfxs, + } + } + + // This function assembles all entries in the `pfx_vec` of all child nodes + // of the `start_node` into one vec, starting from itself and then + // recursively assembling adding all `pfx_vec`s of its children. 
+ // fn get_all_more_specifics_for_node( + // &self, + // start_node_id: StrideNodeId, + // found_pfx_vec: &mut Vec>, + // ) { + // trace!("{:?}", self.retrieve_node(start_node_id)); + // match self.retrieve_node(start_node_id) { + // Some(SizedStrideRef::Stride3(n)) => { + // found_pfx_vec.extend( + // n.pfx_iter(start_node_id).collect::>>(), + // ); + + // for child_node in n.ptr_iter(start_node_id) { + // self.get_all_more_specifics_for_node( + // child_node, + // found_pfx_vec, + // ); + // } + // } + // Some(SizedStrideRef::Stride4(n)) => { + // found_pfx_vec.extend( + // n.pfx_iter(start_node_id).collect::>>(), + // ); + + // for child_node in n.ptr_iter(start_node_id) { + // self.get_all_more_specifics_for_node( + // child_node, + // found_pfx_vec, + // ); + // } + // } + // Some(SizedStrideRef::Stride5(n)) => { + // found_pfx_vec.extend( + // n.pfx_iter(start_node_id).collect::>>(), + // ); + + // for child_node in n.ptr_iter(start_node_id) { + // self.get_all_more_specifics_for_node( + // child_node, + // found_pfx_vec, + // ); + // } + // } + // _ => { + // panic!("can't find node {}", start_node_id); + // } + // } + // } + + // This function assembles the prefixes of a child node starting on a + // specified bit position in a ptr_vec of `current_node` into a vec, + // then adds all prefixes of these children recursively into a vec and + // returns that. 
+ // fn get_all_more_specifics_from_nibble( + // &self, + // current_node: &TreeBitMapNode, + // nibble: u32, + // nibble_len: u8, + // base_prefix: StrideNodeId, + // ) -> Option>> { + // let (cnvec, mut msvec) = current_node.add_more_specifics_at( + // nibble, + // nibble_len, + // base_prefix, + // ); + + // for child_node in cnvec.iter() { + // self.get_all_more_specifics_for_node(*child_node, &mut msvec); + // } + // Some(msvec) + // } + + // In a LMP search we have to go over all the nibble lengths in the + // stride up until the value of the actual nibble length were looking for + // (until we reach stride length for all strides that aren't the last) + // and see if the prefix bit in that position is set. Note that this does + // not search for prefixes with length 0 (which would always match). + // So for matching a nibble 1010, we have to search for 1, 10, 101 and + // 1010 on resp. position 1, 5, 12 and 25: + // ↓ ↓ ↓ + // nibble * 0 1 00 01 10 11 000 001 010 011 100 101 110 111 + // nibble len offset 0 1 2 3 + // + // (contd.) + // pfx bit arr (u32) 15 16 17 18 19 20 21 22 23 24 + // nibble 0000 0001 0010 0011 0100 0101 0110 0111 1000 1001 + // nibble len offset 4 + // + // (contd.) ↓ + // pfx bit arr (u32) 25 26 27 28 29 30 31 + // nibble 1010 1011 1100 1101 1110 1111 x + // nibble len offset 4(contd.) + + // pub(crate) fn match_prefix_by_tree_traversal( + // &self, + // search_pfx: PrefixId, + // options: &MatchOptions, + // // guard: &'a Guard, + // ) -> TreeQueryResult { + // // --- The Default Route Prefix ------------------------------------- + + // // The Default Route Prefix unfortunately does not fit in tree as we + // // have it. There's no room for it in the pfxbitarr of the root node, + // // since that can only contain serial numbers for prefixes that are + // // children of the root node. We, however, want the default prefix + // // which lives on the root node itself! 
We are *not* going to return + // // all of the prefixes in the tree as more-specifics. + // if search_pfx.get_len() == 0 { + // // match self.load_default_route_prefix_serial() { + // // 0 => { + // // return QueryResult { + // // prefix: None, + // // prefix_meta: vec![], + // // match_type: MatchType::EmptyMatch, + // // less_specifics: None, + // // more_specifics: None, + // // }; + // // } + + // // _serial => { + // return TreeQueryResult { + // prefix: None, + // match_type: MatchType::EmptyMatch, + // less_specifics: None, + // more_specifics: None, + // }; + // // } + // // } + // } + + // let mut stride_end = 0; + + // let root_node_id = self.get_root_node_id(); + // let mut node = match self.get_stride_for_id(root_node_id) { + // 3 => self.retrieve_node(root_node_id).unwrap(), + // 4 => self.retrieve_node(root_node_id).unwrap(), + // _ => self.retrieve_node(root_node_id).unwrap(), + // }; + + // let mut nibble; + // let mut nibble_len; + + // //---- result values ------------------------------------------------ + + // // These result values are kept in mutable variables, and assembled + // // at the end into a QueryResult struct. This proved to result in the + // // most efficient code, where we don't have to match on + // // SizedStrideNode over and over. The `match_type` field in the + // // QueryResult is computed at the end. + + // // The final prefix + // let mut match_prefix_idx: Option> = None; + + // // The indexes of the less-specifics + // let mut less_specifics_vec = if options.include_less_specifics { + // Some(Vec::>::new()) + // } else { + // None + // }; + + // // The indexes of the more-specifics. 
+ // let mut more_specifics_vec = if options.include_more_specifics { + // Some(Vec::>::new()) + // } else { + // None + // }; + + // //---- Stride Processing -------------------------------------------- + + // // We're going to iterate over all the strides in the treebitmap (so + // // up to the last bit in the max prefix length for that tree). When + // // a final prefix is found or we get to the end of the strides, + // // depending on the options.match_type (the type requested by the + // // user). we ALWAYS break out of the loop. WE ALWAYS BREAK OUT OF THE + // // LOOP. Just before breaking some processing is done inside the loop + // // before the break (looking up more-specifics mainly), which looks a + // // bit repetitious, but again it's been done like that to avoid + // // having to match over a SizedStrideNode again in the + // // `post-processing` section. + + // for stride in self.get_stride_sizes() { + // stride_end += stride; + + // let last_stride = search_pfx.get_len() < stride_end; + + // nibble_len = if last_stride { + // stride + search_pfx.get_len() - stride_end + // } else { + // *stride + // }; + + // // Shift left and right to set the bits to zero that are not + // // in the nibble we're handling here. 
+ // nibble = AddressFamily::get_nibble( + // search_pfx.get_net(), + // stride_end - stride, + // nibble_len, + // ); + + // match node { + // SizedStrideRef::Stride3(current_node) => { + // let search_fn = match options.match_type { + // MatchType::ExactMatch => { + // if options.include_less_specifics { + // TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at + // } else { + // TreeBitMapNode::search_stride_for_exact_match_at + // } + // } + // MatchType::LongestMatch => { + // TreeBitMapNode::search_stride_for_longest_match_at + // } + // MatchType::EmptyMatch => { + // TreeBitMapNode::search_stride_for_longest_match_at + // } + // }; + + // // This whole match assumes that: + // // - if the first value in the return tuple of + // // `search_fn` holds a value, then we need to continue + // // searching by following the node contained in the + // // value. + // // - The second value in the tuple holds the prefix that + // // was found. + // // The less_specifics_vec is mutated by `search_fn` to + // // hold the prefixes found along the way, in the cases + // // where `include_less_specifics` was requested by the + // // user. + // match search_fn( + // current_node, + // search_pfx, + // nibble, + // nibble_len, + // stride_end - stride, + // &mut less_specifics_vec, + // ) { + // // This and the next match will handle all + // // intermediary nodes, but they might also handle + // // exit nodes. 
+ // (Some(n), Some(pfx_idx)) => { + // match_prefix_idx = Some(pfx_idx); + // node = self.retrieve_node(n).unwrap(); + + // if last_stride { + // if options.include_more_specifics { + // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + // } + // break; + // } + // } + // (Some(n), None) => { + // node = self.retrieve_node(n).unwrap(); + + // if last_stride { + // if options.include_more_specifics { + // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + // } + // break; + // } + // } + // // This handles exact and longest matches: there are + // // no more children, but there is a prefix on this + // // node. + // (None, Some(pfx_idx)) => { + // if options.include_more_specifics { + // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + // } + // match_prefix_idx = Some(pfx_idx); + // break; + // } + // // This handles cases where there's no prefix (and no + // // child) for exact match or longest match, the empty + // // match - which doesn't care about actually finding + // // a prefix - just continues in search of + // // more-specifics. + // (None, None) => { + // match options.match_type { + // MatchType::EmptyMatch => { + // // To make sure we don't process this + // // match arm more then once, we return + // // early here. 
+ // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + + // match_prefix_idx = None; + // break; + // } + // MatchType::LongestMatch => {} + // MatchType::ExactMatch => { + // match_prefix_idx = None; + // } + // } + // break; + // } + // } + // } + // //---- From here only repetitions for all strides ----------- + // // For comments see the code above for the Stride3 arm. + // SizedStrideRef::Stride4(current_node) => { + // let search_fn = match options.match_type { + // MatchType::ExactMatch => { + // if options.include_less_specifics { + // TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at + // } else { + // TreeBitMapNode::search_stride_for_exact_match_at + // } + // } + // MatchType::LongestMatch => { + // TreeBitMapNode::search_stride_for_longest_match_at + // } + // MatchType::EmptyMatch => { + // TreeBitMapNode::search_stride_for_longest_match_at + // } + // }; + // match search_fn( + // current_node, + // search_pfx, + // nibble, + // nibble_len, + // stride_end - stride, + // &mut less_specifics_vec, + // ) { + // (Some(n), Some(pfx_idx)) => { + // match_prefix_idx = Some(pfx_idx); + // node = self.retrieve_node(n).unwrap(); + + // if last_stride { + // if options.include_more_specifics { + // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + // } + // break; + // } + // } + // (Some(n), None) => { + // node = self.retrieve_node(n).unwrap(); + + // if last_stride { + // if options.include_more_specifics { + // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // 
search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + // } + // break; + // } + // } + // (None, Some(pfx_idx)) => { + // if options.include_more_specifics { + // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + // } + // match_prefix_idx = Some(pfx_idx); + // break; + // } + // (None, None) => { + // match options.match_type { + // MatchType::EmptyMatch => { + // // To make sure we don't process this + // // match arm more then once, we return + // // early here. + // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + + // match_prefix_idx = None; + // break; + // } + // MatchType::LongestMatch => {} + // MatchType::ExactMatch => { + // match_prefix_idx = None; + // } + // } + // break; + // } + // } + // } + // SizedStrideRef::Stride5(current_node) => { + // let search_fn = match options.match_type { + // MatchType::ExactMatch => { + // if options.include_less_specifics { + // TreeBitMapNode::search_stride_for_exact_match_with_less_specifics_at + // } else { + // TreeBitMapNode::search_stride_for_exact_match_at + // } + // } + // MatchType::LongestMatch => { + // TreeBitMapNode::search_stride_for_longest_match_at + // } + // MatchType::EmptyMatch => { + // TreeBitMapNode::search_stride_for_longest_match_at + // } + // }; + // match search_fn( + // current_node, + // search_pfx, + // nibble, + // nibble_len, + // stride_end - stride, + // &mut less_specifics_vec, + // ) { + // (Some(n), Some(pfx_idx)) => { + // match_prefix_idx = Some(pfx_idx); + // node = self.retrieve_node(n).unwrap(); + + // if last_stride { + // if options.include_more_specifics { + // more_specifics_vec = self + // 
.get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + // } + // break; + // } + // } + // (Some(n), None) => { + // node = self.retrieve_node(n).unwrap(); + + // if last_stride { + // if options.include_more_specifics { + // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + // } + // break; + // } + // } + // (None, Some(pfx_idx)) => { + // if options.include_more_specifics { + // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + // } + // match_prefix_idx = Some(pfx_idx); + // break; + // } + // (None, None) => { + // match options.match_type { + // MatchType::EmptyMatch => { + // more_specifics_vec = self + // .get_all_more_specifics_from_nibble( + // current_node, + // nibble, + // nibble_len, + // StrideNodeId::new_with_cleaned_id( + // search_pfx.get_net(), + // stride_end - stride, + // ), + // ); + + // match_prefix_idx = None; + // break; + // } + // MatchType::LongestMatch => {} + // MatchType::ExactMatch => { + // match_prefix_idx = None; + // } + // } + // break; + // } + // } + // } + // } + // } + // //------------------ end of Stride branch arm repetition ------------ + + // //------------------ post-processing -------------------------------- + + // // If the above loop finishes (so not hitting a break) we have + // // processed all strides and have found a child node and maybe a + // // prefix. Now we will look up more-specifics for longest-matching + // // prefixes that were found in the last stride only. 
Note that still + // // any of the match_types (as specified by the user, not the return + // // type) may end up here. + + // let match_type = if let Some(prefix) = match_prefix_idx { + // if prefix.get_len() == search_pfx.get_len() { + // MatchType::ExactMatch + // } else { + // MatchType::LongestMatch + // } + // } else { + // MatchType::EmptyMatch + // }; + + // TreeQueryResult { + // prefix: match_prefix_idx, + // match_type, + // less_specifics: if options.include_less_specifics { + // less_specifics_vec.map(|lsv| { + // lsv.into_iter() + // .filter(|r| r != &search_pfx) + // .collect::>() + // }) + // } else { + // None + // }, + // more_specifics: if options.include_more_specifics { + // more_specifics_vec + // } else { + // None + // }, + // } + // } +} diff --git a/src/types/af.rs b/src/types/af.rs new file mode 100644 index 00000000..b1a8b991 --- /dev/null +++ b/src/types/af.rs @@ -0,0 +1,227 @@ +use log::trace; +use zerocopy::{NetworkEndian, U128, U32}; + +use crate::types::BitSpan; + +//------------ AddressFamily (trait) ---------------------------------------- +// +/// The address family of an IP address as a Trait. +/// +/// The idea of this trait is that each family will have a separate type to be +/// able to only take the exact amount of memory needed. Useful when building +/// trees with large amounts of addresses/prefixes. Used by rotonda-store for +/// this purpose. +pub trait AddressFamily: + std::fmt::Binary + + std::fmt::Debug + + std::hash::Hash + + std::fmt::Display + + Eq + + std::ops::BitAnd + + std::ops::BitOr + + std::ops::Shr + + std::ops::Shl + + std::ops::Shl + + std::ops::Sub + + Copy + + Ord + + zerocopy::FromBytes + + zerocopy::IntoBytes + + zerocopy::KnownLayout + + zerocopy::Immutable + + zerocopy::Unaligned +{ + /// The number of bits in the byte representation of the family. + const BITS: u8; + + /// The type actually holding the value, u32 for IPv4, and u128 for IPv6. 
+ type Inner: Into + From + From; + + /// The std::net that the value of self belongs to. So, + /// [std::net::Ipv4Addr], and [std::net::Ipv6Addr] for IPv4, and IPv6 + /// respectively. + type InnerIpAddr; + + fn new(value: Self::Inner) -> Self { + value.into() + } + + fn from_ipaddr(ip_addr: Self::InnerIpAddr) -> Self; + + fn from_u32(value: u32) -> Self; + fn from_u8(value: u8) -> Self; + + fn zero() -> Self; + + // returns the specified nibble from `start_bit` to (and including) + // `start_bit + len` and shifted to the right. + fn into_bit_span(net: Self, start_bit: u8, len: u8) -> BitSpan; + + /// Treat self as a prefix and append the given bitspan to it. + fn add_bit_span(self, len: u8, bs: BitSpan) -> (Self, u8); + + /// fill the bits after the specified len with zeros. Interpreted as an IP + /// Prefix, this means that self will be truncated to the specified len. + fn truncate_to_len(self, len: u8) -> Self; + + /// Turn self into a [std::net::IpAddr]. + fn into_ipaddr(self) -> std::net::IpAddr; + + /// Truncate self to a u32. For IPv4 this is a NOP. For IPv6 this + /// truncates to 32 bits. + fn dangerously_truncate_to_u32(self) -> u32; + + // For the sake of searching for 0/0, check the right shift, since + // shifting with MAXLEN (32 in Ipv4, or 128 in IPv6) will panic + // in debug mode. A failed check will simply return zero. Used in + // finding node_ids (always zero for 0/0). + fn checked_shr_or_zero(self, rhs: u32) -> Self; +} + +//-------------- Ipv4 Type -------------------------------------------------- + +/// Exactly fitting IPv4 bytes (4 octets). 
+pub type IPv4 = zerocopy::U32; + +impl AddressFamily for IPv4 { + const BITS: u8 = 32; + type Inner = u32; + type InnerIpAddr = std::net::Ipv4Addr; + + fn zero() -> Self { + 0.into() + } + + fn from_u8(value: u8) -> Self { + IPv4::from([0, 0, 0, value]) + } + + fn from_u32(value: u32) -> Self { + IPv4::from(value) + } + + fn from_ipaddr(ip_addr: Self::InnerIpAddr) -> Self { + IPv4::from(ip_addr.octets()) + } + + fn into_bit_span(net: Self, start_bit: u8, len: u8) -> BitSpan { + BitSpan { + bits: ((net << >::from(start_bit as u32)) + >> >::from(((32 - len) % 32) as u32)) + .into(), + len, + } + } + + fn add_bit_span(self, len: u8, bs: BitSpan) -> (U32, u8) { + let res = self | (bs.bits << (32 - len - bs.len) as usize); + (res, len + bs.len) + } + + fn into_ipaddr(self) -> std::net::IpAddr { + std::net::IpAddr::V4(std::net::Ipv4Addr::from(u32::from(self))) + } + + fn dangerously_truncate_to_u32(self) -> u32 { + // not dangerous at all. + self.into() + } + + fn truncate_to_len(self, len: u8) -> Self { + self & ((1_u32.rotate_right(len as u32) + ^ 1_u32.saturating_sub(len as u32)) + .wrapping_sub(1) + ^ u32::MAX) + } + + fn checked_shr_or_zero(self, rhs: u32) -> Self { + trace!("CHECKED_SHR_OR_ZERO {} >> {}", u32::from(self), rhs); + if rhs == 0 || rhs == 32 { + return 0.into(); + } + self >> U32::::from(rhs) + } +} + +//-------------- Ipv6 Type -------------------------------------------------- + +/// Exactly fitting IPv6 bytes (16 octets). 
+pub type IPv6 = U128; + +impl AddressFamily for IPv6 { + // const BITMASK: u128 = 0x1u128.rotate_right(1); + const BITS: u8 = 128; + type Inner = u128; + type InnerIpAddr = std::net::Ipv6Addr; + + fn zero() -> Self { + 0.into() + } + + fn from_ipaddr(ip_addr: Self::InnerIpAddr) -> Self { + IPv6::from(ip_addr.octets()) + } + + fn into_bit_span(net: Self, start_bit: u8, len: u8) -> BitSpan { + BitSpan { + bits: u128::from( + (net << >::from(start_bit as u128)) + >> (>::from(128 - len as u128) % 128), + ) as u32, + len, + } + } + + fn add_bit_span(self, len: u8, bs: BitSpan) -> (Self, u8) { + let res = self | ((bs.bits as u128) << (128 - len - bs.len) as usize); + (res, len + bs.len) + } + + fn truncate_to_len(self, len: u8) -> Self { + self & ((1_u128.rotate_right(len as u32) + ^ 1_u128.saturating_sub(len as u128)) + .wrapping_sub(1) + ^ u128::MAX) + } + + fn into_ipaddr(self) -> std::net::IpAddr { + std::net::IpAddr::V6(std::net::Ipv6Addr::from(u128::from(self))) + } + + fn dangerously_truncate_to_u32(self) -> u32 { + // this will chop off the high bits. 
+ u128::from(self) as u32 + } + + fn checked_shr_or_zero(self, rhs: u32) -> Self { + if rhs == 0 || rhs == 128 { + return U128::from(0); + }; + + self >> U128::from(rhs as u128) + } + + fn from_u8(value: u8) -> Self { + IPv6::from([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, value]) + } + + fn from_u32(value: u32) -> Self { + (value as u128).into() + } +} + +pub trait IntoIpAddr { + fn into_ipaddr(self) -> std::net::IpAddr; +} + +impl IntoIpAddr for u32 { + fn into_ipaddr(self) -> std::net::IpAddr { + std::net::IpAddr::V4(std::net::Ipv4Addr::from(self)) + } +} + +impl IntoIpAddr for u128 { + fn into_ipaddr(self) -> std::net::IpAddr { + std::net::IpAddr::V6(std::net::Ipv6Addr::from(self)) + } +} diff --git a/src/types/bit_span.rs b/src/types/bit_span.rs new file mode 100644 index 00000000..aa8433ba --- /dev/null +++ b/src/types/bit_span.rs @@ -0,0 +1,70 @@ +use crate::rib::BIT_SPAN_SIZE; + +//------------ BitSpan ------------------------------------------------------- + +// A bitspan is a bunch of bits representing the last stride in a NodeId +// or PrefixId, as such it can have a length of 1, 2, or 3 bits, in a stride +// length of 4 bits (which is the hard-coded value for all of the store +// currently). +// +// We are storing these bits in a u32, which may seem to be wasting space +// on first glance. However: +// - this bitspan is never stored in the store as +// such, it is used for intermediary calculations. The assumption is that +// modern CPUs always throw around values aligned on 4 bytes. +// - even if wanted to optimise for space, we have to take into account that +// we need to shift right and left beyond the size of the final result of a +// series of calculations. +#[derive(Copy, Clone, Debug)] +pub struct BitSpan { + pub bits: u32, + pub len: u8, +} + +impl BitSpan { + pub(crate) fn new(bits: u32, len: u8) -> Self { + Self { bits, len } + } + + // Deep, dark, black magic. Calculate the bit span from the index in a + // bitarr. 
This is used by iterators, so they can have one sequential i + // loop, that goes over all positions in a bitarr by its indexes. + pub fn from_bit_pos_index(mut i: u8) -> Self { + let bits = i as u32; + i += 1; + i |= i >> 1; + i |= i >> 2; + i |= i >> 3; + i = (i >> 1).count_ones() as u8; + Self { + bits: bits - ((1 << i as u32) - 1), + len: i, + } + } + + pub(crate) fn check(&self) -> bool { + if self.len == 0 && self.bits == 0 { + return true; + }; + self.len < 5 + && self.bits < 16 + && (self.bits << (32 - self.len)) >> (32 - self.len) == self.bits + } + + pub(crate) fn into_bit_pos(self) -> u32 { + 1 << (BIT_SPAN_SIZE + - ((1 << self.len) - 1) as u8 + - self.bits as u8 + - 1) + } + + pub(crate) fn cursor_from_bit_span(self) -> u8 { + self.into_bit_pos().leading_zeros() as u8 + } +} + +impl std::fmt::Binary for BitSpan { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:032b} (len {})", self.bits, self.len) + } +} diff --git a/src/types/errors.rs b/src/types/errors.rs new file mode 100644 index 00000000..252e4f78 --- /dev/null +++ b/src/types/errors.rs @@ -0,0 +1,143 @@ +use std::fmt; + +/// Possible errors returned by methods on a RIB. Most of these errors are +/// recoverable, there is one variant [PrefixStoreError::FatalError] that is +/// unrecoverable, like the stand-alone type. +#[derive(Debug, PartialEq, Eq)] +pub enum PrefixStoreError { + /// There is too much contention while creating a node: the store has + /// given up. The method or function returning this error can be safely + /// retried. + NodeCreationMaxRetryError, + /// A node that does not exist (yet), maybe due to contention. The + /// function or method causing this error can be safely retried. + NodeNotFound, + /// The method returning this error presupposes a condition that has not + /// been met, and may never be met. Retrying is safe, but may result in + /// the same error. Therefore it should probably be retried only once. 
+ StoreNotReadyError, + /// An unrecoverable error occurred, most probably during disk IO, or a + /// poisoned lock while writing. The store is probably corrupt. The caller + /// should terminate the store, and probably also terminate itself. This + /// error variant is the same as the `FatalError` type, but is used as a + /// return for methods that can also return non-fatal errors. + FatalError, + /// A best path was requested, but the selection procedure was performed + /// on a route set that is now stale. A new best path calculation over the + /// set should be performed before retrying. + PathSelectionOutdated, + /// The requested prefix was not found in the store. + PrefixNotFound, + /// The requested prefix length cannot exist. + PrefixLengthInvalid, + /// A best path was requested, but it was never calculated. Perform a best + /// path selection first, before retrying. + BestPathNotFound, + /// A record was specifically requested from the in-memory data structure, + /// but the record is not in memory. It may be persisted to disk. + RecordNotInMemory, + /// The method returning this error was trying to persist records to disk + /// but failed. Retrying is safe, but may yield the same result. + PersistFailed, + /// A status for a record was requested, but it was never set. + StatusUnknown, +} + +impl std::error::Error for PrefixStoreError {} + +impl fmt::Display for PrefixStoreError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + PrefixStoreError::NodeCreationMaxRetryError => write!( + f, + "Error: Maximum number of retries for node creation reached." + ), + PrefixStoreError::NodeNotFound => { + write!(f, "Error: Node not found.") + } + PrefixStoreError::StoreNotReadyError => { + write!(f, "Error: Store isn't ready yet.") + } + PrefixStoreError::PathSelectionOutdated => { + write!( + f, + "Error: The Path Selection process is based on \ + outdated paths." 
+ ) + } + PrefixStoreError::PrefixNotFound => { + write!(f, "Error: The Prefix cannot be found.") + } + PrefixStoreError::PrefixLengthInvalid => { + write!(f, "Error: The specified Prefix length is invalid.") + } + PrefixStoreError::BestPathNotFound => { + write!( + f, + "Error: The Prefix does not have a stored best path." + ) + } + PrefixStoreError::RecordNotInMemory => { + write!( + f, + "Error: The Record for this (prefix, mui) is not in \ + memory." + ) + } + PrefixStoreError::PersistFailed => { + write!( + f, + "Error: The record for this (prefix, mui) cannot be \ + persisted." + ) + } + PrefixStoreError::StatusUnknown => { + write!( + f, + "Warning: The record is persisted, but the upsert \ + counters cannot be reported for persist only strategy." + ) + } + PrefixStoreError::FatalError => { + write!( + f, + "FATAL: An unrecoverable error occurred during disk I/O \ + or writing memory. All data in the store should be \ + considered corrupt and the application should terminate." + ) + } + } + } +} + +/// An unrecoverable error, that can occur during disk I/O or writing memory. +/// All data in the store should be considered corrupt and the application +/// receiving this error should probably terminate. +#[derive(Debug, Copy, Clone)] +pub struct FatalError; + +impl std::fmt::Display for FatalError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "Error: A Fatal error has occurred. The store must be considered \ + corrupted. The application should terminate." 
+ ) + } +} + +pub type FatalResult = Result; + +impl std::error::Error for FatalError { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + None + } + + fn description(&self) -> &str { + "description() is deprecated; use Display" + } + + fn cause(&self) -> Option<&dyn std::error::Error> { + self.source() + } +} diff --git a/src/types/match_options.rs b/src/types/match_options.rs new file mode 100644 index 00000000..49c9caea --- /dev/null +++ b/src/types/match_options.rs @@ -0,0 +1,160 @@ +use crate::types::{prefix_record::RecordSet, Record}; +use std::fmt; + +use inetnum::addr::Prefix; + +use super::prefix_record::Meta; + +//------------ MatchOptions / MatchType ------------------------------------- + +/// Options for the `match_prefix` method +/// +/// The `MatchOptions` struct is used to specify the options for the +/// `match_prefix` method on the store. +/// +/// Note that the `match_type` field may be different from the actual +/// `MatchType` returned from the result. +/// +/// See [crate::rib::StarCastRib::match_prefix] for more details. +#[derive(Debug, Clone)] +pub struct MatchOptions { + /// The requested [MatchType] + pub match_type: MatchType, + /// Unused + pub include_withdrawn: bool, + /// Whether to include all less-specific records in the query result + pub include_less_specifics: bool, + /// Whether to include all more-specific records in the query result + pub include_more_specifics: bool, + /// Whether to return records for a specific multi_uniq_id, None indicates + /// all records. + pub mui: Option, + /// Whether to include historical records, i.e. records that have been + /// superseded by updates. `SearchPrefix` means only historical records + /// for the search prefix will be included (if present), `All` means + /// all retrieved prefixes, i.e. next to the search prefix, also the + /// historical records for less and more specific prefixes will be + /// included. 
+ pub include_history: IncludeHistory, +} + +/// Option to set the match type for a prefix match. Type can be Exact, +/// Longest, or Empty. The match type only applies to the `prefix` and +/// `records` fields in the [QueryResult] that is returned by a +/// [StarCastRib::match_prefix()] query. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum MatchType { + /// Only return the requested prefix, and the associated records, if the + /// requested prefix exactly matches the found prefix(es) (if any). + ExactMatch, + /// Return the longest matching prefix for the requested prefix (if + /// any). May match the prefix exactly. + LongestMatch, + /// Return the longest matching prefix, or none at all. + EmptyMatch, +} + +impl MatchType { + pub fn is_empty(&self) -> bool { + matches!(self, Self::EmptyMatch) + } +} + +impl std::fmt::Display for MatchType { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + MatchType::ExactMatch => write!(f, "exact-match"), + MatchType::LongestMatch => write!(f, "longest-match"), + MatchType::EmptyMatch => write!(f, "empty-match"), + } + } +} + +/// Match option to indicate that the result should return historical records, +/// for the requested prefixes and the more- and less-specific prefixes. This +/// option is ignored if the persist strategy config option is anything other +/// than `PersistHistory` or `WriteAhead`. +#[derive(Copy, Clone, Debug, Eq, PartialEq)] +pub enum IncludeHistory { + /// Do not return any historical records. + None, + /// Return historical records for the requested prefix only. + SearchPrefix, + /// Return historical records for all prefixes in the result. + All, +} + +//------------- QueryResult ------------------------------------------------- + +/// The type that is returned by a query. +/// +/// This is the result type of a query. It contains the prefix record that was +/// found in the store, as well as less- or more-specifics as requested. 
+/// +/// See [crate::rib::StarCastRib::match_prefix] for more details. + +#[derive(Clone, Debug)] +pub struct QueryResult { + /// The match type of the resulting prefix + pub match_type: MatchType, + /// The resulting prefix record + pub prefix: Option, + /// The meta data associated with the resulting prefix record + pub records: Vec>, + /// The less-specifics of the resulting prefix together with their meta + /// data + pub less_specifics: Option>, + /// The more-specifics of the resulting prefix together with their meta + //// data + pub more_specifics: Option>, +} + +impl QueryResult { + pub fn empty() -> Self { + QueryResult { + match_type: MatchType::EmptyMatch, + prefix: None, + records: vec![], + less_specifics: None, + more_specifics: None, + } + } +} + +impl fmt::Display for QueryResult { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let pfx_str = match self.prefix { + Some(pfx) => format!("{}", pfx), + None => "".to_string(), + }; + // let pfx_meta_str = match &self.prefix_meta { + // Some(pfx_meta) => format!("{}", pfx_meta), + // None => "".to_string(), + // }; + writeln!(f, "match_type: {}", self.match_type)?; + writeln!(f, "prefix: {}", pfx_str)?; + write!(f, "meta: [ ")?; + for rec in &self.records { + write!(f, "{},", rec)?; + } + writeln!(f, " ]")?; + writeln!( + f, + "less_specifics: {{ {} }}", + if let Some(ls) = self.less_specifics.as_ref() { + format!("{}", ls) + } else { + "".to_string() + } + )?; + writeln!( + f, + "more_specifics: {{ {} }}", + if let Some(ms) = self.more_specifics.as_ref() { + format!("{}", ms) + } else { + "".to_string() + } + ) + } +} diff --git a/src/types/mod.rs b/src/types/mod.rs new file mode 100644 index 00000000..6b1f3560 --- /dev/null +++ b/src/types/mod.rs @@ -0,0 +1,18 @@ +mod bit_span; +mod prefix_id; +mod tests; + +pub(crate) mod af; +pub mod match_options; +pub mod prefix_record; +pub(crate) mod route_status; + +pub(crate) use af::AddressFamily; +pub(crate) use bit_span::BitSpan; +pub(crate) use 
prefix_id::PrefixId; +pub(crate) use prefix_record::Record; +pub(crate) use route_status::RouteStatus; + +pub mod errors; +pub mod stats; +pub mod test_types; diff --git a/src/types/prefix_id.rs b/src/types/prefix_id.rs new file mode 100644 index 00000000..819e2d0d --- /dev/null +++ b/src/types/prefix_id.rs @@ -0,0 +1,105 @@ +use zerocopy::FromBytes; + +use crate::AddressFamily; + +//------------ PrefixId ------------------------------------------------------ + +// The type that acts both as an id for every prefix node in the prefix CHT, +// and as the internal prefix type. It's cut to size for an AF, unlike the +// inetnum Prefix, as not to waste memory. We use the latter on the public +// API. + +#[derive( + Hash, + Eq, + PartialEq, + Debug, + Copy, + Clone, + zerocopy::FromBytes, + zerocopy::IntoBytes, + zerocopy::KnownLayout, + zerocopy::Immutable, + zerocopy::Unaligned, +)] +#[repr(C)] +pub struct PrefixId { + // DO NOT CHANGE THE ORDER OF THESE FIELDS! + // zerocopy uses this to concatenate the bytes in this order, and the + // lsm_tree needs to have `len` first, and `net` second to create keys + // that are correctly sorted on prefix length. + len: u8, + bits: AF, +} + +impl PrefixId { + pub(crate) fn new(net: AF, len: u8) -> Self { + PrefixId { len, bits: net } + } + + pub(crate) fn bits(&self) -> AF { + self.bits + } + + pub(crate) fn len(&self) -> u8 { + self.len + } + + pub(crate) fn truncate_to_len(self, len: u8) -> Self { + Self { + bits: self.bits.truncate_to_len(len), + len, + } + } +} + +// There is no reasonable way for this to panic, PrefixId and inetnum's Prefix +// represent the same data in slightly different ways. 
+#[allow(clippy::unwrap_used)] +impl From for PrefixId { + fn from(value: inetnum::addr::Prefix) -> Self { + Self { + bits: match value.addr() { + std::net::IpAddr::V4(addr) => { + *AF::try_ref_from_bytes(&addr.octets()).unwrap() + } + std::net::IpAddr::V6(addr) => { + *AF::try_ref_from_bytes(&addr.octets()).unwrap() + } + }, + len: value.len(), + } + } +} + +// There is no reasonable way for this to panic, PrefixId and inetnum's Prefix +// represent the same data in slightly different ways. +#[allow(clippy::unwrap_used)] +impl From> for inetnum::addr::Prefix { + fn from(value: PrefixId) -> Self { + Self::new(value.bits().into_ipaddr(), value.len()).unwrap() + } +} + +#[allow(clippy::unwrap_used, clippy::indexing_slicing)] +impl From<[u8; PREFIX_SIZE]> + for PrefixId +{ + fn from(value: [u8; PREFIX_SIZE]) -> Self { + Self { + // This cannot panic for values of PREFIX_SIZE greater than 1 + bits: *AF::ref_from_bytes(&value.as_slice()[1..]).unwrap(), + len: value[0], + } + } +} + +#[allow(clippy::unwrap_used)] +impl<'a, AF: AddressFamily, const PREFIX_SIZE: usize> + From<&'a [u8; PREFIX_SIZE]> for &'a PrefixId +{ + fn from(value: &'a [u8; PREFIX_SIZE]) -> Self { + // This cannot panic for values of PREFIX_SIZE greater than 1 + PrefixId::ref_from_bytes(value.as_slice()).unwrap() + } +} diff --git a/src/types/prefix_record.rs b/src/types/prefix_record.rs new file mode 100644 index 00000000..f7abd18c --- /dev/null +++ b/src/types/prefix_record.rs @@ -0,0 +1,342 @@ +use std::fmt; +use std::fmt::Debug; + +use crate::{errors::FatalError, types::AddressFamily}; +use inetnum::addr::Prefix; +use zerocopy::{Immutable, IntoBytes, KnownLayout, TryFromBytes, Unaligned}; + +use super::PrefixId; + +pub use super::route_status::RouteStatus; + +//------------ Meta ---------------------------------------------------------- + +/// Trait for types that can be used as metadata of a record +pub trait Meta +where + Self: fmt::Debug + + fmt::Display + + Clone + + Sized + + Send + + Sync + + 
AsRef<[u8]> + + From>, +{ + type Orderable<'a>: Ord + where + Self: 'a; + type TBI: Copy; + + fn as_orderable(&self, tbi: Self::TBI) -> Self::Orderable<'_>; +} + +//------------ PublicRecord -------------------------------------------------- + +#[derive(Clone, Debug)] +pub struct Record { + pub multi_uniq_id: u32, + pub ltime: u64, + pub status: RouteStatus, + pub meta: M, +} + +impl Record { + pub fn new( + multi_uniq_id: u32, + ltime: u64, + status: RouteStatus, + meta: M, + ) -> Self { + Self { + meta, + multi_uniq_id, + ltime, + status, + } + } +} + +impl std::fmt::Display for Record { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{{ mui: {}, ltime: {}, status: {}, meta: {} }}", + self.multi_uniq_id, self.ltime, self.status, self.meta + ) + } +} + +#[derive(KnownLayout, Immutable, Unaligned, IntoBytes, TryFromBytes)] +#[repr(C, packed)] +pub(crate) struct ZeroCopyRecord { + pub prefix: PrefixId, + pub multi_uniq_id: u32, + pub ltime: u64, + pub status: RouteStatus, + pub meta: [u8], +} + +impl ZeroCopyRecord { + pub(crate) fn from_bytes(b: &[u8]) -> Result<&Self, FatalError> { + Self::try_ref_from_bytes(b).map_err(|_| FatalError) + } +} + +impl std::fmt::Display + for ZeroCopyRecord +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mui = self.multi_uniq_id; + let ltime = self.ltime; + write!( + f, + "{{ mui: {}, ltime: {}, status: {}, meta: {:?} }}", + mui, ltime, self.status, &self.meta + ) + } +} + +#[derive(KnownLayout, Immutable, Unaligned, IntoBytes, TryFromBytes)] +#[repr(C, packed)] +pub(crate) struct ValueHeader { + pub ltime: u64, + pub status: RouteStatus, +} + +impl std::fmt::Display for ValueHeader { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let ltime = self.ltime; + write!(f, "{{ ltime: {}, status: {} }}", ltime, self.status,) + } +} + +//------------ PublicPrefixRecord -------------------------------------------- + +#[derive(Clone, Debug)] +pub struct PrefixRecord { + pub 
prefix: Prefix, + pub meta: Vec>, +} + +impl PrefixRecord { + pub fn new(prefix: Prefix, meta: Vec>) -> Self { + Self { prefix, meta } + } + + pub fn get_record_for_mui(&self, mui: u32) -> Option<&Record> { + self.meta.iter().find(|r| r.multi_uniq_id == mui) + } +} + +impl From<(PrefixId, Vec>)> for PrefixRecord +where + AF: AddressFamily, + M: Meta, +{ + fn from(record: (PrefixId, Vec>)) -> Self { + Self { + prefix: record.0.into(), + meta: record.1, + } + } +} + +impl std::fmt::Display for PrefixRecord { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}: [", self.prefix)?; + for rec in &self.meta { + write!(f, "{},", rec)?; + } + write!(f, "]") + } +} + +impl From<(Prefix, Vec>)> for PrefixRecord { + fn from((prefix, meta): (Prefix, Vec>)) -> Self { + Self { prefix, meta } + } +} + +//------------ RecordSet ----------------------------------------------------- + +#[derive(Clone, Debug)] +pub struct RecordSet { + pub v4: Vec>, + pub v6: Vec>, +} + +impl RecordSet { + pub fn new() -> Self { + Self { + v4: Default::default(), + v6: Default::default(), + } + } + + pub fn push(&mut self, prefix: Prefix, meta: Vec>) { + match prefix.addr() { + std::net::IpAddr::V4(_) => &mut self.v4, + std::net::IpAddr::V6(_) => &mut self.v6, + } + .push(PrefixRecord::new(prefix, meta)); + } + + pub fn is_empty(&self) -> bool { + self.v4.is_empty() && self.v6.is_empty() + } + + pub fn iter(&self) -> RecordSetIter { + RecordSetIter { + v4: if self.v4.is_empty() { + None + } else { + Some(self.v4.iter()) + }, + v6: self.v6.iter(), + } + } + + #[must_use] + pub fn reverse(mut self) -> RecordSet { + self.v4.reverse(); + self.v6.reverse(); + self + } + + pub fn len(&self) -> usize { + self.v4.len() + self.v6.len() + } +} + +impl Default for RecordSet { + fn default() -> Self { + Self::new() + } +} + +impl fmt::Display for RecordSet { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let arr_str_v4 = + self.v4.iter().fold("".to_string(), |pfx_arr, pfx| { + 
format!("{} {}", pfx_arr, *pfx) + }); + let arr_str_v6 = + self.v6.iter().fold("".to_string(), |pfx_arr, pfx| { + format!("{} {}", pfx_arr, *pfx) + }); + + write!(f, "V4: [{}], V6: [{}]", arr_str_v4, arr_str_v6) + } +} + +impl From<(Vec>, Vec>)> + for RecordSet +{ + fn from((v4, v6): (Vec>, Vec>)) -> Self { + Self { v4, v6 } + } +} + +impl std::iter::FromIterator> for RecordSet { + fn from_iter>>(iter: I) -> Self { + let mut v4 = vec![]; + let mut v6 = vec![]; + for pfx in iter { + let u_pfx = pfx.prefix; + match u_pfx.addr() { + std::net::IpAddr::V4(_) => { + v4.push(PrefixRecord::new(u_pfx, pfx.meta)); + } + std::net::IpAddr::V6(_) => { + v6.push(PrefixRecord::new(u_pfx, pfx.meta)); + } + } + } + Self { v4, v6 } + } +} + +impl + std::iter::FromIterator<(PrefixId, Vec>)> for RecordSet +{ + fn from_iter, Vec>)>>( + iter: I, + ) -> Self { + let mut v4 = vec![]; + let mut v6 = vec![]; + for pfx in iter { + let u_pfx = Prefix::from(pfx.0); + match u_pfx.addr() { + std::net::IpAddr::V4(_) => { + v4.push(PrefixRecord::new(u_pfx, pfx.1)); + } + std::net::IpAddr::V6(_) => { + v6.push(PrefixRecord::new(u_pfx, pfx.1)); + } + } + } + Self { v4, v6 } + } +} + +impl<'a, M: Meta + 'a> std::iter::FromIterator<&'a PrefixRecord> + for RecordSet +{ + fn from_iter>>( + iter: I, + ) -> Self { + let mut v4 = vec![]; + let mut v6 = vec![]; + for pfx in iter { + let u_pfx = pfx.prefix; + match u_pfx.addr() { + std::net::IpAddr::V4(_) => { + v4.push(PrefixRecord::new(u_pfx, pfx.meta.clone())); + } + std::net::IpAddr::V6(_) => { + v6.push(PrefixRecord::new(u_pfx, pfx.meta.clone())); + } + } + } + Self { v4, v6 } + } +} + +impl std::ops::Index for RecordSet { + type Output = PrefixRecord; + + // This does not change the behaviour of the Index trait + #[allow(clippy::indexing_slicing)] + fn index(&self, index: usize) -> &Self::Output { + if index < self.v4.len() { + &self.v4[index] + } else { + &self.v6[index - self.v4.len()] + } + } +} + +//------------ RecordSetIter 
------------------------------------------------- + +#[derive(Clone, Debug)] +pub struct RecordSetIter<'a, M: Meta> { + v4: Option>>, + v6: std::slice::Iter<'a, PrefixRecord>, +} + +impl Iterator for RecordSetIter<'_, M> { + type Item = PrefixRecord; + + fn next(&mut self) -> Option { + if self.v4.is_none() { + return self.v6.next().map(|res| res.to_owned()); + } + + if let Some(res) = self.v4.as_mut().and_then(|v4| v4.next()) { + return Some(res.to_owned()); + } + self.v4 = None; + self.next() + } +} diff --git a/src/types/route_status.rs b/src/types/route_status.rs new file mode 100644 index 00000000..c8453ee9 --- /dev/null +++ b/src/types/route_status.rs @@ -0,0 +1,56 @@ +use zerocopy::{Immutable, IntoBytes, KnownLayout, TryFromBytes, Unaligned}; + +use super::errors::PrefixStoreError; + +#[derive( + Clone, + Copy, + Debug, + Hash, + PartialEq, + Eq, + TryFromBytes, + KnownLayout, + Immutable, + Unaligned, + IntoBytes, +)] +#[repr(u8)] +pub enum RouteStatus { + Active = 1, + InActive = 2, + Withdrawn = 3, +} + +impl std::fmt::Display for RouteStatus { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + RouteStatus::Active => write!(f, "active"), + RouteStatus::InActive => write!(f, "inactive"), + RouteStatus::Withdrawn => write!(f, "withdrawn"), + } + } +} + +impl From for u8 { + fn from(value: RouteStatus) -> Self { + match value { + RouteStatus::Active => 1, + RouteStatus::InActive => 2, + RouteStatus::Withdrawn => 3, + } + } +} + +impl TryFrom for RouteStatus { + type Error = PrefixStoreError; + + fn try_from(value: u8) -> Result { + match value { + 1 => Ok(RouteStatus::Active), + 2 => Ok(RouteStatus::InActive), + 3 => Ok(RouteStatus::Withdrawn), + _ => Err(PrefixStoreError::StoreNotReadyError), + } + } +} diff --git a/src/types/stats.rs b/src/types/stats.rs new file mode 100644 index 00000000..7b1b6291 --- /dev/null +++ b/src/types/stats.rs @@ -0,0 +1,279 @@ +//------------ Types for Statistics 
----------------------------------------- + +use std::{ + fmt::{Debug, Display}, + marker::PhantomData, + sync::atomic::{AtomicUsize, Ordering}, +}; + +use crate::{rib::STRIDE_SIZE, types::AddressFamily}; + +pub(crate) struct StrideStats { + pub(crate) created_nodes: Vec, + pub(crate) _prefixes_num: Vec, + _af: PhantomData, +} + +impl StrideStats { + pub fn new() -> Self { + Self { + created_nodes: Self::nodes_vec(AF::BITS / STRIDE_SIZE), + _prefixes_num: Self::nodes_vec(AF::BITS / STRIDE_SIZE), + _af: PhantomData, + } + } + + pub fn mem_usage(&self) -> usize { + STRIDE_SIZE as usize + * self.created_nodes.iter().fold(0, |mut acc, c| { + acc += c.count; + acc + }) + } + + fn nodes_vec(num_depth_levels: u8) -> Vec { + let mut vec: Vec = vec![]; + for n in 0..num_depth_levels { + vec.push(CreatedNodes { + depth_level: n, + count: 0, + }) + } + vec + } + + pub fn _inc(&mut self, depth_level: u8) { + if let Some(n) = self.created_nodes.get_mut(depth_level as usize) { + n.count += 1 + } + } + + pub fn _inc_prefix_count(&mut self, depth_level: u8) { + if let Some(p) = self._prefixes_num.get_mut(depth_level as usize) { + p.count += 1; + } + } +} + +impl Default for StrideStats { + fn default() -> Self { + Self::new() + } +} + +impl Debug for StrideStats { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Stride4:{:>8?} {:?} ({}k)", + &self.created_nodes.iter().fold(0, |mut a, n| { + a += n.count; + a + }), + &self.created_nodes, + &self.mem_usage() / 1024 + ) + } +} + +impl Display for StrideStats { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Stride4:{:>8?} {:?} ({}k)", + &self.created_nodes.iter().fold(0, |mut a, n| { + a += n.count; + a + }), + &self.created_nodes, + &self.mem_usage() / 1024 + ) + } +} + +#[derive(Copy, Clone)] +pub struct CreatedNodes { + pub depth_level: u8, + pub count: usize, +} + +impl CreatedNodes { + pub fn add(mut self, num: usize) { + self.count += num; + } +} + 
+impl Debug for CreatedNodes { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + f.write_fmt(format_args!("/{}: {}", &self.depth_level, &self.count)) + } +} + +//------------ Counters ----------------------------------------------------- +// +// This is the struct that's part of the data structure of each tree type. + +#[derive(Debug)] +pub(crate) struct Counters { + // number of created nodes in the TreeBitMap. Set to 0 for other trees. + nodes: AtomicUsize, + // number of unique prefixes in the tree + prefixes: [AtomicUsize; 129], + // number of unique (prefix, mui) values inserted in the tree. + routes: AtomicUsize, +} + +impl Counters { + pub fn nodes_count(&self) -> usize { + self.nodes.load(Ordering::Relaxed) + } + + pub fn inc_nodes_count(&self) { + self.nodes.fetch_add(1, Ordering::Relaxed); + } + + pub fn prefixes_count(&self) -> Vec { + self.prefixes + .iter() + .map(|pc| pc.load(Ordering::Relaxed)) + .collect::>() + } + + pub fn inc_prefixes_count(&self, len: u8) { + if let Some(p) = self.prefixes.get(len as usize) { + p.fetch_add(1, Ordering::Relaxed); + } + } + + pub fn _dec_prefixes_count(&self, len: u8) { + if let Some(p) = self.prefixes.get(len as usize) { + p.fetch_sub(1, Ordering::Relaxed); + } + } + + pub fn prefix_stats(&self) -> Vec { + self.prefixes + .iter() + .enumerate() + .filter_map(|(len, count)| -> Option { + let count = count.load(Ordering::Relaxed); + if count != 0 { + Some(CreatedNodes { + depth_level: len as u8, + count, + }) + } else { + None + } + }) + .collect() + } + + pub fn routes_count(&self) -> usize { + self.routes.load(Ordering::Relaxed) + } + + pub fn inc_routes_count(&self) { + self.routes.fetch_add(1, Ordering::Relaxed); + } +} + +// How can this unwrap in here ever fail? 
+#[allow(clippy::unwrap_used)] +impl Default for Counters { + fn default() -> Self { + let mut prefixes: Vec = Vec::with_capacity(129); + for _ in 0..=128 { + prefixes.push(AtomicUsize::new(0)); + } + + Self { + nodes: AtomicUsize::new(0), + prefixes: prefixes.try_into().unwrap(), + routes: AtomicUsize::new(0), + } + } +} + +//------------ UpsertCounters ------------------------------------------------ +// +// The Counters struct holds atomic values, so this struct exists to return a +// set of counters from the RIB to users. + +#[derive(Debug)] +pub struct UpsertCounters { + // number of unique inserted prefixes|routes in the in-mem tree + pub(crate) in_memory_count: usize, + // number of unique persisted prefixes|routes + pub(crate) persisted_count: usize, + // total number of unique inserted prefixes|routes in the RIB + pub(crate) total_count: usize, +} + +impl UpsertCounters { + pub fn in_memory(&self) -> usize { + self.in_memory_count + } + + pub fn persisted(&self) -> usize { + self.persisted_count + } + + pub fn total(&self) -> usize { + self.total_count + } +} + +// impl std::fmt::Display for UpsertCounters { +// fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { +// writeln!(f, "Unique Items in-memory:\t{}", self.in_memory_count)?; +// writeln!(f, "Unique persisted Items:\t{}", self.persisted_count)?; +// writeln!(f, "Total inserted Items:\t{}", self.total_count) +// } +// } + +impl std::ops::AddAssign for UpsertCounters { + fn add_assign(&mut self, rhs: Self) { + self.in_memory_count += rhs.in_memory_count; + self.persisted_count += rhs.persisted_count; + self.total_count += rhs.total_count; + } +} + +impl std::ops::Add for UpsertCounters { + type Output = UpsertCounters; + + fn add(self, rhs: Self) -> Self::Output { + Self { + in_memory_count: self.in_memory_count + rhs.in_memory_count, + persisted_count: self.persisted_count + rhs.persisted_count, + total_count: self.total_count + rhs.total_count, + } + } +} + +//------------ 
StoreStats ---------------------------------------------------- + +#[derive(Debug)] +pub struct StoreStats { + pub v4: Vec, + pub v6: Vec, +} + +//------------ UpsertReport -------------------------------------------------- + +#[derive(Debug)] +pub struct UpsertReport { + // Indicates the number of Atomic Compare-and-Swap operations were + // necessary to create/update the Record entry. High numbers indicate + // contention. + pub cas_count: usize, + // Indicates whether this was the first mui record for this prefix was + // created. So, the prefix did not exist before hand. + pub prefix_new: bool, + // Indicates whether this mui was new for this prefix. False means an old + // value was overwritten. + pub mui_new: bool, + // The number of mui records for this prefix after the upsert operation. + pub mui_count: usize, +} diff --git a/src/types/test_types.rs b/src/types/test_types.rs new file mode 100644 index 00000000..227d06bc --- /dev/null +++ b/src/types/test_types.rs @@ -0,0 +1,133 @@ +use inetnum::asn::Asn; + +use super::prefix_record::Meta; + +#[derive(Clone, Copy, Hash)] +pub enum NoMeta { + Empty, +} + +impl std::fmt::Debug for NoMeta { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("") + } +} + +impl std::fmt::Display for NoMeta { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str("NoMeta") + } +} + +impl Meta for NoMeta { + type Orderable<'a> = (); + type TBI = (); + fn as_orderable(&self, _tbi: Self::TBI) {} +} + +impl AsRef<[u8]> for NoMeta { + fn as_ref(&self) -> &[u8] { + &[] + } +} + +impl From> for NoMeta { + fn from(_value: Vec) -> Self { + Self::Empty + } +} + +//------------ BeBytesAsn ---------------------------------------------------- +// +#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)] +pub struct BeBytesAsn(pub [u8; 4]); + +impl AsRef<[u8]> for BeBytesAsn { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +impl From> for BeBytesAsn { + fn from(value: Vec) 
-> Self { + if let Some(value) = value.first_chunk::<4>() { + Self(*value) + } else { + Self([0; 4]) + } + } +} + +impl Meta for BeBytesAsn { + type Orderable<'a> = Asn; + type TBI = (); + + fn as_orderable(&self, _tbi: Self::TBI) -> Asn { + u32::from_be_bytes(self.0).into() + } +} + +impl std::fmt::Display for BeBytesAsn { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "AS{}", ::from_le_bytes(self.0)) + } +} + +impl From for BeBytesAsn { + fn from(value: Asn) -> Self { + Self(u32::from_be_bytes(value.to_raw()).to_le_bytes()) + } +} + +impl From for BeBytesAsn { + fn from(value: u32) -> Self { + Self(value.to_le_bytes()) + } +} + +//------------ PrefixAs ------------------------------------------------------ + +#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub struct PrefixAs([u8; 4]); + +impl PrefixAs { + pub fn new(asn: Asn) -> Self { + PrefixAs(u32::from_be_bytes(asn.to_raw()).to_le_bytes()) + } + + pub fn new_from_u32(value: u32) -> Self { + PrefixAs(value.to_le_bytes()) + } + + pub fn asn(&self) -> Asn { + Asn::from_u32(u32::from_le_bytes(self.0)) + } +} + +impl Meta for PrefixAs { + type Orderable<'a> = Asn; + type TBI = (); + fn as_orderable(&self, _tbi: Self::TBI) -> Asn { + u32::from_le_bytes(self.0).into() + } +} + +impl AsRef<[u8]> for PrefixAs { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + +// This is no production code, crash all you want +#[allow(clippy::unwrap_used)] +impl From> for PrefixAs { + fn from(value: Vec) -> Self { + Self(*value.first_chunk::<4>().unwrap()) + } +} + +impl std::fmt::Display for PrefixAs { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "AS{}", u32::from_le_bytes(self.0)) + } +} diff --git a/src/types/tests.rs b/src/types/tests.rs new file mode 100644 index 00000000..20230d68 --- /dev/null +++ b/src/types/tests.rs @@ -0,0 +1,69 @@ +#[cfg(test)] +use std::error::Error; + +//------------ AddressFamily bit flippers 
----------------------------------- + +#[test] +fn test_af_1() -> Result<(), Box> { + use crate::tree_bitmap::NodeId; + use crate::types::BitSpan; + use crate::AddressFamily; + use crate::IPv4; + + let bit_addr: IPv4 = 0b1111_1111_1111_1111_1111_1111_1111_1111.into(); + let base_prefix = NodeId::dangerously_new_with_id_as_is(bit_addr, 32); + + assert_eq!(base_prefix.bits(), bit_addr); + assert_eq!(base_prefix.truncate_to_len().bits(), base_prefix.bits()); + assert_eq!( + NodeId::dangerously_new_with_id_as_is( + base_prefix.bits().truncate_to_len(28), + 28 + ) + .add_bit_span(BitSpan { + bits: 0b0101, + len: 4 + }) + .bits(), + 0b1111_1111_1111_1111_1111_1111_1111_0101 + ); + + Ok(()) +} + +#[test] +fn test_af_2() -> Result<(), Box> { + use crate::IPv4; + use crate::{tree_bitmap::NodeId, types::BitSpan}; + + let bit_addr: IPv4 = 0b1111_1111_1111_1111_1111_1111_1111_1111.into(); + let nu_prefix = NodeId::dangerously_new_with_id_as_is(bit_addr, 8); + + assert_eq!(nu_prefix.bits(), bit_addr); + assert_eq!( + nu_prefix.truncate_to_len().bits(), + 0b1111_1111_0000_0000_0000_0000_0000_0000 + ); + + assert_eq!( + nu_prefix + .add_bit_span(BitSpan { + bits: 0b1010, + len: 4 + }) + .bits(), + 0b1111_1111_1010_0000_0000_0000_0000_0000 + ); + assert_eq!( + nu_prefix + .truncate_to_len() + .add_bit_span(BitSpan { + bits: 0b1010, + len: 4 + }) + .bits(), + 0b1111_1111_1010_0000_0000_0000_0000_0000 + ); + + Ok(()) +} diff --git a/tests/best-path.rs b/tests/best-path.rs index 72625c75..577ee463 100644 --- a/tests/best-path.rs +++ b/tests/best-path.rs @@ -1,20 +1,24 @@ use inetnum::addr::Prefix; -use rotonda_store::prelude::multi::PrefixStoreError; -use rotonda_store::prelude::multi::Record; -use rotonda_store::prelude::multi::RouteStatus; -use rotonda_store::MatchOptions; use inetnum::asn::Asn; +use log::trace; +use rotonda_store::errors::PrefixStoreError; +use rotonda_store::match_options::IncludeHistory; +use rotonda_store::match_options::MatchOptions; +use 
rotonda_store::match_options::MatchType; +use rotonda_store::prefix_record::Meta; +use rotonda_store::prefix_record::Record; +use rotonda_store::prefix_record::RouteStatus; +use rotonda_store::rib::config::MemoryOnlyConfig; +use rotonda_store::rib::StarCastRib; use routecore::bgp::aspath::HopPath; use routecore::bgp::path_attributes::BgpIdentifier; use routecore::bgp::path_attributes::PaMap; use routecore::bgp::path_selection::RouteSource; +use routecore::bgp::path_selection::{OrdRoute, Rfc4271, TiebreakerInfo}; use routecore::bgp::types::LocalPref; use routecore::bgp::types::Origin; use std::net::Ipv4Addr; use std::str::FromStr; -use rotonda_store::Meta; -use rotonda_store::MultiThreadedStore; -use routecore::bgp::path_selection::{OrdRoute, Rfc4271, TiebreakerInfo}; #[derive(Clone, Debug)] pub struct Ipv4Route(u32, PaMap, TiebreakerInfo); @@ -32,7 +36,20 @@ impl Meta for Ipv4Route { type TBI = (); fn as_orderable(&self, _tbi: Self::TBI) -> Self::Orderable<'_> { - routecore::bgp::path_selection::OrdRoute::rfc4271(&self.1, self.2).unwrap() + routecore::bgp::path_selection::OrdRoute::rfc4271(&self.1, self.2) + .unwrap() + } +} + +impl AsRef<[u8]> for Ipv4Route { + fn as_ref(&self) -> &[u8] { + todo!() + } +} + +impl From> for Ipv4Route { + fn from(_value: Vec) -> Self { + todo!() } } @@ -47,84 +64,164 @@ mod common { } } +// rotonda_store::all_strategies![ +// best_path; +// test_best_path_1; +// Ipv4Route +// ]; + #[test] -fn test_best_path_1() -> Result<(), Box> { +fn test_best_path_1(// tree_bitmap: MultiThreadedStore, +) -> Result<(), Box> { crate::common::init(); - - let tree_bitmap = std::sync::Arc::new(std::sync::Arc::new(MultiThreadedStore::::new()?)); + + let tree_bitmap = + std::sync::Arc::new(std::sync::Arc::new(StarCastRib::< + Ipv4Route, + MemoryOnlyConfig, + >::try_default()?)); + + trace!("Done creating tree..."); let pfx = Prefix::from_str("185.34.0.0/16")?; - let mut asns = [Asn::from(65400), Asn::from(65401), Asn::from(65402), Asn::from(65403), 
Asn::from(65404)].into_iter(); + let mut asns = [ + Asn::from(65400), + Asn::from(65401), + Asn::from(65402), + Asn::from(65403), + Asn::from(65404), + ] + .into_iter(); let mut pa_map = PaMap::empty(); pa_map.set::(routecore::bgp::types::LocalPref(50)); - pa_map.set::( - HopPath::from(vec![Asn::from(65400), Asn::from(65401), Asn::from(65402)]) - ); - pa_map.set::(routecore::bgp::types::Origin(routecore::bgp::types::OriginType::Egp)); + pa_map.set::(HopPath::from(vec![ + Asn::from(65400), + Asn::from(65401), + Asn::from(65402), + ])); + pa_map.set::(routecore::bgp::types::Origin( + routecore::bgp::types::OriginType::Egp, + )); let mut asns_insert = vec![]; - - // Out TiebreakInfo consists of some values that are the same for all of - // our routes, and some that are specific to the route. - let tbi_modifier = |peer_addr: Ipv4Addr, local_asn: Asn, bgp_identifier: BgpIdentifier| { - TiebreakerInfo::new( - RouteSource::Ebgp, - None, - local_asn, - bgp_identifier, - std::net::IpAddr::V4(peer_addr) - ) - }; + + // Out TiebreakInfo consists of some values that are the same for all of + // our routes, and some that are specific to the route. 
+ let tbi_modifier = + |peer_addr: Ipv4Addr, + local_asn: Asn, + bgp_identifier: BgpIdentifier| { + TiebreakerInfo::new( + RouteSource::Ebgp, + None, + local_asn, + bgp_identifier, + std::net::IpAddr::V4(peer_addr), + ) + }; for (mui, tbi) in [ - (1, tbi_modifier(std::net::Ipv4Addr::from_str("192.168.12.1")?, Asn::from(65400), BgpIdentifier::from([0; 4]) )), - (2, tbi_modifier(std::net::Ipv4Addr::from_str("192.168.12.2")?, Asn::from(65400), BgpIdentifier::from([0; 4]) )), - (3, tbi_modifier(std::net::Ipv4Addr::from_str("192.168.12.3")?, Asn::from(65400), BgpIdentifier::from([0; 4]) )), - (4, tbi_modifier(std::net::Ipv4Addr::from_str("192.168.12.4")?, Asn::from(65400), BgpIdentifier::from([0; 4]) )), - (5, tbi_modifier(std::net::Ipv4Addr::from_str("192.168.12.5")?, Asn::from(65400), BgpIdentifier::from([0; 4]) )), + ( + 1, + tbi_modifier( + std::net::Ipv4Addr::from_str("192.168.12.1")?, + Asn::from(65400), + BgpIdentifier::from([0; 4]), + ), + ), + ( + 2, + tbi_modifier( + std::net::Ipv4Addr::from_str("192.168.12.2")?, + Asn::from(65400), + BgpIdentifier::from([0; 4]), + ), + ), + ( + 3, + tbi_modifier( + std::net::Ipv4Addr::from_str("192.168.12.3")?, + Asn::from(65400), + BgpIdentifier::from([0; 4]), + ), + ), + ( + 4, + tbi_modifier( + std::net::Ipv4Addr::from_str("192.168.12.4")?, + Asn::from(65400), + BgpIdentifier::from([0; 4]), + ), + ), + ( + 5, + tbi_modifier( + std::net::Ipv4Addr::from_str("192.168.12.5")?, + Asn::from(65400), + BgpIdentifier::from([0; 4]), + ), + ), ] { asns_insert.push(asns.next().unwrap()); pa_map.set::(HopPath::from(asns_insert.clone())); - let rec = Record::new(mui,0, RouteStatus::Active, Ipv4Route(mui, pa_map.clone(), tbi)); - tree_bitmap.insert( - &pfx, - rec, - None - )?; + let rec = Record::new( + mui, + 0, + RouteStatus::Active, + Ipv4Route(mui, pa_map.clone(), tbi), + ); + tree_bitmap.insert(&pfx, rec, None)?; + trace!("inserted {}", pfx); } + trace!("done inserting prefixes..."); + let res = tree_bitmap.match_prefix( &pfx, - 
&MatchOptions { - match_type: rotonda_store::MatchType::ExactMatch, + &MatchOptions { + match_type: MatchType::ExactMatch, include_withdrawn: false, include_less_specifics: false, include_more_specifics: false, - mui: None + mui: None, + include_history: IncludeHistory::None, }, - &rotonda_store::epoch::pin() + &rotonda_store::epoch::pin(), ); - println!("{:?}", res.prefix_meta); + println!("{:?}", res.as_ref().unwrap().records); let best_path = tree_bitmap.best_path(&pfx, &rotonda_store::epoch::pin()); - println!("ps outdated? {}", tree_bitmap.is_ps_outdated(&pfx, &rotonda_store::epoch::pin()).unwrap()); + println!( + "ps outdated? {}", + tree_bitmap + .is_ps_outdated(&pfx, &rotonda_store::epoch::pin()) + .unwrap() + ); println!("{:?}", best_path); // We didn't calculate the best path yet, but the prefix (and its entries) // exists, so this should be `Some(Err(BestPathNotFound))` at this point. - assert_eq!(best_path.unwrap().err().unwrap(), PrefixStoreError::BestPathNotFound); + assert_eq!( + best_path.unwrap().err().unwrap(), + PrefixStoreError::BestPathNotFound + ); tree_bitmap.calculate_and_store_best_and_backup_path( &pfx, &(), - &rotonda_store::epoch::pin() + &rotonda_store::epoch::pin(), )?; let best_path = tree_bitmap.best_path(&pfx, &rotonda_store::epoch::pin()); - println!("ps outdated? {}", tree_bitmap.is_ps_outdated(&pfx, &rotonda_store::epoch::pin()).unwrap()); + println!( + "ps outdated? 
{}", + tree_bitmap + .is_ps_outdated(&pfx, &rotonda_store::epoch::pin()) + .unwrap() + ); println!("{:?}", best_path); assert_eq!(best_path.unwrap().unwrap().multi_uniq_id, 1); Ok(()) -} \ No newline at end of file +} diff --git a/tests/concurrency.rs b/tests/concurrency.rs index 249dcd72..409896c3 100644 --- a/tests/concurrency.rs +++ b/tests/concurrency.rs @@ -2,8 +2,14 @@ use std::{str::FromStr, sync::atomic::Ordering}; use inetnum::{addr::Prefix, asn::Asn}; use rotonda_store::{ - prelude::multi::{Record, RouteStatus}, - MatchOptions, MultiThreadedStore, + errors::FatalResult, + match_options::{IncludeHistory, MatchOptions, MatchType}, + prefix_record::{PrefixRecord, Record, RouteStatus}, + rib::{ + config::{Config, MemoryOnlyConfig}, + StarCastRib, + }, + test_types::{BeBytesAsn, NoMeta}, }; mod common { @@ -17,8 +23,58 @@ mod common { } } -#[test] -fn test_concurrent_updates_1() -> Result<(), Box> { +rotonda_store::all_strategies![ + test_cc_updates_1; + test_concurrent_updates_1; + BeBytesAsn +]; + +fn iter( + pfxs_iter: &[FatalResult>], + pfx: Prefix, +) -> impl Iterator> + '_ { + pfxs_iter + .iter() + .find(|p| p.as_ref().unwrap().prefix == pfx) + .unwrap() + .as_ref() + .unwrap() + .meta + .iter() +} + +fn iter_len( + pfxs_iter: &[FatalResult>], + pfx: Prefix, +) -> usize { + pfxs_iter + .iter() + .find(|p| p.as_ref().unwrap().prefix == pfx) + .unwrap() + .as_ref() + .unwrap() + .meta + .len() +} + +fn first_meta( + pfxs_iter: &[FatalResult>], + pfx: Prefix, +) -> BeBytesAsn { + pfxs_iter + .iter() + .find(|p| p.as_ref().unwrap().prefix == pfx) + .unwrap() + .as_ref() + .unwrap() + .meta[0] + .meta + .clone() +} + +fn test_concurrent_updates_1( + tree_bitmap: StarCastRib, +) -> Result<(), Box> { crate::common::init(); let pfx_vec_1 = vec![ @@ -48,7 +104,15 @@ fn test_concurrent_updates_1() -> Result<(), Box> { pfxs: Vec, } - let tree_bitmap = std::sync::Arc::new(MultiThreadedStore::::new()?); + // let store_config = StoreConfig { + // persist_strategy: 
rotonda_store::rib::PersistStrategy::PersistOnly, + // persist_path: "/tmp/rotonda/".into(), + // }; + + // let store = std::sync::Arc::new( + // MultiThreadedStore::::new_with_config(store_config)?, + // ); + let guard = &rotonda_store::epoch::pin(); let mui_data_1 = MuiData { mui: 1, @@ -69,12 +133,12 @@ fn test_concurrent_updates_1() -> Result<(), Box> { }; let cur_ltime = std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0)); - + let tree_bitmap = std::sync::Arc::new(tree_bitmap); let _: Vec<_> = vec![mui_data_1, mui_data_2, mui_data_3] .into_iter() .map(|data: MuiData| { - let tree_bitmap = tree_bitmap.clone(); let cur_ltime = cur_ltime.clone(); + let tbm = tree_bitmap.clone(); std::thread::Builder::new() .name(data.mui.to_string()) @@ -83,13 +147,13 @@ fn test_concurrent_updates_1() -> Result<(), Box> { for pfx in data.pfxs { let _ = cur_ltime.fetch_add(1, Ordering::Release); - match tree_bitmap.insert( + match tbm.insert( &pfx, Record::new( data.mui, cur_ltime.load(Ordering::Acquire), RouteStatus::Active, - data.asn, + data.asn.into(), ), None, ) { @@ -107,193 +171,100 @@ fn test_concurrent_updates_1() -> Result<(), Box> { .map(|t| t.join()) .collect(); - println!("{:#?}", tree_bitmap.prefixes_iter().collect::>()); + println!("COUNT {:?}", tree_bitmap.prefixes_count()); - let all_pfxs_iter = tree_bitmap.prefixes_iter().collect::>(); + let all_pfxs_iter = tree_bitmap.prefixes_iter(guard).collect::>(); + println!("all_pfxs_iter {:#?}", all_pfxs_iter); let pfx = Prefix::from_str("185.34.0.0/16").unwrap(); - assert!(all_pfxs_iter.iter().any(|p| p.prefix == pfx)); + + assert!(tree_bitmap.contains(&pfx, None)); + assert!(tree_bitmap.contains(&pfx, Some(1))); + assert!(tree_bitmap.contains(&pfx, Some(2))); + assert!(all_pfxs_iter .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + .any(|p| p.as_ref().unwrap().prefix == pfx)); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 1 && m.meta == 65501.into())); - assert!(all_pfxs_iter - 
.iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 2 && m.meta == 65502.into())); let pfx = Prefix::from_str("185.34.10.0/24").unwrap(); - assert!(all_pfxs_iter.iter().any(|p| p.prefix == pfx)); assert!(all_pfxs_iter .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + .any(|p| p.as_ref().unwrap().prefix == pfx)); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 1 && m.meta == 65501.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 2 && m.meta == 65502.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 3 && m.meta == 65503.into())); let pfx = Prefix::from_str("185.34.11.0/24").unwrap(); - assert!(all_pfxs_iter.iter().any(|p| p.prefix == pfx)); assert!(all_pfxs_iter .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + .any(|p| pfx == p.as_ref().unwrap().prefix)); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 1 && m.meta == 65501.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 2 || m.meta == 65502.into()))); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 3 || m.meta == 65503.into()))); let pfx = Prefix::from_str("185.34.11.0/24").unwrap(); - assert!(all_pfxs_iter.iter().any(|p| p.prefix == pfx)); assert!(all_pfxs_iter .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + .any(|p| p.as_ref().unwrap().prefix == pfx)); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 1 && m.meta == 65501.into())); - assert!(all_pfxs_iter - .iter() - 
.find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 2 || m.meta == 65502.into()))); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 3 && m.meta == 65503.into()))); let pfx = Prefix::from_str("185.34.12.0/24").unwrap(); - assert!(all_pfxs_iter.iter().any(|p| p.prefix == pfx)); assert!(all_pfxs_iter .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + .any(|p| p.as_ref().unwrap().prefix == pfx)); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 2 && m.meta == 65502.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 3 && m.meta == 65503.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 1 || m.meta == 65501.into()))); let pfx = Prefix::from_str("183.0.0.0/8")?; - assert!(all_pfxs_iter.iter().any(|p| p.prefix == pfx)); assert!(all_pfxs_iter .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + .any(|p| p.as_ref().unwrap().prefix == pfx)); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 1 || m.meta == 65501.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 2 && m.meta == 65502.into()))); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 3 && m.meta == 65503.into()))); let pfx = Prefix::from_str("186.0.0.0/8")?; - assert!(all_pfxs_iter.iter().any(|p| p.prefix == pfx)); assert!(all_pfxs_iter .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + .any(|p| 
p.as_ref().unwrap().prefix == pfx)); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 2 && m.meta == 65502.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 1 || m.meta == 65501.into()))); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 3 && m.meta == 65503.into()))); let pfx = Prefix::from_str("187.0.0.0/8")?; - assert!(all_pfxs_iter.iter().any(|p| p.prefix == pfx)); assert!(all_pfxs_iter .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + .any(|p| p.as_ref().unwrap().prefix == pfx)); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 3 && m.meta == 65503.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 2 && m.meta == 65502.into()))); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .all(|m| !(m.multi_uniq_id == 1 || m.meta == 65501.into()))); // Create Withdrawals @@ -312,7 +283,7 @@ fn test_concurrent_updates_1() -> Result<(), Box> { let _ = cur_ltime.fetch_add(1, Ordering::Release); tree_bitmap - .mark_mui_as_withdrawn_for_prefix(&pfx, 2) + .mark_mui_as_withdrawn_for_prefix(&pfx, 2, 10) .unwrap(); println!("--thread withdraw 2 done."); @@ -322,22 +293,31 @@ fn test_concurrent_updates_1() -> Result<(), Box> { .map(|t| t.join()) .collect(); - println!("{:#?}", tree_bitmap.prefixes_iter().collect::>()); + println!( + "prefixes_iter {:#?}", + tree_bitmap + .as_ref() + .prefixes_iter(guard) + .collect::>() + ); let match_options = MatchOptions { - match_type: rotonda_store::MatchType::ExactMatch, + match_type: MatchType::ExactMatch, include_withdrawn: true, include_less_specifics: false, 
include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }; for pfx in pfx_vec_2 { let guard = rotonda_store::epoch::pin(); - let res = tree_bitmap.match_prefix(&pfx, &match_options, &guard); + let res = tree_bitmap.match_prefix(&pfx, &match_options, &guard)?; assert_eq!(res.prefix, Some(pfx)); + println!("strategy {:?}", tree_bitmap.persist_strategy()); + println!("PFX {}", res); assert_eq!( - res.prefix_meta + res.records .iter() .find(|m| m.multi_uniq_id == 2) .unwrap() @@ -348,8 +328,15 @@ fn test_concurrent_updates_1() -> Result<(), Box> { Ok(()) } +// rotonda_store::all_strategies_arced![ +// test_cc_updates_2; +// test_concurrent_updates_2; +// BeBytesAsn +// ]; + #[test] -fn test_concurrent_updates_2() -> Result<(), Box> { +fn test_concurrent_updates_2(// tree_bitmap: Arc>, +) -> Result<(), Box> { crate::common::init(); let pfx_vec_1 = vec![ @@ -373,28 +360,22 @@ fn test_concurrent_updates_2() -> Result<(), Box> { Prefix::from_str("188.0.0.0/8")?, ]; - #[derive(Debug)] - struct MuiData { - asn: u32, - } - - let tree_bitmap = std::sync::Arc::new(MultiThreadedStore::::new()?); - - const MUI_DATA: [MuiData; 4] = [ - MuiData { asn: 65501 }, - MuiData { asn: 65502 }, - MuiData { asn: 65503 }, - MuiData { asn: 65504 }, - ]; + const MUI_DATA: [u32; 4] = [65501, 65502, 65503, 65504]; let cur_ltime = std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0)); + let tree_bitmap = std::sync::Arc::new(StarCastRib::< + BeBytesAsn, + MemoryOnlyConfig, + >::try_default()?); + let guard = &rotonda_store::epoch::pin(); + let _: Vec<_> = vec![pfx_vec_1.clone(), pfx_vec_2.clone(), pfx_vec_3.clone()] .into_iter() .enumerate() .map(|(n, pfxs)| { - let tree_bitmap = tree_bitmap.clone(); + let tbm = std::sync::Arc::clone(&tree_bitmap); let cur_ltime = cur_ltime.clone(); std::thread::Builder::new() @@ -404,19 +385,19 @@ fn test_concurrent_updates_2() -> Result<(), Box> { for (i, pfx) in pfxs.iter().enumerate() { let _ = cur_ltime.fetch_add(1, 
Ordering::Release); - match tree_bitmap.insert( + match tbm.insert( pfx, Record::new( i as u32 + 1, cur_ltime.load(Ordering::Acquire), RouteStatus::Active, - MUI_DATA[i].asn.into(), + Asn::from(MUI_DATA[i]).into(), ), None, ) { Ok(_) => {} Err(e) => { - println!("{}", e); + println!("Err: {}", e); } }; } @@ -428,150 +409,65 @@ fn test_concurrent_updates_2() -> Result<(), Box> { .map(|t| t.join()) .collect(); - println!("{:#?}", tree_bitmap.prefixes_iter().collect::>()); + println!( + "prefixes_iter#1 :{:#?}", + tree_bitmap.prefixes_iter(guard).collect::>() + ); - let all_pfxs_iter = tree_bitmap.prefixes_iter().collect::>(); + let all_pfxs_iter = tree_bitmap.prefixes_iter(guard).collect::>(); let pfx = Prefix::from_str("185.33.0.0/16").unwrap(); - assert!(all_pfxs_iter.iter().any(|p| p.prefix == pfx)); assert!(all_pfxs_iter .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + .any(|p| p.as_ref().unwrap().prefix == pfx)); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 1 && m.meta == 65501.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 2 && m.meta == 65502.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 3 && m.meta == 65503.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 4 && m.meta == 65504.into())); let pfx = Prefix::from_str("185.34.0.0/16").unwrap(); - assert_eq!( - all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .len(), - 2 - ); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert_eq!(iter_len(&all_pfxs_iter, pfx), 2); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 1 && m.meta == 
65501.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 2 && m.meta == 65502.into())); let pfx = Prefix::from_str("185.34.14.0/24").unwrap(); + assert_eq!(iter_len(&all_pfxs_iter, pfx), 1); assert_eq!( all_pfxs_iter .iter() - .find(|p| p.prefix == pfx) + .find(|p| p.as_ref().unwrap().prefix == pfx) .unwrap() - .meta - .len(), - 1 - ); - assert_eq!( - all_pfxs_iter.iter().find(|p| p.prefix == pfx).unwrap().meta[0].meta, - Asn::from_u32(65503) + .as_ref() + .unwrap() + .meta[0] + .meta, + Asn::from_u32(65503).into() ); let pfx = Prefix::from_str("187.0.0.0/8").unwrap(); - assert_eq!( - all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .len(), - 1 - ); - assert_eq!( - all_pfxs_iter.iter().find(|p| p.prefix == pfx).unwrap().meta[0].meta, - Asn::from_u32(65504) - ); + assert_eq!(iter_len(&all_pfxs_iter, pfx), 1); + assert_eq!(first_meta(&all_pfxs_iter, pfx), Asn::from_u32(65504).into()); let pfx = Prefix::from_str("185.35.0.0/16").unwrap(); - assert_eq!( - all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .len(), - 1 - ); - assert_eq!( - all_pfxs_iter.iter().find(|p| p.prefix == pfx).unwrap().meta[0].meta, - Asn::from_u32(65501) - ); + assert_eq!(iter_len(&all_pfxs_iter, pfx), 1); + assert_eq!(first_meta(&all_pfxs_iter, pfx), Asn::from_u32(65501).into()); let pfx = Prefix::from_str("185.34.15.0/24").unwrap(); - assert_eq!( - all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .len(), - 2 - ); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert_eq!(iter_len(&all_pfxs_iter, pfx), 2); + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 2 && m.meta == 65502.into())); - assert!(all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .iter() + assert!(iter(&all_pfxs_iter, pfx) .any(|m| m.multi_uniq_id == 3 && 
m.meta == 65503.into())); let pfx = Prefix::from_str("188.0.0.0/8").unwrap(); - assert_eq!( - all_pfxs_iter - .iter() - .find(|p| p.prefix == pfx) - .unwrap() - .meta - .len(), - 1 - ); - assert_eq!( - all_pfxs_iter.iter().find(|p| p.prefix == pfx).unwrap().meta[0].meta, - Asn::from_u32(65504) - ); + assert_eq!(iter_len(&all_pfxs_iter, pfx), 1); + assert_eq!(first_meta(&all_pfxs_iter, pfx), Asn::from_u32(65504).into()); // Create Withdrawals let wd_pfxs = [pfx_vec_1[1], pfx_vec_2[1], pfx_vec_3[1]]; @@ -579,7 +475,7 @@ fn test_concurrent_updates_2() -> Result<(), Box> { let _: Vec<_> = wd_pfxs .into_iter() .map(|pfx: Prefix| { - let tree_bitmap = tree_bitmap.clone(); + let tbm = std::sync::Arc::clone(&tree_bitmap); let cur_ltime = cur_ltime.clone(); std::thread::Builder::new() @@ -588,8 +484,7 @@ fn test_concurrent_updates_2() -> Result<(), Box> { print!("\nstart withdraw {} ---", 2); let _ = cur_ltime.fetch_add(1, Ordering::Release); - tree_bitmap - .mark_mui_as_withdrawn_for_prefix(&pfx, 2) + tbm.mark_mui_as_withdrawn_for_prefix(&pfx, 2, 15) .unwrap(); println!("--thread withdraw 2 done."); @@ -600,19 +495,22 @@ fn test_concurrent_updates_2() -> Result<(), Box> { .collect(); let match_options = MatchOptions { - match_type: rotonda_store::MatchType::ExactMatch, + match_type: MatchType::ExactMatch, include_withdrawn: true, include_less_specifics: false, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }; for pfx in wd_pfxs { let guard = rotonda_store::epoch::pin(); let res = tree_bitmap.match_prefix(&pfx, &match_options, &guard); - assert_eq!(res.prefix, Some(pfx)); + assert_eq!(res.as_ref().unwrap().prefix, Some(pfx)); + println!("RES {:#?}", res); assert_eq!( - res.prefix_meta + res.unwrap() + .records .iter() .find(|m| m.multi_uniq_id == 2) .unwrap() @@ -621,28 +519,167 @@ fn test_concurrent_updates_2() -> Result<(), Box> { ); } + println!("get all prefixes"); let match_options = MatchOptions { - match_type: 
rotonda_store::MatchType::ExactMatch, + match_type: MatchType::EmptyMatch, include_withdrawn: false, include_less_specifics: false, include_more_specifics: true, mui: None, + include_history: IncludeHistory::None, }; - let pfx = Prefix::from_str("0.0.0.0/0").unwrap(); + println!("strategy {:?}", tree_bitmap.persist_strategy()); + // should cover all the prefixes + // let pfx0 = Prefix::from_str("184.0.0.0/6").unwrap(); + let pfx128 = Prefix::from_str("128.0.0.0/1").unwrap(); let guard = rotonda_store::epoch::pin(); - let res = tree_bitmap.match_prefix(&pfx, &match_options, &guard); + // let res0 = tree_bitmap.match_prefix(&pfx0, &match_options, &guard); + + // println!("000 {:#?}", res0); + + assert!(tree_bitmap + .contains(&Prefix::from_str("185.34.14.0/24").unwrap(), None)); + + tree_bitmap + .insert( + &Prefix::from_str("32.0.0.0/4").unwrap(), + Record::new( + 1, + cur_ltime.load(Ordering::Acquire), + RouteStatus::Active, + Asn::from(653400).into(), + ), + None, + ) + .unwrap(); + + assert!( + tree_bitmap.contains(&Prefix::from_str("32.0.0.0/4").unwrap(), None) + ); - println!("{:#?}", res); + let mp02 = tree_bitmap + .match_prefix( + &Prefix::from_str("0.0.0.0/2").unwrap(), + &match_options, + &guard, + ) + .unwrap() + .more_specifics + .unwrap(); + println!("0/2 {}", mp02); + assert_eq!(mp02.len(), 1); + + let res128 = tree_bitmap.match_prefix(&pfx128, &match_options, &guard); + println!("128 {:#?}", res128); + // let guard = rotonda_store::epoch::pin(); + // println!( + // "more_specifics_iter_from {:#?}", + // tree_bitmap.more_specifics_keys_from(&pfx128) + // ); let active_len = all_pfxs_iter .iter() - .filter(|p| p.meta.iter().all(|m| m.status == RouteStatus::Active)) + .filter(|p| { + p.as_ref() + .unwrap() + .meta + .iter() + .all(|m| m.status == RouteStatus::Active) + }) .collect::>() .len(); assert_eq!(active_len, all_pfxs_iter.len()); - let len_2 = res.more_specifics.unwrap().v4.len(); - assert_eq!(active_len, len_2); + // let len_2 = 
res0.more_specifics.unwrap().v4.len() + assert_eq!(active_len, res128?.more_specifics.unwrap().v4.len()); + + Ok(()) +} + +#[test] +fn more_specifics_short_lengths() -> Result<(), Box> { + crate::common::init(); + + println!("PersistOnly strategy starting..."); + let tree_bitmap = std::sync::Arc::new(StarCastRib::< + NoMeta, + MemoryOnlyConfig, + >::try_default()?); + let match_options = MatchOptions { + match_type: MatchType::EmptyMatch, + include_withdrawn: false, + include_less_specifics: false, + include_more_specifics: true, + mui: None, + include_history: IncludeHistory::None, + }; + + let pfx1 = Prefix::from_str("185.34.0.0/16")?; + let pfx2 = Prefix::from_str("185.34.3.0/24")?; + let pfx3 = Prefix::from_str("185.34.4.0/24")?; + + tree_bitmap + .insert( + &pfx1, + Record::new(1, 0, RouteStatus::Active, NoMeta::Empty), + None, + ) + .unwrap(); + + tree_bitmap + .insert( + &pfx2, + Record::new(1, 0, RouteStatus::Active, NoMeta::Empty), + None, + ) + .unwrap(); + + tree_bitmap + .insert( + &pfx3, + Record::new(1, 0, RouteStatus::Active, NoMeta::Empty), + None, + ) + .unwrap(); + + let guard = rotonda_store::epoch::pin(); + + assert!(tree_bitmap.contains(&pfx1, None)); + assert!(tree_bitmap.contains(&pfx2, None)); + assert!(tree_bitmap.contains(&pfx3, None)); + + println!("-------------------"); + // let search_pfx = Prefix::from_str("0.0.0.0/0")?; + // let mp = tree_bitmap + // .more_specifics_iter_from(&search_pfx, None, false, &guard) + // .collect::>(); + + // println!("more specifics : {:#?}", mp); + + // assert_eq!(mp.len(), 2); + + let search_pfx = Prefix::from_str("128.0.0.0/1")?; + + let m = tree_bitmap.match_prefix(&search_pfx, &match_options, &guard); + + // let mp = tree_bitmap + // .more_specifics_iter_from(&search_pfx, None, false, &guard) + // .collect::>(); + + println!( + "more specifics#0: {}", + m.as_ref().unwrap().more_specifics.as_ref().unwrap()[0] + ); + println!( + "more specifics#1: {}", + 
m.as_ref().unwrap().more_specifics.as_ref().unwrap()[1] + ); + println!( + "more specifics#2: {}", + m.as_ref().unwrap().more_specifics.as_ref().unwrap()[2] + ); + + assert_eq!(m.unwrap().more_specifics.map(|mp| mp.len()), Some(3)); Ok(()) } diff --git a/tests/full-table.rs b/tests/full-table.rs index 72f4c38f..417879d8 100644 --- a/tests/full-table.rs +++ b/tests/full-table.rs @@ -1,11 +1,13 @@ #![cfg(feature = "csv")] #[cfg(test)] mod tests { - use inetnum::asn::Asn; use inetnum::addr::Prefix; + use inetnum::asn::Asn; use rotonda_store::{ - prelude::*, - prelude::multi::*, + epoch, + match_options::{IncludeHistory, MatchOptions, MatchType}, + prefix_record::{Meta, PrefixRecord, Record, RouteStatus}, + rib::{config::Config, StarCastRib}, }; use std::error::Error; @@ -13,25 +15,58 @@ mod tests { use std::process; #[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)] - pub struct ComplexPrefixAs(pub Vec); + pub struct AsnList(Vec); - impl std::fmt::Display for ComplexPrefixAs { + // pub struct ComplexPrefixAs(pub Vec); + + impl std::fmt::Display for AsnList { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "AS{:?}", self.0) } } - impl Meta for ComplexPrefixAs { + impl Meta for AsnList { type Orderable<'a> = Asn; type TBI = (); fn as_orderable(&self, _tbi: Self::TBI) -> Asn { - Asn::from(self.0[0]) + Asn::from(u32::from_be_bytes(*self.0.first_chunk::<4>().unwrap())) + } + } + + impl AsRef<[u8]> for AsnList { + fn as_ref(&self) -> &[u8] { + &self.0 } } - #[test] - fn test_full_table_from_csv() -> Result<(), Box> { + impl From> for AsnList { + fn from(value: Vec) -> Self { + AsnList( + value + .into_iter() + .flat_map(|v| v.to_le_bytes()) + .collect::>(), + ) + } + } + + impl From> for AsnList { + fn from(value: Vec) -> Self { + Self(value) + } + } + + rotonda_store::all_strategies![ + full_table_1; + test_full_table_from_csv; + AsnList + ]; + + // #[test] + fn test_full_table_from_csv( + tree_bitmap: StarCastRib, + ) -> Result<(), Box> { 
// These constants are all contingent on the exact csv file, // being loaded! @@ -44,7 +79,7 @@ mod tests { let guard = &epoch::pin(); fn load_prefixes( - pfxs: &mut Vec>, + pfxs: &mut Vec>, ) -> Result<(), Box> { let file = File::open(CSV_FILE_PATH)?; @@ -64,7 +99,7 @@ mod tests { 0, 0, RouteStatus::Active, - ComplexPrefixAs(vec![asn]) + vec![asn].into(), )], ); pfxs.push(pfx); @@ -79,9 +114,9 @@ mod tests { // vec![3, 4, 4, 6, 7, 8], ]; for _strides in strides_vec.iter().enumerate() { - let mut pfxs: Vec> = vec![]; - let tree_bitmap = MultiThreadedStore::::new()?; - // .with_user_data("Testing".to_string()); + let mut pfxs: Vec> = vec![]; + // let tree_bitmap = MultiThreadedStore::::try_default()?; + // .with_user_data("Testing".to_string()); if let Err(err) = load_prefixes(&mut pfxs) { println!("error running example: {}", err); @@ -90,7 +125,11 @@ mod tests { let inserts_num = pfxs.len(); for pfx in pfxs.into_iter() { - match tree_bitmap.insert(&pfx.prefix, pfx.meta[0].clone(), None) { + match tree_bitmap.insert( + &pfx.prefix, + pfx.meta[0].clone(), + None, + ) { Ok(_) => {} Err(e) => { println!("{}", e); @@ -98,26 +137,31 @@ mod tests { } }; - let query = tree_bitmap.match_prefix(&pfx.prefix, - &MatchOptions { + let query = tree_bitmap.match_prefix( + &pfx.prefix, + &MatchOptions { match_type: MatchType::LongestMatch, include_withdrawn: false, include_less_specifics: false, include_more_specifics: false, - mui: None + mui: None, + include_history: IncludeHistory::None, }, - guard + guard, ); - if query.prefix.is_none() { panic!("STOPSTOPSTOPST"); } - else { - assert_eq!(query.prefix.unwrap(), pfx.prefix); + if query.as_ref().unwrap().prefix.is_none() { + panic!("STOPSTOPSTOPST"); + } else { + assert_eq!( + query.as_ref().unwrap().prefix.unwrap(), + pfx.prefix + ); } } println!("done inserting {} prefixes", inserts_num); - let inet_max = 255; let len_max = 32; @@ -128,7 +172,6 @@ mod tests { (0..inet_max).for_each(|i_net| { len_count = 0; 
(0..len_max).for_each(|s_len| { - (0..inet_max).for_each(|ii_net| { let pfx = Prefix::new_relaxed( std::net::Ipv4Addr::new(i_net, ii_net, 0, 0) @@ -143,11 +186,12 @@ mod tests { include_withdrawn: false, include_less_specifics: false, include_more_specifics: false, - mui: None + mui: None, + include_history: IncludeHistory::None, }, guard, ); - if let Some(_pfx) = res.prefix { + if let Some(_pfx) = res.as_ref().unwrap().prefix { // println!("_pfx {:?}", _pfx); // println!("pfx {:?}", pfx); // println!("{:#?}", res); @@ -180,7 +224,10 @@ mod tests { assert_eq!(searches_num, SEARCHES_NUM as u128); assert_eq!(inserts_num, INSERTS_NUM); - assert_eq!(tree_bitmap.prefixes_count(), GLOBAL_PREFIXES_VEC_SIZE); + assert_eq!( + tree_bitmap.prefixes_count().total(), + GLOBAL_PREFIXES_VEC_SIZE + ); assert_eq!(found_counter, FOUND_PREFIXES); assert_eq!(not_found_counter, SEARCHES_NUM - FOUND_PREFIXES); } diff --git a/tests/less-specifics.rs b/tests/less-specifics.rs new file mode 100644 index 00000000..b783baa9 --- /dev/null +++ b/tests/less-specifics.rs @@ -0,0 +1,133 @@ +// type Prefix4<'a> = Prefix; +use inetnum::addr::Prefix; +use rotonda_store::{ + epoch, + prefix_record::{Record, RouteStatus}, + rib::{config::Config, StarCastRib}, + test_types::PrefixAs, +}; + +use std::error::Error; + +mod common { + use std::io::Write; + + pub fn init() { + let _ = env_logger::builder() + .format(|buf, record| writeln!(buf, "{}", record.args())) + .is_test(true) + .try_init(); + } +} + +rotonda_store::all_strategies![ + test_ms_1; + test_less_specifics; + PrefixAs +]; + +fn test_less_specifics( + tree_bitmap: StarCastRib, +) -> Result<(), Box> { + crate::common::init(); + + let pfxs = [ + Prefix::new(std::net::Ipv4Addr::new(57, 86, 0, 0).into(), 16)?, + Prefix::new(std::net::Ipv4Addr::new(57, 86, 0, 0).into(), 15)?, + Prefix::new(std::net::Ipv4Addr::new(57, 84, 0, 0).into(), 14)?, + ]; + for pfx in pfxs.iter() { + tree_bitmap.insert( + pfx, + Record::new( + 0, + 0, + RouteStatus::Active, 
+ PrefixAs::new_from_u32(666), + ), + None, + )?; + } + println!("------ end of inserts\n"); + + let guard = &epoch::pin(); + for (i, spfx) in &[ + ( + 0, + ( + &Prefix::new( + std::net::Ipv4Addr::new(57, 86, 0, 0).into(), + 17, + ), + None, + // These are the indexes to pfxs.2 vec. + // These are all supposed to show up in the result. + vec![0, 1, 2], + ), + ), + ( + 0, + ( + &Prefix::new( + std::net::Ipv4Addr::new(57, 86, 0, 0).into(), + 16, + ), + None, + vec![1, 2], + ), + ), + ( + 0, + ( + &Prefix::new( + std::net::Ipv4Addr::new(57, 86, 0, 0).into(), + 15, + ), + None, + vec![2], + ), + ), + ( + 0, + ( + &Prefix::new( + std::net::Ipv4Addr::new(57, 84, 0, 0).into(), + 14, + ), + None, + vec![], + ), + ), + ] { + println!("round {}", i); + println!("search for: {}", (*spfx.0)?); + println!("search prefix: {}", spfx.0.unwrap()); + + let less_iter = tree_bitmap.less_specifics_iter_from( + &spfx.0.unwrap(), + spfx.1, + true, + guard, + ); + + for (i, p) in less_iter.enumerate() { + let p = p.unwrap(); + println!("less_iter {} i {}", p, i); + assert_eq!(p.prefix, pfxs[spfx.2[i]]) + } + + println!("--"); + println!("all prefixes"); + + for (i, p) in tree_bitmap + .prefixes_iter_v4(guard) + .enumerate() + .map(|(i, p)| (i, p.as_ref().unwrap().prefix)) + { + println!("ls {}: {}", i, p); + } + + println!("-----------"); + } + Ok(()) +} diff --git a/tests/more-more-specifics.rs b/tests/more-more-specifics.rs index 2c790bcb..7f221824 100644 --- a/tests/more-more-specifics.rs +++ b/tests/more-more-specifics.rs @@ -1,166 +1,222 @@ // type Prefix4<'a> = Prefix; -mod tests { - use inetnum::addr::Prefix; - use rotonda_store::{ - meta_examples::PrefixAs, - prelude::*, - prelude::multi::*, - }; - - use std::error::Error; - - #[test] - fn test_more_specifics_without_less_specifics( - ) -> Result<(), Box> { - let tree_bitmap = MultiThreadedStore::::new()?; - let pfxs = vec![ - Prefix::new(std::net::Ipv4Addr::new(17, 0, 64, 0).into(), 18)?, // 0 - 
Prefix::new(std::net::Ipv4Addr::new(17, 0, 109, 0).into(), 24)?, // 1 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 153, 0).into(), 24)?, // 2 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 21)?, // 3 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 176, 0).into(), 20)?, // 4 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8)?, // 5 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 184, 0).into(), 23)?, // 6 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 71, 0).into(), 24)?, // 7 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9)?, // 8 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 117, 0).into(), 24)?, // 9 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 99, 0).into(), 24)?, // 10 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 224, 0).into(), 24)?, // 11 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 128, 0).into(), 18)?, // 12 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 120, 0).into(), 24)?, // 13 - ]; - - for pfx in pfxs.iter() { - tree_bitmap.insert( - pfx, Record::new(0, 0, RouteStatus::Active, PrefixAs(666)), None - )?; - } - println!("------ end of inserts\n"); - // let locks = tree_bitmap.acquire_prefixes_rwlock_read(); - let guard = &epoch::pin(); - for spfx in &[ +use std::error::Error; + +use inetnum::addr::Prefix; +use rotonda_store::match_options::{IncludeHistory, MatchOptions, MatchType}; +use rotonda_store::prefix_record::{Record, RouteStatus}; +use rotonda_store::rib::{config::Config, StarCastRib}; +use rotonda_store::test_types::PrefixAs; + +mod common { + use std::io::Write; + + pub fn init() { + let _ = env_logger::builder() + .format(|buf, record| writeln!(buf, "{}", record.args())) + .is_test(true) + .try_init(); + } +} + +rotonda_store::all_strategies![ + test_ms_w_ls_1; + test_more_specifics_without_less_specifics; + PrefixAs +]; + +// #[test] +fn test_more_specifics_without_less_specifics( + tree_bitmap: StarCastRib, +) -> Result<(), Box> { + crate::common::init(); + + // let tree_bitmap = MultiThreadedStore::::try_default()?; 
+ let pfxs = vec![ + Prefix::new(std::net::Ipv4Addr::new(17, 0, 64, 0).into(), 18)?, // 0 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 109, 0).into(), 24)?, // 1 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 153, 0).into(), 24)?, // 2 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 21)?, // 3 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 176, 0).into(), 20)?, // 4 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8)?, // 5 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 184, 0).into(), 23)?, // 6 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 71, 0).into(), 24)?, // 7 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9)?, // 8 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 117, 0).into(), 24)?, // 9 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 99, 0).into(), 24)?, // 10 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 224, 0).into(), 24)?, // 11 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 128, 0).into(), 18)?, // 12 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 120, 0).into(), 24)?, // 13 + ]; + + for pfx in pfxs.iter() { + tree_bitmap.insert( + pfx, + Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new_from_u32(666), + ), + None, + )?; + } + println!("------ end of inserts\n"); + + // let locks = tree_bitmap.acquire_prefixes_rwlock_read(); + let guard = &rotonda_store::epoch::pin(); + for (r, spfx) in &[ + ( + 0, ( &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9), &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9), // 0 vec![0, 1, 2, 3, 4, 6, 7, 9, 10, 11, 12, 13], ), + ), + ( + 1, ( &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8), &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8), // 0 vec![0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13], ), - ] { - println!("search for: {:?}", spfx.0); - let found_result = tree_bitmap.match_prefix( - &spfx.0.unwrap(), - &MatchOptions { - match_type: MatchType::ExactMatch, - include_withdrawn: false, - include_less_specifics: false, - include_more_specifics: true, 
- mui: None, - }, - guard - ); - println!("em/m-s: {:#?}", found_result); - - let more_specifics = found_result.more_specifics.unwrap(); - - assert_eq!(found_result.prefix.unwrap(), spfx.1.unwrap()); - assert_eq!(&more_specifics.len(), &spfx.2.len()); - - for i in spfx.2.iter() { - print!("{} ", i); - - let result_pfx = - more_specifics.iter().find(|pfx| pfx.prefix == pfxs[*i]); - assert!(result_pfx.is_some()); - } - println!("-----------"); + ), + ] { + println!("start round {}", r); + println!("search for: {}", spfx.0.unwrap()); + let found_result = tree_bitmap.match_prefix( + &spfx.0.unwrap(), + &MatchOptions { + match_type: MatchType::ExactMatch, + include_withdrawn: false, + include_less_specifics: false, + include_more_specifics: true, + mui: None, + include_history: IncludeHistory::None, + }, + guard, + )?; + println!("em/m-s: {:#?}", found_result); + + let more_specifics = found_result + .more_specifics + .as_ref() + .unwrap() + .iter() + .filter(|p| p.prefix != spfx.0.unwrap()) + .collect::>(); + + assert_eq!(found_result.prefix.unwrap(), spfx.1.unwrap()); + assert_eq!(&more_specifics.len(), &spfx.2.len()); + + for i in spfx.2.iter() { + print!("{} ", i); + + let result_pfx = + more_specifics.iter().find(|pfx| pfx.prefix == pfxs[*i]); + assert!(result_pfx.is_some()); } - Ok(()) + println!("end round {}", r); + println!("-----------"); + } + Ok(()) +} + +rotonda_store::all_strategies![ + test_ms_w_ls_2; + test_more_specifics_with_less_specifics; + PrefixAs +]; + +fn test_more_specifics_with_less_specifics( + tree_bitmap: StarCastRib, +) -> Result<(), Box> { + crate::common::init(); + + // let tree_bitmap = + // MultiThreadedStore::::try_default()?; + let pfxs = vec![ + Prefix::new(std::net::Ipv4Addr::new(17, 0, 64, 0).into(), 18), // 0 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 109, 0).into(), 24), // 1 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 153, 0).into(), 24), // 2 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 21), // 3 + 
Prefix::new(std::net::Ipv4Addr::new(17, 0, 176, 0).into(), 20), // 4 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8), // 5 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 184, 0).into(), 23), // 6 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 71, 0).into(), 24), // 7 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9), // 8 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 117, 0).into(), 24), // 9 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 99, 0).into(), 24), // 10 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 224, 0).into(), 24), // 11 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 128, 0).into(), 18), // 12 + Prefix::new(std::net::Ipv4Addr::new(17, 0, 120, 0).into(), 24), // 13 + ]; + + let ltime = 0; + let status = RouteStatus::Active; + for pfx in pfxs.iter() { + tree_bitmap.insert( + &pfx.unwrap(), + Record::new(0, ltime, status, PrefixAs::new_from_u32(666)), + None, + )?; } + println!("------ end of inserts\n"); + let guard = &rotonda_store::epoch::pin(); - #[test] - fn test_more_specifics_with_less_specifics() -> Result<(), Box> - { - let tree_bitmap = MultiThreadedStore::::new()?; - let pfxs = vec![ - Prefix::new(std::net::Ipv4Addr::new(17, 0, 64, 0).into(), 18), // 0 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 109, 0).into(), 24), // 1 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 153, 0).into(), 24), // 2 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 21), // 3 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 176, 0).into(), 20), // 4 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8), // 5 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 184, 0).into(), 23), // 6 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 71, 0).into(), 24), // 7 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9), // 8 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 117, 0).into(), 24), // 9 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 99, 0).into(), 24), // 10 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 224, 0).into(), 24), // 11 - 
Prefix::new(std::net::Ipv4Addr::new(17, 0, 128, 0).into(), 18), // 12 - Prefix::new(std::net::Ipv4Addr::new(17, 0, 120, 0).into(), 24), // 13 - ]; - - let ltime = 0; - let status = RouteStatus::Active; - for pfx in pfxs.iter() { - tree_bitmap.insert(&pfx.unwrap(), Record::new(0, ltime, status, PrefixAs(666)), None)?; - } - println!("------ end of inserts\n"); - let guard = &epoch::pin(); - - for spfx in &[ - ( - &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9), - Some(&Prefix::new( - std::net::Ipv4Addr::new(17, 0, 0, 0).into(), - 9, - )), // 0 - vec![0, 1, 2, 3, 4, 6, 7, 9, 10, 11, 12, 13], - ), - ( - &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8), - Some(&Prefix::new( - std::net::Ipv4Addr::new(17, 0, 0, 0).into(), - 8, - )), // 0 - vec![0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13], - ), - ] { - println!("search for: {:#}", (*spfx.0)?); - let found_result = tree_bitmap.match_prefix( - &spfx.0.unwrap(), - &MatchOptions { - match_type: MatchType::LongestMatch, - include_withdrawn: false, - include_less_specifics: false, - include_more_specifics: true, - mui: None - }, - guard - ); - println!("em/m-s: {}", found_result); - - let more_specifics = found_result.more_specifics.unwrap(); - - assert_eq!( - found_result.prefix.unwrap(), - spfx.1.unwrap().unwrap() - ); - assert_eq!(&more_specifics.len(), &spfx.2.len()); - - for i in spfx.2.iter() { - print!("{} ", i); - - let result_pfx = more_specifics - .iter() - .find(|pfx| pfx.prefix == pfxs[*i].unwrap()); - assert!(result_pfx.is_some()); - } - println!("-----------"); + for spfx in &[ + ( + &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 9), + Some(&Prefix::new( + std::net::Ipv4Addr::new(17, 0, 0, 0).into(), + 9, + )), // 0 + vec![0, 1, 2, 3, 4, 6, 7, 9, 10, 11, 12, 13], + ), + ( + &Prefix::new(std::net::Ipv4Addr::new(17, 0, 0, 0).into(), 8), + Some(&Prefix::new( + std::net::Ipv4Addr::new(17, 0, 0, 0).into(), + 8, + )), // 0 + vec![0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13], + ), + ] { + 
println!("search for: {:#}", (*spfx.0)?); + let found_result = tree_bitmap.match_prefix( + &spfx.0.unwrap(), + &MatchOptions { + match_type: MatchType::LongestMatch, + include_withdrawn: false, + include_less_specifics: false, + include_more_specifics: true, + mui: None, + include_history: IncludeHistory::None, + }, + guard, + )?; + println!("em/m-s: {}", found_result); + + let more_specifics = found_result + .more_specifics + .unwrap() + .iter() + .filter(|p| p.prefix != spfx.0.unwrap()) + .collect::>(); + + assert_eq!(found_result.prefix.unwrap(), spfx.1.unwrap().unwrap()); + assert_eq!(&more_specifics.len(), &spfx.2.len()); + + for i in spfx.2.iter() { + print!("{} ", i); + + let result_pfx = more_specifics + .iter() + .find(|pfx| pfx.prefix == pfxs[*i].unwrap()); + assert!(result_pfx.is_some()); } - Ok(()) + println!("-----------"); } + Ok(()) } diff --git a/tests/more-specifics.rs b/tests/more-specifics.rs index bf2c8eeb..9d494775 100644 --- a/tests/more-specifics.rs +++ b/tests/more-specifics.rs @@ -1,110 +1,92 @@ // type Prefix4<'a> = Prefix; -mod tests { - use inetnum::addr::Prefix; - use rotonda_store::meta_examples::PrefixAs; - use rotonda_store::{ - prelude::*, prelude::multi::* - }; - - use std::error::Error; - - #[test] - fn test_more_specifics() -> Result<(), Box> { - let tree_bitmap = MultiThreadedStore::::new()?; - let pfxs = vec![ - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 0).into(), 24), // 0 - // - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 0).into(), 25), // 1 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 128).into(), - 25, - ), // 2 - // - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 0).into(), 26), // 3 - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 64).into(), 26), // 4 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 128).into(), - 26, - ), // 5 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 192).into(), - 26, - ), // 6 - // - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 0).into(), 27), 
// 7 - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 32).into(), 27), // 8 - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 64).into(), 27), // 9 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 128).into(), - 27, - ), // 10 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 160).into(), - 27, - ), // 11 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 192).into(), - 27, - ), // 12 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 224).into(), - 27, - ), // 13 - // - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 0).into(), 32), // 14 - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 31).into(), 32), // 15 - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 32).into(), 32), // 16 - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 63).into(), 32), // 17 - Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 64).into(), 32), // 18 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 127).into(), - 32, - ), // 19 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 128).into(), - 32, - ), // 20 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 159).into(), - 32, - ), // 21 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 160).into(), - 32, - ), // 22 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 191).into(), - 32, - ), // 23 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 192).into(), - 32, - ), // 24 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 223).into(), - 32, - ), // 25 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 224).into(), - 32, - ), // 26 - Prefix::new( - std::net::Ipv4Addr::new(130, 55, 240, 255).into(), - 32, - ), // 27 - ]; - for pfx in pfxs.iter().flatten() { - tree_bitmap.insert( - pfx, - Record::new(0, 0, RouteStatus::Active, PrefixAs(666)), - None - )?; - } - println!("------ end of inserts\n"); +use inetnum::addr::Prefix; +use rotonda_store::{ + epoch, + match_options::{IncludeHistory, MatchOptions, MatchType}, + prefix_record::{Record, RouteStatus}, + 
rib::{config::Config, StarCastRib}, + test_types::PrefixAs, +}; + +use std::error::Error; + +mod common { + use std::io::Write; + + pub fn init() { + let _ = env_logger::builder() + .format(|buf, record| writeln!(buf, "{}", record.args())) + .is_test(true) + .try_init(); + } +} + +rotonda_store::all_strategies![ + test_ms_1; + test_more_specifics; + PrefixAs +]; + +// #[test] +fn test_more_specifics( + tree_bitmap: StarCastRib, +) -> Result<(), Box> { + crate::common::init(); - // let locks = tree_bitmap.acquire_prefixes_rwlock_read(); - let guard = &epoch::pin(); - for spfx in &[ + // let tree_bitmap = MultiThreadedStore::::try_default()?; + let pfxs = vec![ + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 0).into(), 24), // 0 + // + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 0).into(), 25), // 1 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 128).into(), 25), // 2 + // + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 0).into(), 26), // 3 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 64).into(), 26), // 4 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 128).into(), 26), // 5 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 192).into(), 26), // 6 + // + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 0).into(), 27), // 7 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 32).into(), 27), // 8 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 64).into(), 27), // 9 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 128).into(), 27), // 10 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 160).into(), 27), // 11 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 192).into(), 27), // 12 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 224).into(), 27), // 13 + // + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 0).into(), 32), // 14 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 31).into(), 32), // 15 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 32).into(), 32), // 16 + Prefix::new(std::net::Ipv4Addr::new(130, 
55, 240, 63).into(), 32), // 17 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 64).into(), 32), // 18 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 127).into(), 32), // 19 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 128).into(), 32), // 20 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 159).into(), 32), // 21 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 160).into(), 32), // 22 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 191).into(), 32), // 23 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 192).into(), 32), // 24 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 223).into(), 32), // 25 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 224).into(), 32), // 26 + Prefix::new(std::net::Ipv4Addr::new(130, 55, 240, 255).into(), 32), // 27 + ]; + for pfx in pfxs.iter().flatten() { + tree_bitmap.insert( + pfx, + Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new_from_u32(666), + ), + None, + )?; + } + println!("------ end of inserts\n"); + + // let locks = tree_bitmap.acquire_prefixes_rwlock_read(); + let guard = &epoch::pin(); + for (i, spfx) in &[ + ( + 0, ( &Prefix::new( std::net::Ipv4Addr::new(130, 55, 240, 0).into(), @@ -118,6 +100,9 @@ mod tests { 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, ], ), + ), + ( + 1, ( &Prefix::new( std::net::Ipv4Addr::new(130, 55, 240, 0).into(), @@ -134,6 +119,9 @@ mod tests { 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, ], ), + ), + ( + 2, ( &Prefix::new( std::net::Ipv4Addr::new(130, 55, 240, 0).into(), @@ -145,6 +133,9 @@ mod tests { )?), vec![3, 4, 7, 8, 9, 14, 15, 16, 17, 18, 19], ), + ), + ( + 3, ( &Prefix::new( std::net::Ipv4Addr::new(130, 55, 240, 0).into(), @@ -156,6 +147,9 @@ mod tests { )?), vec![7, 8, 14, 15, 16, 17], ), + ), + ( + 4, ( &Prefix::new( std::net::Ipv4Addr::new(130, 55, 240, 192).into(), @@ -167,11 +161,11 @@ mod tests { )?), vec![12, 13, 24, 25, 26, 27], ), + ), + ( + 5, ( - &Prefix::new( - std::net::Ipv4Addr::new(0,0,0,0).into(), - 0, - ), + 
&Prefix::new(std::net::Ipv4Addr::new(0, 0, 0, 0).into(), 0), None, // These are the indexes to pfxs.2 vec. // These are all supposed to show up in the result. @@ -180,36 +174,137 @@ mod tests { 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, ], ), - ] { - println!("search for: {}", (*spfx.0)?); - let found_result = tree_bitmap.match_prefix( - &spfx.0.unwrap(), - &MatchOptions { - match_type: MatchType::ExactMatch, - include_withdrawn: false, - include_less_specifics: false, - include_more_specifics: true, - mui: None - }, - guard - ); - println!("em/m-s: {:#?}", found_result); - - let more_specifics = found_result.more_specifics.unwrap(); - assert_eq!(found_result.prefix, spfx.1); - - assert_eq!(&more_specifics.len(), &spfx.2.len()); - - for i in spfx.2.iter() { - print!("{} ", i); - - let result_pfx = more_specifics - .iter() - .find(|pfx| pfx.prefix == pfxs[*i].unwrap()); - assert!(result_pfx.is_some()); - } - println!("-----------"); + ), + ] { + println!("round {}", i); + println!("search for: {}", (*spfx.0)?); + let found_result = tree_bitmap.match_prefix( + &spfx.0.unwrap(), + &MatchOptions { + match_type: MatchType::ExactMatch, + include_withdrawn: false, + include_less_specifics: false, + include_more_specifics: true, + mui: None, + include_history: IncludeHistory::None, + }, + guard, + )?; + // println!("em/m-s: {:#?}", found_result); + // + println!("search prefix: {}", spfx.0.unwrap()); + if let Some(pfx) = found_result.clone().prefix { + println!("found prefix: {}", pfx); + } else { + println!("no found prefix"); + } + + for (i, p) in found_result + .clone() + .more_specifics + .unwrap() + .v4 + .iter() + .enumerate() + .map(|(i, p)| (i, p.prefix)) + { + println!("ms {}: {}", i, p); } - Ok(()) + + println!("--"); + println!("all prefixes"); + + for (i, p) in tree_bitmap + .prefixes_iter_v4(guard) + .enumerate() + .map(|(i, p)| (i, p.as_ref().unwrap().prefix)) + { + println!("ms {}: {}", i, p); + } + + println!("25 {}", pfxs[25].unwrap()); + 
assert!(tree_bitmap.contains(&pfxs[25].unwrap(), None)); + assert!(tree_bitmap.contains(&pfxs[26].unwrap(), None)); + assert!(tree_bitmap.contains(&pfxs[27].unwrap(), None)); + // let mut ms2 = tree_bitmap.more_specifics_keys_from(&spfx.0.unwrap()); + // println!("ms2 {:#?}", ms2); + // println!("ms2 len {}", ms2.len()); + // ms2.dedup(); + // println!("ms2 deduped {}", ms2.len()); + let more_specifics = found_result + .more_specifics + .unwrap() + .iter() + .filter(|p| p.prefix != spfx.0.unwrap()) + .collect::>(); + + println!( + ">> {:?}", + more_specifics + .iter() + .find(|ms| ms.prefix == spfx.0.unwrap()) + ); + assert_eq!(found_result.prefix, spfx.1); + + println!("round {}", i); + println!("{:?}", tree_bitmap.persist_strategy()); + assert_eq!(&more_specifics.len(), &spfx.2.len()); + + for i in spfx.2.iter() { + print!("{} ", i); + + let result_pfx = more_specifics + .iter() + .find(|pfx| pfx.prefix == pfxs[*i].unwrap()); + assert!(result_pfx.is_some()); + } + println!("-----------"); } + Ok(()) +} + +rotonda_store::all_strategies![ + test_b_1; + test_brunos_more_specifics; + PrefixAs +]; + +fn test_brunos_more_specifics( + tree_bitmap: StarCastRib, +) -> Result<(), Box> { + tree_bitmap.insert( + &Prefix::new(std::net::Ipv4Addr::new(168, 181, 224, 0).into(), 22) + .unwrap(), + Record::new(0, 0, RouteStatus::Active, PrefixAs::new_from_u32(666)), + None, + )?; + tree_bitmap.insert( + &Prefix::new(std::net::Ipv4Addr::new(168, 181, 120, 0).into(), 24)?, + Record::new(0, 0, RouteStatus::Active, PrefixAs::new_from_u32(666)), + None, + )?; + tree_bitmap.insert( + &Prefix::new(std::net::Ipv4Addr::new(168, 181, 121, 0).into(), 24) + .unwrap(), + Record::new(0, 0, RouteStatus::Active, PrefixAs::new_from_u32(666)), + None, + )?; + + let guard = &epoch::pin(); + let found_result = tree_bitmap.match_prefix( + &Prefix::new(std::net::Ipv4Addr::new(168, 181, 224, 0).into(), 22) + .unwrap(), + &MatchOptions { + match_type: MatchType::ExactMatch, + include_withdrawn: false, + 
include_less_specifics: false, + include_more_specifics: true, + mui: None, + include_history: IncludeHistory::None, + }, + guard, + )?; + + assert!(found_result.more_specifics.unwrap().is_empty()); + Ok(()) } diff --git a/tests/treebitmap.rs b/tests/treebitmap.rs index 3d1632e4..1af79705 100644 --- a/tests/treebitmap.rs +++ b/tests/treebitmap.rs @@ -14,15 +14,26 @@ mod tests { use std::str::FromStr; use inetnum::addr::Prefix; + use log::trace; use rotonda_store::{ - meta_examples::{NoMeta, PrefixAs}, - prelude::multi::*, - prelude::*, + epoch, + match_options::{IncludeHistory, MatchOptions, MatchType}, + prefix_record::{Record, RouteStatus}, + rib::{config::Config, StarCastRib}, + test_types::{NoMeta, PrefixAs}, + IntoIpAddr, }; - #[test] - fn test_insert_extremes_ipv4() -> Result<(), Box> { - let trie = &mut MultiThreadedStore::::new()?; + rotonda_store::all_strategies![ + test_treebitmap; + test_insert_extremes_ipv4; + NoMeta + ]; + + // #[test] + fn test_insert_extremes_ipv4( + trie: StarCastRib, + ) -> Result<(), Box> { let min_pfx = Prefix::new_relaxed( std::net::Ipv4Addr::new(0, 0, 0, 0).into(), 1, @@ -48,9 +59,10 @@ mod tests { include_less_specifics: true, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("prefix: {:?}", &expect_pfx); println!("result: {:#?}", &res); assert!(res.prefix.is_some()); @@ -81,19 +93,28 @@ mod tests { include_less_specifics: true, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; assert!(res.prefix.is_some()); assert_eq!(res.prefix, Some(expect_pfx?)); Ok(()) } - #[test] - fn test_tree_ipv4() -> Result<(), Box> { + rotonda_store::all_strategies![ + tree_ipv4; + test_tree_ipv4; + PrefixAs + ]; + + // #[test] + fn test_tree_ipv4( + tree_bitmap: StarCastRib, + ) -> Result<(), Box> { crate::common::init(); - let tree_bitmap = MultiThreadedStore::::new()?; + // let tree_bitmap = MultiThreadedStore::::try_default()?; 
let pfxs = vec![ // Prefix::new_relaxed(0b0000_0000_0000_0000_0000_0000_0000_000 0_u32.into_ipaddr(), 0), Prefix::new_relaxed( @@ -321,7 +342,12 @@ mod tests { for pfx in pfxs.into_iter() { tree_bitmap.insert( &pfx?, - Record::new(0, 0, RouteStatus::Active, PrefixAs(666)), + Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new_from_u32(666), + ), None, )?; } @@ -333,21 +359,24 @@ mod tests { // }; let guard = &epoch::pin(); - for pfx in tree_bitmap.prefixes_iter() { + for pfx in tree_bitmap.prefixes_iter(guard) { // let pfx_nm = pfx.strip_meta(); + let pfx = pfx.unwrap().prefix; let res = tree_bitmap.match_prefix( - &pfx.prefix, + &pfx, &MatchOptions { match_type: MatchType::LongestMatch, include_withdrawn: false, include_less_specifics: false, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); - println!("{}", pfx); - assert_eq!(res.prefix.unwrap(), pfx.prefix); + )?; + println!("PFX {}", pfx); + println!("RES {}", res); + assert_eq!(res.prefix.unwrap(), pfx); } let res = tree_bitmap.match_prefix( @@ -358,18 +387,16 @@ mod tests { include_less_specifics: true, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("prefix {:?}", res.prefix); - println!("res: {:#?}", &res); + println!("res: {}", &res); assert_eq!( res.prefix.unwrap(), - Prefix::new_relaxed( - std::net::Ipv4Addr::new(192, 0, 0, 0).into(), - 23 - )? + Prefix::new(std::net::Ipv4Addr::new(192, 0, 0, 0).into(), 23)? 
); let less_specifics = res.less_specifics.unwrap(); @@ -393,10 +420,24 @@ mod tests { Ok(()) } - #[test] - fn test_ranges_ipv4() -> Result<(), Box> { + rotonda_store::all_strategies![ + ranges_ipv4; + test_ranges_ipv4; + NoMeta + ]; + + // #[test] + fn test_ranges_ipv4( + _tree_bitmap: StarCastRib, + ) -> Result<(), Box> { + // for persist_strategy in [ + // PersistStrategy::MemoryOnly, + // // PersistStrategy::PersistOnly, + // // PersistStrategy::WriteAhead, + // // PersistStrategy::PersistHistory, + for i_net in 0..255 { - let tree_bitmap = MultiThreadedStore::::new()?; + let tree_bitmap = StarCastRib::::try_default()?; let pfx_vec: Vec = (1..32) .collect::>() @@ -438,23 +479,34 @@ mod tests { include_less_specifics: false, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("{:?}", pfx); assert_eq!(res.prefix.unwrap(), res_pfx?); } } } + Ok(()) } - #[test] - fn test_multi_ranges_ipv4() -> Result<(), Box> { + rotonda_store::all_strategies![ + multi_ranges; + test_multi_ranges_ipv4; + NoMeta + ]; + + // #[test] + fn test_multi_ranges_ipv4( + tree_bitmap: StarCastRib, + ) -> Result<(), Box> { crate::common::init(); - let tree_bitmap = MultiThreadedStore::::new()?; + // let tree_bitmap = + // MultiThreadedStore::::try_default()?; for mui in [1_u32, 2, 3, 4, 5] { println!("Multi Uniq ID {mui}"); @@ -506,6 +558,7 @@ mod tests { include_less_specifics: false, include_more_specifics: false, mui: Some(mui), + include_history: IncludeHistory::None, }, guard, ); @@ -523,6 +576,7 @@ mod tests { .iter_records_for_mui_v4(5, false, guard) .collect::>() { + let rec = rec.unwrap(); println!("{}", rec); assert_eq!(rec.meta.len(), 1); @@ -533,7 +587,7 @@ mod tests { .iter_records_for_mui_v4(1, false, guard) .collect::>() { - println!("{}", rec); + println!("{}", rec.unwrap()); } // println!("all records"); @@ -552,13 +606,14 @@ mod tests { include_less_specifics: false, include_more_specifics: false, mui: None, + 
include_history: IncludeHistory::None, }, guard, - ); + )?; print!(".pfx {:#?}.", all_recs_for_pfx); - assert_eq!(all_recs_for_pfx.prefix_meta.len(), 5); + assert_eq!(all_recs_for_pfx.records.len(), 5); let wd_rec = all_recs_for_pfx - .prefix_meta + .records .iter() .filter(|r| r.status == RouteStatus::Withdrawn) .collect::>(); @@ -573,43 +628,54 @@ mod tests { include_less_specifics: false, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); - assert_eq!(active_recs_for_pfx.prefix_meta.len(), 4); + )?; + assert_eq!(active_recs_for_pfx.records.len(), 4); assert!(!active_recs_for_pfx - .prefix_meta + .records .iter() .any(|r| r.multi_uniq_id == 1)); let wd_pfx = Prefix::from_str("1.0.0.0/16")?; - tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 2)?; + tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 2, 1)?; println!("all records"); - let all_recs = tree_bitmap.prefixes_iter(); + let all_recs = tree_bitmap.prefixes_iter(guard); - for rec in tree_bitmap.prefixes_iter().collect::>() { + for rec in tree_bitmap.prefixes_iter(guard).collect::>() { + let rec = rec.unwrap(); println!("{}", rec); } - let mui_2_recs = - all_recs.filter_map(|r| r.get_record_for_mui(2).cloned()); + let mui_2_recs = all_recs.filter_map(|r| { + r.as_ref().unwrap().get_record_for_mui(2).cloned() + }); let wd_2_rec = mui_2_recs .filter(|r| r.status == RouteStatus::Withdrawn) .collect::>(); assert_eq!(wd_2_rec.len(), 1); assert_eq!(wd_2_rec[0].multi_uniq_id, 2); - let mui_2_recs = tree_bitmap.prefixes_iter().filter_map(|r| { - r.get_record_for_mui(2).cloned().map(|rec| (r.prefix, rec)) + let mui_2_recs = tree_bitmap.prefixes_iter(guard).filter_map(|r| { + r.as_ref() + .unwrap() + .get_record_for_mui(2) + .cloned() + .map(|rec| (r.as_ref().unwrap().prefix, rec)) }); println!("mui_2_recs prefixes_iter"); for rec in mui_2_recs { println!("{} {:#?}", rec.0, rec.1); } - let mui_2_recs = tree_bitmap.prefixes_iter().filter_map(|r| { - 
r.get_record_for_mui(2).cloned().map(|rec| (r.prefix, rec)) + let mui_2_recs = tree_bitmap.prefixes_iter(guard).filter_map(|r| { + r.as_ref() + .unwrap() + .get_record_for_mui(2) + .cloned() + .map(|rec| (r.as_ref().unwrap().prefix, rec)) }); let active_2_rec = mui_2_recs @@ -621,6 +687,7 @@ mod tests { let mui_2_recs = tree_bitmap.iter_records_for_mui_v4(2, false, guard); println!("mui_2_recs iter_records_for_mui_v4"); for rec in mui_2_recs { + let rec = rec.unwrap(); println!("{} {:#?}", rec.prefix, rec.meta); } @@ -638,7 +705,11 @@ mod tests { assert_eq!(mui_1_recs.len(), 4); println!("mui_1_recs iter_records_for_mui_v4 w/ withdrawn"); for rec in mui_1_recs { - assert_eq!(rec.meta[0].status, RouteStatus::Withdrawn); + let rec = rec.unwrap(); + assert_eq!( + rec.meta.first().unwrap().status, + RouteStatus::Withdrawn + ); } //-------------- @@ -651,15 +722,24 @@ mod tests { include_less_specifics: false, include_more_specifics: true, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("more_specifics match {} w/ withdrawn", more_specifics); + + let guard = &rotonda_store::epoch::pin(); + for p in tree_bitmap.prefixes_iter_v4(guard) { + let p = p.unwrap(); + println!("{}", p); + } + let more_specifics = more_specifics.more_specifics.unwrap(); + let ms_v4 = more_specifics.v4.iter().collect::>(); assert_eq!(more_specifics.len(), 1); - assert_eq!(more_specifics.v4.len(), 1); - let more_specifics = &more_specifics.v4[0]; + assert_eq!(ms_v4.len(), 1); + let more_specifics = &ms_v4[0]; assert_eq!(more_specifics.prefix, Prefix::from_str("1.0.0.0/17")?); assert_eq!(more_specifics.meta.len(), 5); assert_eq!( @@ -689,15 +769,21 @@ mod tests { include_less_specifics: false, include_more_specifics: true, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("more_specifics match {} w/o withdrawn", more_specifics); let more_specifics = more_specifics.more_specifics.unwrap(); + let ms_v4 = more_specifics + .v4 + .iter() + 
.filter(|p| p.prefix != Prefix::from_str("1.0.0.0/16").unwrap()) + .collect::>(); assert_eq!(more_specifics.len(), 1); - assert_eq!(more_specifics.v4.len(), 1); - let more_specifics = &more_specifics.v4[0]; + assert_eq!(ms_v4.len(), 1); + let more_specifics = &ms_v4[0]; assert_eq!(more_specifics.prefix, Prefix::from_str("1.0.0.0/17")?); assert_eq!(more_specifics.meta.len(), 4); assert_eq!( @@ -718,7 +804,7 @@ mod tests { //------------------ - tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 1)?; + tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 1, 10)?; tree_bitmap.mark_mui_as_active_v4(1)?; let more_specifics = tree_bitmap.match_prefix( @@ -729,19 +815,27 @@ mod tests { include_less_specifics: false, include_more_specifics: true, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("more_specifics match w/o withdrawn #2 {}", more_specifics); // We withdrew mui 1 for the requested prefix itself, since mui 2 was // already withdrawn above, we're left with 3 records - assert_eq!(more_specifics.prefix_meta.len(), 3); + println!("PREFIX META: {:#?}", more_specifics.records); + assert_eq!(more_specifics.records.len(), 3); let more_specifics = more_specifics.more_specifics.unwrap(); + + let ms_v4 = more_specifics + .v4 + .iter() + .filter(|p| p.prefix != Prefix::from_str("1.0.0.0/16").unwrap()) + .collect::>(); assert_eq!(more_specifics.len(), 1); - assert_eq!(more_specifics.v4.len(), 1); - let more_specifics = &more_specifics.v4[0]; + assert_eq!(ms_v4.len(), 1); + let more_specifics = &ms_v4[0]; assert_eq!(more_specifics.prefix, Prefix::from_str("1.0.0.0/17")?); // one more more_specific should have been added due to mui 1 being @@ -766,10 +860,10 @@ mod tests { assert!(rec.is_empty()); // withdraw muis 2,3,4,5 for the requested prefix - tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 2)?; - tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 3)?; - tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 4)?; - 
tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 5)?; + tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 2, 11)?; + tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 3, 12)?; + tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 4, 13)?; + tree_bitmap.mark_mui_as_withdrawn_for_prefix(&wd_pfx, 5, 14)?; let more_specifics = tree_bitmap.match_prefix( &Prefix::from_str("1.0.0.0/16")?, @@ -779,21 +873,28 @@ mod tests { include_less_specifics: false, include_more_specifics: true, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("more_specifics match w/o withdrawn #3 {}", more_specifics); - // This prefix should not be found, since we withdrew all records for it. - assert!(more_specifics.prefix_meta.is_empty()); + // This prefix should not be found, since we withdrew all records + // for it. + assert!(more_specifics.records.is_empty()); // ..as a result, its resulting match_type should be EmptyMatch assert_eq!(more_specifics.match_type, MatchType::EmptyMatch); let more_specifics = more_specifics.more_specifics.unwrap(); + let ms_v4 = more_specifics + .v4 + .iter() + .filter(|p| p.prefix != Prefix::from_str("1.0.0.0/16").unwrap()) + .collect::>(); assert_eq!(more_specifics.len(), 1); - assert_eq!(more_specifics.v4.len(), 1); - let more_specifics = &more_specifics.v4[0]; + assert_eq!(ms_v4.len(), 1); + let more_specifics = &ms_v4[0]; assert_eq!(more_specifics.prefix, Prefix::from_str("1.0.0.0/17")?); // all muis should be visible for the more specifics @@ -818,9 +919,10 @@ mod tests { //---------------------- + trace!("less_specifics match w/o withdrawn #4"); // Change the requested prefix to the more specific from the former // queries. 
- let less_specifics = tree_bitmap.match_prefix( + let query = tree_bitmap.match_prefix( &Prefix::from_str("1.0.0.0/17")?, &MatchOptions { match_type: MatchType::ExactMatch, @@ -828,21 +930,29 @@ mod tests { include_less_specifics: true, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; - println!("less_specifics match w/o withdrawn #4 {}", less_specifics); + trace!("{:#?}", query); - assert_eq!(less_specifics.prefix_meta.len(), 5); + assert_eq!(query.records.len(), 5); - let less_specifics = less_specifics.less_specifics.unwrap(); - // All records for the less specific /16 are withdrawn, so this should be empty. + let less_specifics = query.less_specifics.unwrap(); + + // All records for the less specific /16 are withdrawn, so this should + // be empty. assert!(less_specifics.is_empty()); //-------------------- - tree_bitmap.mark_mui_as_active_for_prefix(&wd_pfx, 5)?; + println!("less_specifics match w/o withdrawn #5"); + + trace!("mark {} as active", wd_pfx); + tree_bitmap + .mark_mui_as_active_for_prefix(&wd_pfx, 5, 1) + .unwrap(); let less_specifics = tree_bitmap.match_prefix( &Prefix::from_str("1.0.0.0/17")?, @@ -852,11 +962,12 @@ mod tests { include_less_specifics: true, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); - println!("more_specifics match w/o withdrawn #5 {}", less_specifics); + )?; let less_specifics = less_specifics.less_specifics.unwrap(); + println!("{:#?}", less_specifics); assert_eq!(less_specifics.v4.len(), 1); let less_specifics = &less_specifics.v4[0]; diff --git a/tests/treebitmap_v6.rs b/tests/treebitmap_v6.rs index 5606ff30..9ecd5b7d 100644 --- a/tests/treebitmap_v6.rs +++ b/tests/treebitmap_v6.rs @@ -11,16 +11,31 @@ mod common { #[cfg(test)] mod tests { - use inetnum::addr::Prefix; use rotonda_store::{ - meta_examples::NoMeta, meta_examples::PrefixAs, prelude::multi::*, - prelude::*, + addr::Prefix, + epoch, + 
match_options::{IncludeHistory, MatchOptions, MatchType}, + prefix_record::{Record, RouteStatus}, + rib::{ + config::{Config, MemoryOnlyConfig, PersistOnlyConfig}, + StarCastRib, + }, + test_types::{NoMeta, PrefixAs}, + IntoIpAddr, }; - #[test] - fn test_arbitrary_insert_ipv6() -> Result<(), Box> - { - let trie = &mut MultiThreadedStore::::new()?; + rotonda_store::all_strategies![ + tests_ipv6; + test_arbitrary_insert_ipv6; + NoMeta + ]; + + // #[test] + fn test_arbitrary_insert_ipv6( + trie: StarCastRib, + ) -> Result<(), Box> { + crate::common::init(); + // let trie = &mut MultiThreadedStore::::try_default()?; let guard = &epoch::pin(); let a_pfx = Prefix::new_relaxed( ("2001:67c:1bfc::").parse::()?.into(), @@ -45,20 +60,32 @@ mod tests { include_less_specifics: true, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("prefix: {:?}", &expect_pfx); println!("result: {:#?}", &res); assert!(res.prefix.is_some()); + assert_eq!(res.prefix, Some(expect_pfx?)); Ok(()) } - #[test] - fn test_insert_extremes_ipv6() -> Result<(), Box> { - let trie = &mut MultiThreadedStore::::new()?; + rotonda_store::all_strategies![ + tests_ipv6_2; + test_insert_extremes_ipv6; + NoMeta + ]; + + // #[test] + fn test_insert_extremes_ipv6( + trie: StarCastRib, + ) -> Result<(), Box> { + crate::common::init(); + + // let trie = &mut MultiThreadedStore::::try_default()?; let min_pfx = Prefix::new_relaxed( std::net::Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0).into(), 1, @@ -84,17 +111,21 @@ mod tests { include_less_specifics: true, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); - println!("prefix: {:?}", &expect_pfx); - println!("result: {:#?}", &res); + )?; + println!("prefix: {}", &expect_pfx.unwrap()); + println!("result: {}", &res); assert!(res.prefix.is_some()); assert_eq!(res.prefix, Some(expect_pfx?)); let max_pfx = Prefix::new_relaxed( - std::net::Ipv6Addr::new(255, 255, 255, 255, 
255, 255, 255, 255) - .into(), + std::net::Ipv6Addr::new( + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, + ) + .into(), 128, ); @@ -105,11 +136,15 @@ mod tests { None, )?; let expect_pfx = Prefix::new_relaxed( - std::net::Ipv6Addr::new(255, 255, 255, 255, 255, 255, 255, 255) - .into(), + std::net::Ipv6Addr::new( + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, + ) + .into(), 128, ); + println!("done inserting..."); let guard = &epoch::pin(); let res = trie.match_prefix( &expect_pfx?, @@ -119,23 +154,32 @@ mod tests { include_less_specifics: true, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; assert!(res.prefix.is_some()); assert_eq!(res.prefix, Some(expect_pfx?)); Ok(()) } + rotonda_store::all_strategies![ + max_levels; + test_max_levels; + PrefixAs + ]; + // This test aims to fill all the levels available in the PrefixBuckets // mapping. This tests the prefix-length-to-bucket-sizes-per-storage- // level mapping, most notably if the exit condition is met (a zero at // the end of a prefix-length array). 
- #[test] - fn test_max_levels() -> Result<(), Box> { + // #[test] + fn test_max_levels( + tree_bitmap: StarCastRib, + ) -> Result<(), Box> { crate::common::init(); - let tree_bitmap = MultiThreadedStore::::new()?; + // let tree_bitmap = MultiThreadedStore::::try_default()?; let pfxs = vec![ // 0-7 Prefix::new_relaxed( @@ -274,35 +318,49 @@ mod tests { for pfx in pfxs.into_iter() { tree_bitmap.insert( &pfx?, - Record::new(0, 0, RouteStatus::Active, PrefixAs(666)), + Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new_from_u32(666), + ), None, )?; } let guard = &epoch::pin(); - for pfx in tree_bitmap.prefixes_iter() { - // let pfx_nm = pfx.strip_meta(); + for pfx in tree_bitmap.prefixes_iter(guard) { + let pfx = pfx.as_ref().unwrap().prefix; let res = tree_bitmap.match_prefix( - &pfx.prefix, + &pfx, &MatchOptions { match_type: MatchType::LongestMatch, include_withdrawn: false, include_less_specifics: false, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("{}", pfx); - assert_eq!(res.prefix.unwrap(), pfx.prefix); + assert_eq!(res.prefix.unwrap(), pfx); } Ok(()) } - #[test] - fn test_tree_ipv6() -> Result<(), Box> { - let tree_bitmap = MultiThreadedStore::::new()?; + rotonda_store::all_strategies![ + tree_ipv6_2; + test_tree_ipv6; + PrefixAs + ]; + + // #[test] + fn test_tree_ipv6( + tree_bitmap: StarCastRib, + ) -> Result<(), Box> { + // let tree_bitmap = MultiThreadedStore::::try_default()?; let pfxs = vec![ // Prefix::new_relaxed(0b0000_0000_0000_0000_0000_0000_0000_000 0_u128.into_ipaddr(), 0), Prefix::new_relaxed( @@ -535,7 +593,12 @@ mod tests { for pfx in pfxs.into_iter() { tree_bitmap.insert( &pfx?, - Record::new(0, 0, RouteStatus::Active, PrefixAs(666)), + Record::new( + 0, + 0, + RouteStatus::Active, + PrefixAs::new_from_u32(666), + ), None, )?; } @@ -547,21 +610,22 @@ mod tests { // }; let guard = &epoch::pin(); - for pfx in tree_bitmap.prefixes_iter() { - // let pfx_nm = 
pfx.strip_meta(); + for pfx in tree_bitmap.prefixes_iter(guard) { + let pfx = pfx.unwrap().prefix; let res = tree_bitmap.match_prefix( - &pfx.prefix, + &pfx, &MatchOptions { match_type: MatchType::LongestMatch, include_withdrawn: false, include_less_specifics: false, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("{}", pfx); - assert_eq!(res.prefix.unwrap(), pfx.prefix); + assert_eq!(res.prefix.unwrap(), pfx); } let res = tree_bitmap.match_prefix( @@ -575,9 +639,10 @@ mod tests { include_less_specifics: true, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; println!("prefix {:?}", res.prefix); println!("res: {:#?}", &res); @@ -612,10 +677,13 @@ mod tests { Ok(()) } + // This test cannot be run with the current test creation macro. The + // test recreates the store for each outer loop! #[test] - fn test_ranges_ipv4() -> Result<(), Box> { + fn test_ranges_ipv6_mo() -> Result<(), Box> { for i_net in 0..255 { - let tree_bitmap = MultiThreadedStore::::new()?; + let tree_bitmap = + StarCastRib::::try_default()?; let pfx_vec: Vec = (1..32) .collect::>() @@ -660,9 +728,72 @@ mod tests { include_less_specifics: false, include_more_specifics: false, mui: None, + include_history: IncludeHistory::None, }, guard, - ); + )?; + println!("{:?}", pfx); + + assert_eq!(res.prefix.unwrap(), res_pfx?); + } + } + } + Ok(()) + } + + #[test] + fn test_ranges_ipv6_po() -> Result<(), Box> { + for i_net in 0..255 { + let tree_bitmap = + StarCastRib::::try_default()?; + + let pfx_vec: Vec = (1..32) + .collect::>() + .into_iter() + .map(|i_len| { + Prefix::new_relaxed( + std::net::Ipv6Addr::new(i_net, 0, 0, 0, 0, 0, 0, 0) + .into(), + i_len, + ) + .unwrap() + }) + .collect(); + + let mut i_len_s = 0; + for pfx in pfx_vec { + i_len_s += 1; + tree_bitmap.insert( + &pfx, + Record::new(0, 0, RouteStatus::Active, NoMeta::Empty), + None, + )?; + + let res_pfx = 
Prefix::new_relaxed( + std::net::Ipv6Addr::new(i_net, 0, 0, 0, 0, 0, 0, 0) + .into(), + i_len_s, + ); + + let guard = &epoch::pin(); + for s_len in i_len_s..32 { + let pfx = Prefix::new_relaxed( + std::net::Ipv6Addr::new(i_net, 0, 0, 0, 0, 0, 0, 0) + .into(), + s_len, + )?; + let res = tree_bitmap.match_prefix( + &pfx, + &MatchOptions { + match_type: MatchType::LongestMatch, + include_withdrawn: false, + include_less_specifics: false, + include_more_specifics: false, + mui: None, + include_history: IncludeHistory::None, + }, + guard, + )?; println!("{:?}", pfx); assert_eq!(res.prefix.unwrap(), res_pfx?);