diff --git a/.gitignore b/.gitignore index 85486d4c..bfccc3bb 100644 --- a/.gitignore +++ b/.gitignore @@ -45,6 +45,8 @@ temp # Local caches .cache artifacts/pr-review/ +__pycache__/ +*.pyc # Playwright artifacts test-results diff --git a/Cargo.lock b/Cargo.lock index 2c29231e..d4e2d2c4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6413,6 +6413,7 @@ dependencies = [ "pulldown-cmark", "serde", "serde_json", + "sha2", "time", "warp-cli", ] diff --git a/Makefile b/Makefile index 58a7d031..357dcade 100644 --- a/Makefile +++ b/Makefile @@ -77,9 +77,9 @@ docs-ci: @echo "[docs] CI build (no npm install)" @npm run --silent docs:build # Benchmarks and reports -.PHONY: bench-report vendor-d3 bench-serve bench-open +.PHONY: bench-report bench-vendor vendor-d3 bench-serve bench-open -vendor-d3: +bench-vendor: @mkdir -p docs/benchmarks/vendor @if [ ! -f docs/benchmarks/vendor/d3.v7.min.js ]; then \ echo "Downloading D3 v7 to docs/benchmarks/vendor..."; \ @@ -88,6 +88,22 @@ vendor-d3: else \ echo "D3 already present (docs/benchmarks/vendor/d3.v7.min.js)"; \ fi + @if [ ! -f docs/benchmarks/vendor/open-props.min.css ]; then \ + echo "Downloading Open Props to docs/benchmarks/vendor..."; \ + curl -fsSL https://unpkg.com/open-props@1.7.16/open-props.min.css -o docs/benchmarks/vendor/open-props.min.css; \ + echo "Open Props saved to docs/benchmarks/vendor/open-props.min.css"; \ + else \ + echo "Open Props already present (docs/benchmarks/vendor/open-props.min.css)"; \ + fi + @if [ ! 
-f docs/benchmarks/vendor/normalize.dark.min.css ]; then \ + echo "Downloading Open Props normalize.dark to docs/benchmarks/vendor..."; \ + curl -fsSL https://unpkg.com/open-props@1.7.16/normalize.dark.min.css -o docs/benchmarks/vendor/normalize.dark.min.css; \ + echo "Open Props normalize.dark saved to docs/benchmarks/vendor/normalize.dark.min.css"; \ + else \ + echo "Open Props normalize.dark already present (docs/benchmarks/vendor/normalize.dark.min.css)"; \ + fi + +vendor-d3: bench-vendor bench-serve: @echo "Serving repo at http://localhost:$(BENCH_PORT) (Ctrl+C to stop)" @@ -96,6 +112,9 @@ bench-serve: OPEN := $(shell if command -v open >/dev/null 2>&1; then echo open; \ elif command -v xdg-open >/dev/null 2>&1; then echo xdg-open; \ elif command -v powershell.exe >/dev/null 2>&1; then echo powershell.exe; fi) +BENCH_INLINE_REPORT_PATH := $(abspath docs/benchmarks/report-inline.html) +BENCH_INLINE_REPORT_URI := file://$(BENCH_INLINE_REPORT_PATH) +BENCH_POLICY_REPORT_URI := $(BENCH_INLINE_REPORT_URI)\#parallel-policy bench-open: @if [ -n "$(OPEN)" ]; then \ @@ -104,7 +123,7 @@ bench-open: echo "Open URL: http://localhost:$(BENCH_PORT)/docs/benchmarks/" ; \ fi -bench-report: vendor-d3 +bench-report: bench-vendor @echo "Running benches (warp-benches)..." cargo bench -p warp-benches @echo "Starting local server on :$(BENCH_PORT) and opening dashboard..." @@ -147,19 +166,60 @@ bench-stop: echo "[bench] No PID file at target/bench_http.pid"; \ fi -.PHONY: bench-bake bench-open-inline +.PHONY: bench-bake bench-open-inline bench-policy-bake bench-policy-export bench-policy-open-inline -# Bake a standalone HTML with inline data that works over file:// -bench-bake: vendor-d3 +# Bake an offline-friendly HTML report with inline data and local vendored assets. +bench-bake: bench-vendor @echo "Running benches (warp-benches)..." cargo bench -p warp-benches @echo "Baking inline report..." 
- @python3 scripts/bench_bake.py --out docs/benchmarks/report-inline.html + @cargo xtask bench bake \ + --out docs/benchmarks/report-inline.html \ + --policy-json-out docs/benchmarks/parallel-policy-matrix.json + @cargo xtask bench check-artifacts \ + --html docs/benchmarks/report-inline.html \ + --json docs/benchmarks/parallel-policy-matrix.json @echo "Opening inline report..." - @open docs/benchmarks/report-inline.html + @if [ -n "$(OPEN)" ]; then \ + $(OPEN) docs/benchmarks/report-inline.html >/dev/null 2>&1 || echo "Open file: docs/benchmarks/report-inline.html" ; \ + else \ + echo "Open file: docs/benchmarks/report-inline.html" ; \ + fi bench-open-inline: - @open docs/benchmarks/report-inline.html + @if [ -n "$(OPEN)" ]; then \ + $(OPEN) docs/benchmarks/report-inline.html >/dev/null 2>&1 || echo "Open file: docs/benchmarks/report-inline.html" ; \ + else \ + echo "Open file: docs/benchmarks/report-inline.html" ; \ + fi + +bench-policy-export: bench-vendor + @echo "Exporting parallel policy matrix JSON..." + @cargo xtask bench policy-export \ + --json-out docs/benchmarks/parallel-policy-matrix.json + @echo "Baking unified inline report..." + @cargo xtask bench bake --out docs/benchmarks/report-inline.html + @cargo xtask bench check-artifacts \ + --html docs/benchmarks/report-inline.html \ + --json docs/benchmarks/parallel-policy-matrix.json + @pnpm exec prettier --write docs/benchmarks/report-inline.html >/dev/null + +bench-policy-bake: bench-vendor + @echo "Running parallel policy matrix benchmarks..." 
+ cargo bench -p warp-benches --bench parallel_baseline -- parallel_policy_matrix + @$(MAKE) bench-policy-export + @if [ -n "$(OPEN)" ]; then \ + $(OPEN) "$(BENCH_POLICY_REPORT_URI)" >/dev/null 2>&1 || echo "Open URL: $(BENCH_POLICY_REPORT_URI)" ; \ + else \ + echo "Open URL: $(BENCH_POLICY_REPORT_URI)" ; \ + fi + +bench-policy-open-inline: + @if [ -n "$(OPEN)" ]; then \ + $(OPEN) "$(BENCH_POLICY_REPORT_URI)" >/dev/null 2>&1 || echo "Open URL: $(BENCH_POLICY_REPORT_URI)" ; \ + else \ + echo "Open URL: $(BENCH_POLICY_REPORT_URI)" ; \ + fi # Spec-000 (WASM) helpers .PHONY: spec-000-dev spec-000-build diff --git a/crates/warp-benches/benches/README.md b/crates/warp-benches/benches/README.md index b84676aa..fb073a81 100644 --- a/crates/warp-benches/benches/README.md +++ b/crates/warp-benches/benches/README.md @@ -24,6 +24,21 @@ results. This README summarizes how to run them and read the output. - Throughput “elements” = rule applications (`n`). Uses `BatchSize::PerIteration` so engine construction is excluded from timing. +- `parallel_baseline.rs` + - Compares serial execution, the current shard-parallel baseline, the Phase 6B + work-queue pipeline, worker-count scaling, and the shard-policy matrix. + - The policy matrix compares: + - dynamic shard claiming + per-worker deltas + - dynamic shard claiming + per-shard deltas + - static round-robin shard assignment + per-worker deltas + - static round-robin shard assignment + per-shard deltas + - dedicated one-worker-per-shard + one-delta-per-shard + - Each case includes canonical delta merge after parallel execution, so the + study reflects full policy cost for the synthetic independent workload. + - The policy matrix runs across loads `100`, `1000`, and `10000`, with worker + counts `1`, `4`, and `8` where the policy uses a worker pool. + - Throughput “elements” = executed items in the synthetic independent workload. 
+ ## Run Run the full benches suite: @@ -37,6 +52,7 @@ Run a single bench target (faster dev loop): ```sh cargo bench -p warp-benches --bench snapshot_hash cargo bench -p warp-benches --bench scheduler_drain +cargo bench -p warp-benches --bench parallel_baseline ``` Criterion HTML reports are written under `target/criterion//report/index.html`. @@ -44,8 +60,11 @@ Criterion HTML reports are written under `target/criterion//report/index. ### Charts & Reports - Live server + dashboard: `make bench-report` opens `http://localhost:8000/docs/benchmarks/`. -- Offline static report (no server): `make bench-bake` writes `docs/benchmarks/report-inline.html` with results injected. +- Offline static report (no server): `make bench-bake` writes `docs/benchmarks/report-inline.html` with results, policy payload, and provenance injected. - Open the file directly (Finder or `open docs/benchmarks/report-inline.html`). +- The same static page also hosts the parallel shard-policy study. + - Run `make bench-policy-bake`, then open the `Parallel policy matrix` tab. + - `make bench-policy-export` rebakes from the existing local Criterion tree without rerunning benches. ## Interpreting Results diff --git a/crates/warp-benches/benches/parallel_baseline.rs b/crates/warp-benches/benches/parallel_baseline.rs index 93e2284a..426f18f1 100644 --- a/crates/warp-benches/benches/parallel_baseline.rs +++ b/crates/warp-benches/benches/parallel_baseline.rs @@ -20,14 +20,16 @@ //! - `serial_vs_parallel_N`: Compare parallel sharded execution vs serial baseline //! - `work_queue_pipeline_N`: Full Phase 6B pipeline (build_work_units → execute_work_queue) //! - `worker_scaling_100`: How throughput scales with worker count (1, 2, 4, 8, 16) +//! 
- `parallel_policy_matrix`: Compare shard assignment and delta accumulation policies across loads use criterion::{criterion_group, criterion_main, BatchSize, BenchmarkId, Criterion, Throughput}; use std::collections::BTreeMap; +use std::num::NonZeroUsize; use std::time::Duration; use warp_core::parallel::{build_work_units, execute_work_queue, WorkerResult}; use warp_core::{ - execute_parallel, execute_serial, make_node_id, make_type_id, make_warp_id, AtomPayload, - AttachmentKey, AttachmentValue, ExecItem, GraphStore, GraphView, NodeId, NodeKey, NodeRecord, - OpOrigin, TickDelta, WarpId, WarpOp, + execute_parallel, execute_parallel_with_policy, execute_serial, make_node_id, make_type_id, + make_warp_id, AtomPayload, AttachmentKey, AttachmentValue, ExecItem, GraphStore, GraphView, + NodeId, NodeKey, NodeRecord, OpOrigin, ParallelExecutionPolicy, TickDelta, WarpId, WarpOp, }; /// Simple executor that sets an attachment on the scope node. @@ -46,6 +48,20 @@ fn touch_executor(view: GraphView<'_>, scope: &NodeId, delta: &mut TickDelta) { }); } +/// Mirrors the production commit-path merge shape used without `delta_validate`. +fn merge_for_commit_path(deltas: Vec) -> Vec { + let mut flat: Vec<_> = deltas + .into_iter() + .flat_map(TickDelta::into_ops_unsorted) + .map(|op| (op.sort_key(), op)) + .collect(); + + flat.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + + flat.dedup_by(|a, b| a.0 == b.0); + flat.into_iter().map(|(_, op)| op).collect() +} + /// Create a test graph with N independent nodes. 
fn make_test_store(n: usize) -> (GraphStore, Vec) { let node_ty = make_type_id("bench/node"); @@ -297,10 +313,107 @@ fn bench_worker_scaling(c: &mut Criterion) { group.finish(); } +// ============================================================================= +// Policy matrix comparison +// ============================================================================= + +fn policy_label(policy: ParallelExecutionPolicy) -> &'static str { + match policy { + ParallelExecutionPolicy::DYNAMIC_PER_WORKER => "dynamic_per_worker", + ParallelExecutionPolicy::DYNAMIC_PER_SHARD => "dynamic_per_shard", + ParallelExecutionPolicy::STATIC_PER_WORKER => "static_per_worker", + ParallelExecutionPolicy::STATIC_PER_SHARD => "static_per_shard", + ParallelExecutionPolicy::DEDICATED_PER_SHARD => "dedicated_per_shard", + _ => panic!("unmapped ParallelExecutionPolicy in parallel_policy_matrix"), + } +} + +fn worker_hint(workers: usize) -> NonZeroUsize { + NonZeroUsize::new(workers.max(1)).map_or(NonZeroUsize::MIN, |w| w) +} + +/// Compares shard assignment and delta accumulation strategies directly. +/// +/// This includes canonical delta merge after parallel execution so the +/// `PerWorker` vs `PerShard` axis reflects the full policy cost visible to the +/// engine, not just executor-stage delta production. 
+fn bench_policy_matrix(c: &mut Criterion) { + let mut group = c.benchmark_group("parallel_policy_matrix"); + group + .warm_up_time(Duration::from_secs(2)) + .measurement_time(Duration::from_secs(5)) + .sample_size(40); + + let policies = [ + ParallelExecutionPolicy::DYNAMIC_PER_WORKER, + ParallelExecutionPolicy::DYNAMIC_PER_SHARD, + ParallelExecutionPolicy::STATIC_PER_WORKER, + ParallelExecutionPolicy::STATIC_PER_SHARD, + ParallelExecutionPolicy::DEDICATED_PER_SHARD, + ]; + + for &n in &[100usize, 1_000, 10_000] { + group.throughput(Throughput::Elements(n as u64)); + for policy in policies { + if policy == ParallelExecutionPolicy::DEDICATED_PER_SHARD { + group.bench_with_input(BenchmarkId::new(policy_label(policy), n), &n, |b, &n| { + b.iter_batched( + || { + let (store, nodes) = make_test_store(n); + let items = make_exec_items(&nodes); + (store, items) + }, + |(store, items)| { + let view = GraphView::new(&store); + let deltas = + execute_parallel_with_policy(view, &items, worker_hint(1), policy); + let merged = merge_for_commit_path(deltas); + criterion::black_box(merged) + }, + BatchSize::SmallInput, + ); + }); + continue; + } + + for &workers in &[1usize, 4, 8] { + group.bench_with_input( + BenchmarkId::new(format!("{}/{}w", policy_label(policy), workers), n), + &n, + |b, &n| { + b.iter_batched( + || { + let (store, nodes) = make_test_store(n); + let items = make_exec_items(&nodes); + (store, items) + }, + |(store, items)| { + let view = GraphView::new(&store); + let deltas = execute_parallel_with_policy( + view, + &items, + worker_hint(workers), + policy, + ); + let merged = merge_for_commit_path(deltas); + criterion::black_box(merged) + }, + BatchSize::SmallInput, + ); + }, + ); + } + } + } + + group.finish(); +} + criterion_group!( benches, bench_serial_vs_parallel, bench_work_queue, - bench_worker_scaling + bench_worker_scaling, + bench_policy_matrix ); criterion_main!(benches); diff --git a/crates/warp-core/src/lib.rs b/crates/warp-core/src/lib.rs index 
0ba4494c..595808c2 100644 --- a/crates/warp-core/src/lib.rs +++ b/crates/warp-core/src/lib.rs @@ -167,8 +167,9 @@ pub use ident::{ TypeId, WarpId, }; pub use parallel::{ - execute_parallel, execute_parallel_sharded, execute_serial, shard_of, ExecItem, MergeConflict, - PoisonedDelta, NUM_SHARDS, + execute_parallel, execute_parallel_sharded, execute_parallel_sharded_with_policy, + execute_parallel_with_policy, execute_serial, shard_of, DeltaAccumulationPolicy, ExecItem, + MergeConflict, ParallelExecutionPolicy, PoisonedDelta, ShardAssignmentPolicy, NUM_SHARDS, }; /// Delta merging functions, only available with `delta_validate` feature. /// diff --git a/crates/warp-core/src/parallel/exec.rs b/crates/warp-core/src/parallel/exec.rs index a8655e7f..bbb81b84 100644 --- a/crates/warp-core/src/parallel/exec.rs +++ b/crates/warp-core/src/parallel/exec.rs @@ -6,6 +6,7 @@ //! Workers dynamically claim shards via atomic counter (work-stealing). use std::any::Any; +use std::num::NonZeroUsize; use std::sync::atomic::{AtomicUsize, Ordering}; #[cfg(any(debug_assertions, feature = "footprint_enforce_release"))] @@ -20,6 +21,100 @@ use crate::NodeId; use super::shard::{partition_into_shards, NUM_SHARDS}; +/// How virtual shards are assigned to workers during parallel execution. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[non_exhaustive] +pub enum ShardAssignmentPolicy { + /// Workers claim shards dynamically via an atomic counter. + DynamicSteal, + /// Shards are assigned deterministically to workers by `shard_id % workers`. + StaticRoundRobin, + /// Each non-empty shard gets its own worker thread. + /// + /// This is primarily a benchmarking / comparison policy, not the default + /// engine topology. It intentionally maximizes scheduling isolation at the + /// cost of spawning up to one thread per non-empty shard. + DedicatedPerShard, +} + +/// How worker execution outputs are grouped into `TickDelta`s. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[non_exhaustive] +pub enum DeltaAccumulationPolicy { + /// Each worker accumulates all claimed shards into one `TickDelta`. + PerWorker, + /// Each non-empty shard produces its own `TickDelta`. + PerShard, +} + +/// Execution policy for the shard-based parallel executor. +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub struct ParallelExecutionPolicy { + /// How shards are assigned to workers. + assignment: ShardAssignmentPolicy, + /// How execution outputs are grouped into deltas. + accumulation: DeltaAccumulationPolicy, +} + +impl ParallelExecutionPolicy { + /// Current default execution policy used by `execute_parallel()`. + pub const DEFAULT: Self = Self { + assignment: ShardAssignmentPolicy::DynamicSteal, + accumulation: DeltaAccumulationPolicy::PerWorker, + }; + + /// Dynamic shard claiming with one output delta per worker. + pub const DYNAMIC_PER_WORKER: Self = Self { + assignment: ShardAssignmentPolicy::DynamicSteal, + accumulation: DeltaAccumulationPolicy::PerWorker, + }; + + /// Dynamic shard claiming with one output delta per non-empty shard. + pub const DYNAMIC_PER_SHARD: Self = Self { + assignment: ShardAssignmentPolicy::DynamicSteal, + accumulation: DeltaAccumulationPolicy::PerShard, + }; + + /// Deterministic round-robin shard assignment with one output delta per worker. + pub const STATIC_PER_WORKER: Self = Self { + assignment: ShardAssignmentPolicy::StaticRoundRobin, + accumulation: DeltaAccumulationPolicy::PerWorker, + }; + + /// Deterministic round-robin shard assignment with one output delta per non-empty shard. + pub const STATIC_PER_SHARD: Self = Self { + assignment: ShardAssignmentPolicy::StaticRoundRobin, + accumulation: DeltaAccumulationPolicy::PerShard, + }; + + /// One worker per non-empty shard with one output delta per shard. + /// + /// The `workers` argument is ignored for this policy. Empty input returns + /// zero deltas because there are no non-empty shards to assign. 
+ pub const DEDICATED_PER_SHARD: Self = Self { + assignment: ShardAssignmentPolicy::DedicatedPerShard, + accumulation: DeltaAccumulationPolicy::PerShard, + }; + + /// Returns the shard-assignment mode for this execution policy. + #[must_use] + pub const fn assignment(self) -> ShardAssignmentPolicy { + self.assignment + } + + /// Returns the delta-grouping mode for this execution policy. + #[must_use] + pub const fn accumulation(self) -> DeltaAccumulationPolicy { + self.accumulation + } +} + +impl Default for ParallelExecutionPolicy { + fn default() -> Self { + Self::DEFAULT + } +} + /// Classification of an executor for footprint enforcement. /// /// System items (engine-internal inbox rules) may emit instance-level ops @@ -169,9 +264,15 @@ pub fn execute_parallel(view: GraphView<'_>, items: &[ExecItem], workers: usize) assert!(workers >= 1, "need at least one worker"); // Cap workers at NUM_SHARDS - no point spawning 512 threads for 256 shards - let capped_workers = workers.min(NUM_SHARDS); + let capped_workers = + NonZeroUsize::new(workers.min(NUM_SHARDS)).map_or(NonZeroUsize::MIN, |w| w); - execute_parallel_sharded(view, items, capped_workers) + execute_parallel_sharded_with_policy( + view, + items, + capped_workers, + ParallelExecutionPolicy::DEFAULT, + ) } /// Parallel execution with virtual shard partitioning (Phase 6B). @@ -199,15 +300,95 @@ pub fn execute_parallel_sharded( items: &[ExecItem], workers: usize, ) -> Vec { - assert!(workers > 0, "workers must be > 0"); + assert!(workers >= 1, "need at least one worker"); + let workers = NonZeroUsize::new(workers).map_or(NonZeroUsize::MIN, |w| w); + execute_parallel_sharded_with_policy(view, items, workers, ParallelExecutionPolicy::DEFAULT) +} + +/// Parallel execution with an explicit shard assignment and delta accumulation policy. +/// +/// This exposes the execution-policy matrix for benchmarking and experimentation +/// while preserving `execute_parallel()` as the stable default entrypoint. 
+/// +/// `ParallelExecutionPolicy::DEDICATED_PER_SHARD` intentionally ignores the +/// `workers` argument and emits one delta per non-empty shard. All other +/// policies preserve the worker-shaped empty-result contract and return +/// `workers` empty deltas when `items` is empty. +/// +pub fn execute_parallel_sharded_with_policy( + view: GraphView<'_>, + items: &[ExecItem], + workers: NonZeroUsize, + policy: ParallelExecutionPolicy, +) -> Vec { + let workers = workers.get(); if items.is_empty() { - // Can't use vec![TickDelta::new(); workers] because TickDelta doesn't impl Clone - return (0..workers).map(|_| TickDelta::new()).collect(); + return match policy.assignment { + ShardAssignmentPolicy::DedicatedPerShard => Vec::new(), + _ => { + // Can't use vec![TickDelta::new(); workers] because TickDelta doesn't impl Clone + (0..workers).map(|_| TickDelta::new()).collect() + } + }; } // Partition into virtual shards by scope let shards = partition_into_shards(items); + match (policy.assignment, policy.accumulation) { + (ShardAssignmentPolicy::DynamicSteal, DeltaAccumulationPolicy::PerWorker) => { + execute_dynamic_per_worker(view, &shards, workers) + } + (ShardAssignmentPolicy::DynamicSteal, DeltaAccumulationPolicy::PerShard) => { + execute_dynamic_per_shard(view, &shards, workers) + } + (ShardAssignmentPolicy::StaticRoundRobin, DeltaAccumulationPolicy::PerWorker) => { + execute_static_per_worker(view, &shards, workers) + } + (ShardAssignmentPolicy::StaticRoundRobin, DeltaAccumulationPolicy::PerShard) => { + execute_static_per_shard(view, &shards, workers) + } + ( + ShardAssignmentPolicy::DedicatedPerShard, + DeltaAccumulationPolicy::PerWorker | DeltaAccumulationPolicy::PerShard, + ) => { + debug_assert_eq!( + policy.accumulation, + DeltaAccumulationPolicy::PerShard, + "DedicatedPerShard is only exposed with PerShard accumulation" + ); + execute_dedicated_per_shard(view, &shards) + } + } +} + +/// Parallel execution entry point with an explicit policy and worker cap. 
+/// +/// This mirrors `execute_parallel()` but exposes the policy seam for benchmarks. +/// +pub fn execute_parallel_with_policy( + view: GraphView<'_>, + items: &[ExecItem], + workers: NonZeroUsize, + policy: ParallelExecutionPolicy, +) -> Vec { + let capped_workers = + NonZeroUsize::new(workers.get().min(NUM_SHARDS)).map_or(NonZeroUsize::MIN, |w| w); + execute_parallel_sharded_with_policy(view, items, capped_workers, policy) +} + +fn execute_shard_into_delta(view: GraphView<'_>, items: &[ExecItem], delta: &mut TickDelta) { + for item in items { + let mut scoped = delta.scoped(item.origin); + (item.exec)(view, &item.scope, scoped.inner_mut()); + } +} + +fn execute_dynamic_per_worker( + view: GraphView<'_>, + shards: &[super::shard::VirtualShard], + workers: usize, +) -> Vec { let next_shard = AtomicUsize::new(0); std::thread::scope(|s| { @@ -219,21 +400,90 @@ pub fn execute_parallel_sharded( s.spawn(move || { let mut delta = TickDelta::new(); - - // Work-stealing loop: claim shards until none remain loop { let shard_id = next_shard.fetch_add(1, Ordering::Relaxed); if shard_id >= NUM_SHARDS { break; } + execute_shard_into_delta(view_copy, &shards[shard_id].items, &mut delta); + } + delta + }) + }) + .collect(); + + handles + .into_iter() + .map(|h| match h.join() { + Ok(delta) => delta, + Err(e) => std::panic::resume_unwind(e), + }) + .collect() + }) +} + +fn execute_dynamic_per_shard( + view: GraphView<'_>, + shards: &[super::shard::VirtualShard], + workers: usize, +) -> Vec { + let next_shard = AtomicUsize::new(0); + + std::thread::scope(|s| { + let handles: Vec<_> = (0..workers) + .map(|_| { + let view_copy = view; + let shards = &shards; + let next_shard = &next_shard; - // Execute all items in this shard (cache locality) - for item in &shards[shard_id].items { - let mut scoped = delta.scoped(item.origin); - (item.exec)(view_copy, &item.scope, scoped.inner_mut()); + s.spawn(move || { + let mut deltas: Vec<(usize, TickDelta)> = Vec::new(); + loop { + let 
shard_id = next_shard.fetch_add(1, Ordering::Relaxed); + if shard_id >= NUM_SHARDS { + break; } + let items = &shards[shard_id].items; + if items.is_empty() { + continue; + } + let mut delta = TickDelta::new(); + execute_shard_into_delta(view_copy, items, &mut delta); + deltas.push((shard_id, delta)); } + deltas + }) + }) + .collect(); + let mut deltas: Vec<(usize, TickDelta)> = handles + .into_iter() + .flat_map(|h| match h.join() { + Ok(worker_deltas) => worker_deltas, + Err(e) => std::panic::resume_unwind(e), + }) + .collect(); + deltas.sort_by_key(|(shard_id, _)| *shard_id); + deltas.into_iter().map(|(_, delta)| delta).collect() + }) +} + +fn execute_static_per_worker( + view: GraphView<'_>, + shards: &[super::shard::VirtualShard], + workers: usize, +) -> Vec { + std::thread::scope(|s| { + let handles: Vec<_> = (0..workers) + .map(|worker_ix| { + let view_copy = view; + let shards = &shards; + + s.spawn(move || { + let mut delta = TickDelta::new(); + for shard_id in (worker_ix..NUM_SHARDS).step_by(workers) { + execute_shard_into_delta(view_copy, &shards[shard_id].items, &mut delta); + } delta }) }) @@ -249,6 +499,77 @@ pub fn execute_parallel_sharded( }) } +fn execute_static_per_shard( + view: GraphView<'_>, + shards: &[super::shard::VirtualShard], + workers: usize, +) -> Vec { + std::thread::scope(|s| { + let handles: Vec<_> = (0..workers) + .map(|worker_ix| { + let view_copy = view; + let shards = &shards; + + s.spawn(move || { + let mut deltas: Vec<(usize, TickDelta)> = Vec::new(); + for shard_id in (worker_ix..NUM_SHARDS).step_by(workers) { + let items = &shards[shard_id].items; + if items.is_empty() { + continue; + } + let mut delta = TickDelta::new(); + execute_shard_into_delta(view_copy, items, &mut delta); + deltas.push((shard_id, delta)); + } + deltas + }) + }) + .collect(); + + let mut deltas: Vec<(usize, TickDelta)> = handles + .into_iter() + .flat_map(|h| match h.join() { + Ok(worker_deltas) => worker_deltas, + Err(e) => 
std::panic::resume_unwind(e), + }) + .collect(); + deltas.sort_by_key(|(shard_id, _)| *shard_id); + deltas.into_iter().map(|(_, delta)| delta).collect() + }) +} + +fn execute_dedicated_per_shard( + view: GraphView<'_>, + shards: &[super::shard::VirtualShard], +) -> Vec { + std::thread::scope(|s| { + let handles: Vec<_> = shards + .iter() + .enumerate() + .filter(|(_, shard)| !shard.items.is_empty()) + .map(|(shard_id, shard)| { + let view_copy = view; + let items = &shard.items; + s.spawn(move || { + let mut delta = TickDelta::new(); + execute_shard_into_delta(view_copy, items, &mut delta); + (shard_id, delta) + }) + }) + .collect(); + + let mut deltas: Vec<(usize, TickDelta)> = handles + .into_iter() + .map(|h| match h.join() { + Ok(delta) => delta, + Err(e) => std::panic::resume_unwind(e), + }) + .collect(); + deltas.sort_by_key(|(shard_id, _)| *shard_id); + deltas.into_iter().map(|(_, delta)| delta).collect() + }) +} + // ============================================================================= // Cross-Warp Parallelism (Phase 6B+) // ============================================================================= @@ -529,3 +850,235 @@ fn execute_item_enforced( Ok(delta) } } + +#[cfg(test)] +mod tests { + use super::{execute_parallel_with_policy, ExecItem, ParallelExecutionPolicy}; + use crate::{ + execute_serial, make_type_id, merge_deltas_ok, AtomPayload, AttachmentKey, AttachmentValue, + GraphStore, GraphView, NodeId, NodeKey, NodeRecord, OpOrigin, TickDelta, WarpOp, + }; + use std::num::NonZeroUsize; + + fn test_executor(view: GraphView<'_>, scope: &NodeId, delta: &mut TickDelta) { + let payload = AtomPayload::new( + make_type_id("parallel/policy-test"), + bytes::Bytes::from_static(b"ok"), + ); + let key = AttachmentKey::node_alpha(NodeKey { + warp_id: view.warp_id(), + local_id: *scope, + }); + delta.push(WarpOp::SetAttachment { + key, + value: Some(AttachmentValue::Atom(payload)), + }); + } + + fn make_store_and_items(count: usize) -> (GraphStore, Vec) { 
+ let mut store = GraphStore::default(); + let node_ty = make_type_id("parallel/policy-node"); + let mut items = Vec::with_capacity(count); + for i in 0..count { + let mut bytes = [0u8; 32]; + assert!( + u8::try_from(i).is_ok(), + "test fixture only supports up to 256 scopes" + ); + bytes[0] = u8::try_from(i).unwrap_or(0); + let scope = NodeId(bytes); + store.insert_node(scope, NodeRecord { ty: node_ty }); + items.push(ExecItem::new( + test_executor, + scope, + OpOrigin { + intent_id: i as u64, + rule_id: 1, + match_ix: 0, + op_ix: 0, + }, + )); + } + (store, items) + } + + fn worker_hint(workers: usize) -> NonZeroUsize { + NonZeroUsize::new(workers.max(1)).map_or(NonZeroUsize::MIN, |w| w) + } + + #[test] + fn all_parallel_policies_match_serial_oracle() { + let policies = [ + ParallelExecutionPolicy::DYNAMIC_PER_WORKER, + ParallelExecutionPolicy::DYNAMIC_PER_SHARD, + ParallelExecutionPolicy::STATIC_PER_WORKER, + ParallelExecutionPolicy::STATIC_PER_SHARD, + ParallelExecutionPolicy::DEDICATED_PER_SHARD, + ]; + let (store, items) = make_store_and_items(32); + let view = GraphView::new(&store); + let serial_oracle_result = merge_deltas_ok(vec![execute_serial(view, &items)]); + assert!( + serial_oracle_result.is_ok(), + "serial oracle merge failed: {serial_oracle_result:?}" + ); + let Ok(serial_oracle) = serial_oracle_result else { + unreachable!("assert above guarantees a valid serial oracle"); + }; + + for policy in policies { + for workers in [1_usize, 4, 8] { + let deltas = + execute_parallel_with_policy(view, &items, worker_hint(workers), policy); + let merged_result = merge_deltas_ok(deltas); + assert!( + merged_result.is_ok(), + "policy merge failed for {policy:?} @ {workers}w: {merged_result:?}" + ); + let Ok(merged) = merged_result else { + unreachable!("assert above guarantees a successful policy merge"); + }; + assert_eq!( + merged, serial_oracle, + "policy {policy:?} changed merged ops at {workers}w" + ); + } + } + } + + #[test] + fn 
per_shard_policy_emits_more_than_one_delta_when_one_worker_sees_many_shards() { + let (store, items) = make_store_and_items(8); + let view = GraphView::new(&store); + + let per_worker = execute_parallel_with_policy( + view, + &items, + worker_hint(1), + ParallelExecutionPolicy::DYNAMIC_PER_WORKER, + ); + let per_shard = execute_parallel_with_policy( + view, + &items, + worker_hint(1), + ParallelExecutionPolicy::DYNAMIC_PER_SHARD, + ); + let serial_oracle = merge_deltas_ok(vec![execute_serial(view, &items)]); + assert!( + serial_oracle.is_ok(), + "serial oracle merge failed for per-shard count test: {serial_oracle:?}" + ); + let Ok(serial_oracle) = serial_oracle else { + unreachable!("assert above guarantees a valid serial oracle"); + }; + let per_worker_len = per_worker.len(); + let per_shard_len = per_shard.len(); + let per_worker_merged = merge_deltas_ok(per_worker); + let per_shard_merged = merge_deltas_ok(per_shard); + assert!( + per_worker_merged.is_ok(), + "per-worker merge failed: {per_worker_merged:?}" + ); + assert!( + per_shard_merged.is_ok(), + "per-shard merge failed: {per_shard_merged:?}" + ); + let Ok(per_worker_merged) = per_worker_merged else { + unreachable!("assert above guarantees a merged per-worker result"); + }; + let Ok(per_shard_merged) = per_shard_merged else { + unreachable!("assert above guarantees a merged per-shard result"); + }; + + assert_eq!(per_worker_len, 1, "per-worker policy should emit one delta"); + assert!( + per_shard_len > 1, + "per-shard policy should emit multiple deltas when one worker processes multiple shards" + ); + assert_eq!( + per_worker_merged, serial_oracle, + "per-worker 1w path changed merged ops" + ); + assert_eq!( + per_shard_merged, serial_oracle, + "per-shard 1w path changed merged ops" + ); + } + + #[test] + fn dedicated_per_shard_ignores_worker_count() { + let (store, items) = make_store_and_items(8); + let view = GraphView::new(&store); + + let one_worker = execute_parallel_with_policy( + view, + &items, 
+ worker_hint(1), + ParallelExecutionPolicy::DEDICATED_PER_SHARD, + ); + let many_workers = execute_parallel_with_policy( + view, + &items, + worker_hint(8), + ParallelExecutionPolicy::DEDICATED_PER_SHARD, + ); + let serial_oracle = merge_deltas_ok(vec![execute_serial(view, &items)]); + assert!( + serial_oracle.is_ok(), + "serial oracle merge failed for dedicated-per-shard test: {serial_oracle:?}" + ); + let Ok(serial_oracle) = serial_oracle else { + unreachable!("assert above guarantees a valid serial oracle"); + }; + let one_worker_len = one_worker.len(); + let many_workers_len = many_workers.len(); + let one_worker_merged = merge_deltas_ok(one_worker); + let many_workers_merged = merge_deltas_ok(many_workers); + assert!( + one_worker_merged.is_ok(), + "dedicated-per-shard 1w merge failed: {one_worker_merged:?}" + ); + assert!( + many_workers_merged.is_ok(), + "dedicated-per-shard 8w merge failed: {many_workers_merged:?}" + ); + let Ok(one_worker_merged) = one_worker_merged else { + unreachable!("assert above guarantees a merged dedicated-per-shard 1w result"); + }; + let Ok(many_workers_merged) = many_workers_merged else { + unreachable!("assert above guarantees a merged dedicated-per-shard 8w result"); + }; + + assert_eq!( + one_worker_len, many_workers_len, + "dedicated-per-shard ignores the worker-count hint" + ); + assert_eq!( + one_worker_merged, serial_oracle, + "dedicated-per-shard 1w changed merged ops" + ); + assert_eq!( + many_workers_merged, serial_oracle, + "dedicated-per-shard 8w changed merged ops" + ); + } + + #[test] + fn dedicated_per_shard_empty_workload_emits_no_deltas() { + let store = GraphStore::default(); + let items: Vec = Vec::new(); + let view = GraphView::new(&store); + + let deltas = execute_parallel_with_policy( + view, + &items, + worker_hint(4), + ParallelExecutionPolicy::DEDICATED_PER_SHARD, + ); + + assert!( + deltas.is_empty(), + "empty workload should produce no deltas for dedicated-per-shard" + ); + } +} diff --git 
a/crates/warp-core/src/parallel/mod.rs b/crates/warp-core/src/parallel/mod.rs index 6f4ec66b..cc7ceaa3 100644 --- a/crates/warp-core/src/parallel/mod.rs +++ b/crates/warp-core/src/parallel/mod.rs @@ -12,8 +12,10 @@ pub mod shard; #[cfg(not(feature = "unsafe_graph"))] pub(crate) use exec::ExecItemKind; pub use exec::{ - build_work_units, execute_parallel, execute_parallel_sharded, execute_serial, - execute_work_queue, ExecItem, PoisonedDelta, WorkUnit, WorkerResult, + build_work_units, execute_parallel, execute_parallel_sharded, + execute_parallel_sharded_with_policy, execute_parallel_with_policy, execute_serial, + execute_work_queue, DeltaAccumulationPolicy, ExecItem, ParallelExecutionPolicy, PoisonedDelta, + ShardAssignmentPolicy, WorkUnit, WorkerResult, }; #[cfg(not(any(test, feature = "delta_validate")))] pub(crate) use merge::check_write_to_new_warp; diff --git a/crates/warp-core/tests/parallel_merge_warpopkey.rs b/crates/warp-core/tests/parallel_merge_warpopkey.rs index 36c5bea4..92223ef1 100644 --- a/crates/warp-core/tests/parallel_merge_warpopkey.rs +++ b/crates/warp-core/tests/parallel_merge_warpopkey.rs @@ -21,6 +21,27 @@ use warp_core::{ // T1.1: WarpOpKey Collision Safety // ============================================================================= +fn merged_ops(deltas: Vec) -> Vec { + let merged = merge_deltas_ok(deltas); + assert!(merged.is_ok(), "merge should succeed: {merged:?}"); + let Ok(ops) = merged else { + unreachable!("assert above guarantees merge success"); + }; + ops +} + +fn merge_conflict(deltas: Vec) -> warp_core::MergeConflict { + let merged = merge_deltas_ok(deltas); + assert!( + matches!(&merged, Err(MergeError::Conflict(_))), + "expected MergeError::Conflict, got: {merged:?}" + ); + let Err(MergeError::Conflict(conflict)) = merged else { + unreachable!("assert above guarantees merge conflict"); + }; + *conflict +} + /// T1.1.1 - Two deltas with same local target but different warp survive merge. 
/// /// This test verifies that operations targeting the same local node ID but in @@ -90,8 +111,7 @@ fn warp_op_key_distinguishes_by_warp_and_survives_merge() { ); // Merge should succeed without conflict - ops target different logical keys - let merged = merge_deltas_ok(vec![delta_a, delta_b]); - let ops = merged.expect("merge should succeed: ops target different warps"); + let ops = merged_ops(vec![delta_a, delta_b]); // Both ops must survive assert_eq!(ops.len(), 2, "Both cross-warp ops must survive merge"); @@ -166,7 +186,7 @@ fn warp_op_key_ordering_stability_btreemap() { .enumerate() .map(|(i, op)| make_delta(i, op)) .collect(); - let result_forward = merge_deltas_ok(deltas_forward).expect("merge should succeed"); + let result_forward = merged_ops(deltas_forward); // Merge in reverse order let deltas_reverse: Vec<_> = ops @@ -175,7 +195,7 @@ fn warp_op_key_ordering_stability_btreemap() { .rev() .map(|(i, op)| make_delta(i, op)) .collect(); - let result_reverse = merge_deltas_ok(deltas_reverse).expect("merge should succeed"); + let result_reverse = merged_ops(deltas_reverse); // Must have all 3 ops (no collisions) assert_eq!(result_forward.len(), 3, "All 3 ops must survive merge"); @@ -218,10 +238,7 @@ fn warp_op_key_same_warp_same_target_merges_correctly() { bytes::Bytes::from_static(b"same-data"), ))); - let op = WarpOp::SetAttachment { - key, - value: value.clone(), - }; + let op = WarpOp::SetAttachment { key, value }; let mut delta1 = TickDelta::new(); delta1.emit_with_origin( @@ -245,8 +262,7 @@ fn warp_op_key_same_warp_same_target_merges_correctly() { }, ); - let merged = merge_deltas_ok(vec![delta1, delta2]); - let ops = merged.expect("identical ops should dedupe without conflict"); + let ops = merged_ops(vec![delta1, delta2]); assert_eq!(ops.len(), 1, "Identical ops must dedupe to 1"); assert_eq!(ops[0], op); } @@ -293,16 +309,7 @@ fn warp_op_key_same_warp_same_target_merges_correctly() { }, ); - let merged = merge_deltas_ok(vec![delta1, delta2]); - 
assert!( - merged.is_err(), - "Different values for same key must produce MergeConflict" - ); - - let err = merged.unwrap_err(); - let MergeError::Conflict(conflict) = err else { - panic!("Expected MergeError::Conflict, got: {:?}", err); - }; + let conflict = merge_conflict(vec![delta1, delta2]); assert_eq!( conflict.writers.len(), 2, @@ -369,8 +376,7 @@ fn merge_preserves_all_warp_distinct_ops() { } // Merge should succeed - let merged = merge_deltas_ok(deltas); - let result_ops = merged.expect("merge should succeed: all ops target different warps"); + let result_ops = merged_ops(deltas); // All 3 ops must survive assert_eq!( @@ -437,8 +443,7 @@ fn different_op_types_do_not_conflict() { ); // Merge should succeed: different op types have different WarpOpKeys - let merged = merge_deltas_ok(vec![delta1, delta2]); - let result_ops = merged.expect("different op types should not conflict"); + let result_ops = merged_ops(vec![delta1, delta2]); assert_eq!(result_ops.len(), 2, "Both ops must survive"); assert!( @@ -451,8 +456,13 @@ fn different_op_types_do_not_conflict() { ); // Verify ordering: DeleteNode (kind=5) should come before UpsertNode (kind=6) - let delete_idx = result_ops.iter().position(|op| op == &delete_op).unwrap(); - let upsert_idx = result_ops.iter().position(|op| op == &upsert_op).unwrap(); + let delete_idx = result_ops.iter().position(|op| op == &delete_op); + let upsert_idx = result_ops.iter().position(|op| op == &upsert_op); + assert!(delete_idx.is_some(), "DeleteNode must be present"); + assert!(upsert_idx.is_some(), "UpsertNode must be present"); + let (Some(delete_idx), Some(upsert_idx)) = (delete_idx, upsert_idx) else { + unreachable!("asserts above guarantee indices"); + }; assert!( delete_idx < upsert_idx, "DeleteNode must sort before UpsertNode in canonical order" @@ -508,8 +518,7 @@ fn attachment_ops_distinguish_node_vs_edge_owners() { ); // Merge should succeed: node and edge attachments have different WarpOpKeys - let merged = 
merge_deltas_ok(vec![delta1, delta2]); - let result_ops = merged.expect("node vs edge attachment ops should not conflict"); + let result_ops = merged_ops(vec![delta1, delta2]); assert_eq!(result_ops.len(), 2, "Both ops must survive"); assert!( @@ -581,7 +590,7 @@ fn merge_is_deterministic_regardless_of_delta_order() { .enumerate() .map(|(i, op)| make_delta(i, op)) .collect(); - let result_forward = merge_deltas_ok(deltas_forward).expect("merge should succeed"); + let result_forward = merged_ops(deltas_forward); // Merge in reverse order let deltas_reverse: Vec<_> = ops @@ -590,7 +599,7 @@ fn merge_is_deterministic_regardless_of_delta_order() { .rev() .map(|(i, op)| make_delta(i, op)) .collect(); - let result_reverse = merge_deltas_ok(deltas_reverse).expect("merge should succeed"); + let result_reverse = merged_ops(deltas_reverse); // Results must be identical assert_eq!( @@ -642,14 +651,12 @@ fn many_warps_same_local_target_all_survive() { .collect(); // Merge should succeed - let merged = merge_deltas_ok(deltas); - let result_ops = merged.expect("all cross-warp ops should merge successfully"); + let result_ops = merged_ops(deltas); assert_eq!( result_ops.len(), num_warps, - "All {} warp-distinct ops must survive merge", - num_warps + "All {num_warps} warp-distinct ops must survive merge" ); // Verify all original ops are present @@ -709,15 +716,6 @@ fn conflict_detected_for_same_nodekey_different_records() { }, ); - let merged = merge_deltas_ok(vec![delta1, delta2]); - assert!( - merged.is_err(), - "UpsertNode ops with same NodeKey but different records must conflict" - ); - - let err = merged.unwrap_err(); - let MergeError::Conflict(conflict) = err else { - panic!("Expected MergeError::Conflict, got: {:?}", err); - }; + let conflict = merge_conflict(vec![delta1, delta2]); assert_eq!(conflict.writers.len(), 2, "Both writers must be reported"); } diff --git a/crates/warp-core/tests/parallel_parallel_exec.rs b/crates/warp-core/tests/parallel_parallel_exec.rs index 
27690134..16be5b01 100644 --- a/crates/warp-core/tests/parallel_parallel_exec.rs +++ b/crates/warp-core/tests/parallel_parallel_exec.rs @@ -76,6 +76,16 @@ fn make_exec_items(nodes: &[NodeId]) -> Vec { .collect() } +/// Merge deterministic test deltas into canonical op order. +fn merged_ops(deltas: Vec, context: &str) -> Vec { + let result = merge_deltas_ok(deltas); + assert!(result.is_ok(), "{context}: merge failed: {result:?}"); + let Ok(ops) = result else { + unreachable!("{context}: merge success asserted above"); + }; + ops +} + #[test] fn parallel_equals_serial_basic() { let (store, nodes) = make_test_store(10); @@ -84,11 +94,11 @@ fn parallel_equals_serial_basic() { // Serial execution let serial_delta = execute_serial(view, &items); - let serial_ops = merge_deltas_ok(vec![serial_delta]).expect("merge failed"); + let serial_ops = merged_ops(vec![serial_delta], "parallel_equals_serial_basic/serial"); // Parallel execution with 4 workers let parallel_deltas = execute_parallel(view, &items, 4); - let parallel_ops = merge_deltas_ok(parallel_deltas).expect("merge failed"); + let parallel_ops = merged_ops(parallel_deltas, "parallel_equals_serial_basic/parallel"); // Must produce same number of ops assert_eq!( @@ -113,12 +123,13 @@ fn worker_count_invariance() { // Baseline with 1 worker let baseline_deltas = execute_parallel(view, &items, 1); - let baseline_ops = merge_deltas_ok(baseline_deltas).expect("merge failed"); + let baseline_ops = merged_ops(baseline_deltas, "worker_count_invariance/baseline"); // Test all worker counts for &workers in WORKER_COUNTS { let deltas = execute_parallel(view, &items, workers); - let ops = merge_deltas_ok(deltas).expect("merge failed"); + let ctx = format!("worker_count_invariance/workers={workers}"); + let ops = merged_ops(deltas, &ctx); assert_eq!( baseline_ops.len(), @@ -140,7 +151,10 @@ fn permutation_invariance_under_parallelism() { // Baseline let baseline_deltas = execute_parallel(view, &items, 1); - let baseline_ops = 
merge_deltas_ok(baseline_deltas).expect("merge failed"); + let baseline_ops = merged_ops( + baseline_deltas, + "permutation_invariance_under_parallelism/baseline", + ); for &seed in SEEDS { let mut rng = XorShift64::new(seed); @@ -150,7 +164,10 @@ fn permutation_invariance_under_parallelism() { for &workers in WORKER_COUNTS { let deltas = execute_parallel(view, &items, workers); - let ops = merge_deltas_ok(deltas).expect("merge failed"); + let ctx = format!( + "permutation_invariance_under_parallelism/seed={seed:#x}/workers={workers}" + ); + let ops = merged_ops(deltas, &ctx); assert_eq!( baseline_ops.len(), @@ -203,7 +220,7 @@ fn merge_dedupes_identical_ops() { } // Merge should dedupe identical ops - let merged = merge_deltas_ok(vec![delta1, delta2]).expect("merge failed"); + let merged = merged_ops(vec![delta1, delta2], "merge_dedupes_identical_ops"); // Should have exactly 4 ops (one per node), not 8 assert_eq!(merged.len(), 4, "merge should dedupe identical ops"); @@ -221,7 +238,7 @@ fn empty_execution_produces_empty_result() { // Parallel let parallel_deltas = execute_parallel(view, &items, 4); - let merged = merge_deltas_ok(parallel_deltas).expect("merge failed"); + let merged = merged_ops(parallel_deltas, "empty_execution_produces_empty_result"); assert!(merged.is_empty(), "parallel merged should be empty"); } @@ -233,12 +250,13 @@ fn single_item_execution() { // Serial let serial_delta = execute_serial(view, &items); - let serial_ops = merge_deltas_ok(vec![serial_delta]).expect("merge failed"); + let serial_ops = merged_ops(vec![serial_delta], "single_item_execution/serial"); // Parallel with various worker counts for &workers in WORKER_COUNTS { let parallel_deltas = execute_parallel(view, &items, workers); - let parallel_ops = merge_deltas_ok(parallel_deltas).expect("merge failed"); + let ctx = format!("single_item_execution/workers={workers}"); + let parallel_ops = merged_ops(parallel_deltas, &ctx); assert_eq!( serial_ops.len(), @@ -261,14 +279,18 @@ fn 
large_workload_worker_count_invariance() { // Baseline let baseline_deltas = execute_parallel(view, &items, 1); - let baseline_ops = merge_deltas_ok(baseline_deltas).expect("merge failed"); + let baseline_ops = merged_ops( + baseline_deltas, + "large_workload_worker_count_invariance/baseline", + ); assert_eq!(baseline_ops.len(), 100, "should have 100 ops"); // Test all worker counts for &workers in WORKER_COUNTS { let deltas = execute_parallel(view, &items, workers); - let ops = merge_deltas_ok(deltas).expect("merge failed"); + let ctx = format!("large_workload_worker_count_invariance/workers={workers}"); + let ops = merged_ops(deltas, &ctx); assert_eq!( baseline_ops.len(), @@ -298,7 +320,10 @@ fn worker_count_capped_at_num_shards() { // Baseline with NUM_SHARDS workers (the cap) let baseline_deltas = execute_parallel(view, &items, NUM_SHARDS); - let baseline_ops = merge_deltas_ok(baseline_deltas).expect("merge failed"); + let baseline_ops = merged_ops( + baseline_deltas, + "worker_count_capped_at_num_shards/baseline", + ); // Request more workers than shards - should be capped let capped_deltas = execute_parallel(view, &items, NUM_SHARDS * 2); @@ -312,7 +337,7 @@ fn worker_count_capped_at_num_shards() { capped_deltas.len() ); - let capped_ops = merge_deltas_ok(capped_deltas).expect("merge failed"); + let capped_ops = merged_ops(capped_deltas, "worker_count_capped_at_num_shards/capped"); // Results should still be correct assert_eq!( @@ -356,11 +381,12 @@ fn sharded_distribution_is_deterministic() { // Run sharded execution multiple times - should be deterministic let first_deltas = execute_parallel_sharded(view, &items, 8); - let first_ops = merge_deltas_ok(first_deltas).expect("merge failed"); + let first_ops = merged_ops(first_deltas, "sharded_distribution_is_deterministic/run=0"); for run in 1..=5 { let deltas = execute_parallel_sharded(view, &items, 8); - let ops = merge_deltas_ok(deltas).expect("merge failed"); + let ctx = 
format!("sharded_distribution_is_deterministic/run={run}"); + let ops = merged_ops(deltas, &ctx); assert_eq!( first_ops.len(), @@ -386,11 +412,11 @@ fn default_parallel_uses_sharded() { // Default execute_parallel let default_deltas = execute_parallel(view, &items, 4); - let default_ops = merge_deltas_ok(default_deltas).expect("merge failed"); + let default_ops = merged_ops(default_deltas, "default_parallel_uses_sharded/default"); // Explicit sharded let sharded_deltas = execute_parallel_sharded(view, &items, 4); - let sharded_ops = merge_deltas_ok(sharded_deltas).expect("merge failed"); + let sharded_ops = merged_ops(sharded_deltas, "default_parallel_uses_sharded/sharded"); assert_eq!( default_ops.len(), diff --git a/docs/BENCHMARK_GUIDE.md b/docs/BENCHMARK_GUIDE.md index 37596db1..a21d91f1 100644 --- a/docs/BENCHMARK_GUIDE.md +++ b/docs/BENCHMARK_GUIDE.md @@ -118,7 +118,11 @@ const GROUPS = [ - `#9ece6a` - Green (scheduler_drain) - `#e0af68` - Yellow (scheduler_enqueue) - `#f7768e` - Red (scheduler_drain/drain) -- `#7dcfff` - Cyan (reserve_independence) + +These are the colors currently used by the core-overhead dashboard groups in +`BENCH_CORE_GROUP_KEYS`. Specialized studies such as +`reserve_independence` or `parallel_policy_matrix` manage their own report +surfaces and do not automatically appear in that overview tab. **Pick a new color or use available:** @@ -133,15 +137,16 @@ const GROUPS = [ - `'4,4'` - Medium dashes - `'8,4'` - Long dashes -#### 4b. Add to `scripts/bench_bake.py` +#### 4b. Add to `cargo xtask bench bake` -Find the `GROUPS` list and add your benchmark: +Find the `BENCH_CORE_GROUP_KEYS` list in `xtask/src/main.rs` and add your +benchmark key if the new benchmark belongs on the core-overhead dashboard tab: -```python -GROUPS = [ - # ... existing benchmarks ... - ("my_feature", "My Feature Description"), -] +```rust +const BENCH_CORE_GROUP_KEYS: &[&str] = &[ + // ... existing benchmarks ... + "my_feature", +]; ``` ### 5. 
Generate the Dashboard @@ -406,7 +411,7 @@ Before considering your benchmark "done": - [ ] Runs successfully: `cargo bench -p warp-benches --bench my_feature` - [ ] JSON artifacts generated in `target/criterion/` - [ ] Added to `docs/benchmarks/index.html` GROUPS array -- [ ] Added to `scripts/bench_bake.py` GROUPS list +- [ ] Added to `xtask/src/main.rs` `BENCH_CORE_GROUP_KEYS` - [ ] Dashboard displays line with unique color/dash pattern - [ ] Results validate complexity hypothesis - [ ] Documentation created in `docs/benchmarks/` diff --git a/docs/benchmarks/PARALLEL_POLICY_MATRIX.md b/docs/benchmarks/PARALLEL_POLICY_MATRIX.md new file mode 100644 index 00000000..facc897c --- /dev/null +++ b/docs/benchmarks/PARALLEL_POLICY_MATRIX.md @@ -0,0 +1,92 @@ + + + +# Parallel Policy Matrix Benchmark + +## Purpose + +This benchmark compares shard execution topology choices, not just raw worker +count: + +- dynamic shard claiming + one delta per worker +- dynamic shard claiming + one delta per shard +- static round-robin shard assignment + one delta per worker +- static round-robin shard assignment + one delta per shard +- dedicated one-worker-per-shard + one delta per shard + +The point is to answer a narrower question than "is parallel good?": + +- which shard assignment policy is cheaper, +- which delta grouping policy is cheaper, and +- whether "one worker = one shard = one delta" is ever worth the overhead. + +The harness includes canonical delta merge after parallel execution, so the +study measures the full policy cost visible to the engine for these synthetic +independent workloads, not just executor-stage delta production. + +## Loads + +The benchmark currently runs at: + +- `100` +- `1000` +- `10000` + +For pooled-worker policies, it also varies worker counts: + +- `1` +- `4` +- `8` + +The dedicated per-shard policy intentionally ignores the worker-count knob and +spawns one thread per non-empty shard. 
+ +## Outputs + +Running the dedicated bake target produces: + +- raw JSON with provenance metadata: + [parallel-policy-matrix.json](./parallel-policy-matrix.json) +- unified static benchmarks page: + [report-inline.html](./report-inline.html) + Open the `Parallel policy matrix` tab. + +Criterion's original raw estimates remain under `target/criterion/parallel_policy_matrix/`. + +## Commands + +Run the targeted policy study and bake outputs: + +```sh +make bench-policy-bake +``` + +If benchmark results already exist and you only want to regenerate JSON + HTML: + +```sh +make bench-policy-export +``` + +The export payload includes: + +- generated timestamp +- git SHA +- machine descriptor +- criterion source root + +To inspect the registered benchmark cases without running them: + +```sh +cargo bench -p warp-benches --bench parallel_baseline -- --list +``` + +## Notes + +- The benchmark measures execution topology overhead on a synthetic independent + workload. It is not a substitute for end-to-end engine traces. +- The page provenance records when and where the artifact was baked from the + local Criterion tree. It does **not** claim to know the original commit that + produced those raw Criterion estimates if you rebake from pre-existing data. +- The dedicated per-shard policy is primarily a comparison tool. It is expected + to pay substantial thread-spawn overhead and exists to bound the extreme + `1 worker = 1 shard = 1 delta` shape against the pooled-worker policies. 
diff --git a/docs/benchmarks/RESERVE_BENCHMARK.md b/docs/benchmarks/RESERVE_BENCHMARK.md index 27f0f8a8..744f1bb1 100644 --- a/docs/benchmarks/RESERVE_BENCHMARK.md +++ b/docs/benchmarks/RESERVE_BENCHMARK.md @@ -30,7 +30,7 @@ Added comprehensive benchmarking for the `reserve()` independence checking funct **Files Modified:** - `docs/benchmarks/index.html` - Added reserve_independence to GROUPS -- `scripts/bench_bake.py` - Added to GROUPS list for baking +- `cargo xtask bench bake` - Includes the benchmark group in the baked report path - `crates/warp-benches/Cargo.toml` - Registered benchmark with harness=false **Visual Style:** diff --git a/docs/benchmarks/index.html b/docs/benchmarks/index.html index a4dd55ba..747bca71 100644 --- a/docs/benchmarks/index.html +++ b/docs/benchmarks/index.html @@ -2,463 +2,2169 @@ - - - - Echo Benchmarks Dashboard - - - -
-

Echo Benchmarks

-

What we're measuring: Deterministic scheduler overhead for executing n rewrites per transaction. Lower is better.

-
Loading benchmark data...
-

Why this matters: The scheduler maintains O(n) linear scaling through adaptive sorting—comparison sort for small batches, radix sort beyond n=1024.

-
-
-
- - - -
-
-
-
-
- - - - + + + + Echo Benchmarks + + + + + +
+
+

Single-page engineering readout

+

Echo Runtime Benchmarks

+

+ This report answers two practical questions: does the + deterministic core stay cheap as load grows, and which + parallel shard-policy shape actually wins when the workload + is held constant? +

+
+
+
+

Core overhead

+

Stable costs, visible at a glance

+

+ Snapshot hashing and scheduler drain stay on one tab, + with both budget and complexity views. +

+
+
+

Policy study

+

Five shapes, one workload

+

+ The policy matrix holds the executor constant and only + changes shard claiming, worker shape, and delta + grouping. +

+
+
+

Baked artifact

+

Offline-friendly and provenance-aware

+

+ The generated page carries its data, styling, and run + metadata with it so the story survives outside the live + Criterion directory. +

+
+
+ +
+ +
+
+
+

Core overhead

+

+ This tab tracks the costs the engine pays every tick: + hashing the reachable snapshot and draining the + deterministic scheduler. Budget view keeps the frame + line visible. Complexity view makes the scaling shape + easier to inspect. +

+
+ Loading benchmark data... +
+
+ + +
+
+
+
+
+
+ +
+
+

Per-benchmark tables

+
+
+
+

Capture status

+
+
+
+
+ + +
+ + + + + + diff --git a/docs/benchmarks/parallel-policy-matrix.json b/docs/benchmarks/parallel-policy-matrix.json new file mode 100644 index 00000000..1afe5011 --- /dev/null +++ b/docs/benchmarks/parallel-policy-matrix.json @@ -0,0 +1,406 @@ +{ + "group": "parallel_policy_matrix", + "baked_at": "2026-03-29T19:51:34.009982Z", + "baked_git_sha": "c235096", + "baked_source_digest": "7510f1cdabaeb08c4550190c225794d4871c9fb18a916d639ad4f42eb8367521", + "template_path": "docs/benchmarks/index.html", + "machine": { + "os": "macos", + "arch": "aarch64", + "hostname": null, + "label": "macos/aarch64" + }, + "criterion_root": "target/criterion/parallel_policy_matrix", + "results": [ + { + "policy": "dynamic_per_shard", + "workers": "1w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_shard_1w/100/new/estimates.json", + "mean_ns": 55464.43408260571, + "lb_ns": 54756.69361375341, + "ub_ns": 56202.66931437411, + "series": "dynamic_per_shard:1w" + }, + { + "policy": "dynamic_per_shard", + "workers": "1w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_shard_1w/1000/new/estimates.json", + "mean_ns": 343427.6850951146, + "lb_ns": 319083.4070900986, + "ub_ns": 373475.23771829135, + "series": "dynamic_per_shard:1w" + }, + { + "policy": "dynamic_per_shard", + "workers": "1w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_shard_1w/10000/new/estimates.json", + "mean_ns": 4201208.348823542, + "lb_ns": 3875169.7980738874, + "ub_ns": 4567399.876901627, + "series": "dynamic_per_shard:1w" + }, + { + "policy": "dynamic_per_worker", + "workers": "1w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_worker_1w/100/new/estimates.json", + "mean_ns": 118001.1679377424, + "lb_ns": 106180.36814934696, + "ub_ns": 135372.81707939805, + "series": "dynamic_per_worker:1w" + }, + { + "policy": "dynamic_per_worker", + "workers": "1w", + "load": 1000, + "path": 
"target/criterion/parallel_policy_matrix/dynamic_per_worker_1w/1000/new/estimates.json", + "mean_ns": 270892.1481032658, + "lb_ns": 267017.7138902802, + "ub_ns": 275278.0843480539, + "series": "dynamic_per_worker:1w" + }, + { + "policy": "dynamic_per_worker", + "workers": "1w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_worker_1w/10000/new/estimates.json", + "mean_ns": 2762096.073968301, + "lb_ns": 2691334.9446979314, + "ub_ns": 2853504.588098411, + "series": "dynamic_per_worker:1w" + }, + { + "policy": "static_per_shard", + "workers": "1w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/static_per_shard_1w/100/new/estimates.json", + "mean_ns": 68038.9133507309, + "lb_ns": 60144.881347430055, + "ub_ns": 78999.4423918486, + "series": "static_per_shard:1w" + }, + { + "policy": "static_per_shard", + "workers": "1w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/static_per_shard_1w/1000/new/estimates.json", + "mean_ns": 315397.2679475274, + "lb_ns": 298548.79247113975, + "ub_ns": 338033.46786511806, + "series": "static_per_shard:1w" + }, + { + "policy": "static_per_shard", + "workers": "1w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/static_per_shard_1w/10000/new/estimates.json", + "mean_ns": 3253597.341064291, + "lb_ns": 3174288.7754920344, + "ub_ns": 3339159.0839692825, + "series": "static_per_shard:1w" + }, + { + "policy": "static_per_worker", + "workers": "1w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/static_per_worker_1w/100/new/estimates.json", + "mean_ns": 51739.36454908365, + "lb_ns": 48378.98747472938, + "ub_ns": 56425.19122588916, + "series": "static_per_worker:1w" + }, + { + "policy": "static_per_worker", + "workers": "1w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/static_per_worker_1w/1000/new/estimates.json", + "mean_ns": 305768.3191330779, + "lb_ns": 289677.7784319992, + "ub_ns": 325951.6131473716, + 
"series": "static_per_worker:1w" + }, + { + "policy": "static_per_worker", + "workers": "1w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/static_per_worker_1w/10000/new/estimates.json", + "mean_ns": 4339330.832882105, + "lb_ns": 3776273.4789978, + "ub_ns": 4967749.246199884, + "series": "static_per_worker:1w" + }, + { + "policy": "dynamic_per_shard", + "workers": "4w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_shard_4w/100/new/estimates.json", + "mean_ns": 94623.96109289848, + "lb_ns": 92672.9358013662, + "ub_ns": 96862.53591063726, + "series": "dynamic_per_shard:4w" + }, + { + "policy": "dynamic_per_shard", + "workers": "4w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_shard_4w/1000/new/estimates.json", + "mean_ns": 291516.0789781246, + "lb_ns": 285026.3822294185, + "ub_ns": 299125.50646760175, + "series": "dynamic_per_shard:4w" + }, + { + "policy": "dynamic_per_shard", + "workers": "4w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_shard_4w/10000/new/estimates.json", + "mean_ns": 2312237.6050062696, + "lb_ns": 2254378.0920585655, + "ub_ns": 2388842.6003074846, + "series": "dynamic_per_shard:4w" + }, + { + "policy": "dynamic_per_worker", + "workers": "4w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_worker_4w/100/new/estimates.json", + "mean_ns": 480978.59250441974, + "lb_ns": 426663.4850814179, + "ub_ns": 547182.7573886289, + "series": "dynamic_per_worker:4w" + }, + { + "policy": "dynamic_per_worker", + "workers": "4w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_worker_4w/1000/new/estimates.json", + "mean_ns": 277423.6160682042, + "lb_ns": 274378.7112326404, + "ub_ns": 280653.77858380805, + "series": "dynamic_per_worker:4w" + }, + { + "policy": "dynamic_per_worker", + "workers": "4w", + "load": 10000, + "path": 
"target/criterion/parallel_policy_matrix/dynamic_per_worker_4w/10000/new/estimates.json", + "mean_ns": 3532714.3493538103, + "lb_ns": 3196782.458081174, + "ub_ns": 3893028.476573624, + "series": "dynamic_per_worker:4w" + }, + { + "policy": "static_per_shard", + "workers": "4w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/static_per_shard_4w/100/new/estimates.json", + "mean_ns": 99863.61987207436, + "lb_ns": 98266.36378482977, + "ub_ns": 101794.624786117, + "series": "static_per_shard:4w" + }, + { + "policy": "static_per_shard", + "workers": "4w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/static_per_shard_4w/1000/new/estimates.json", + "mean_ns": 296294.6607022254, + "lb_ns": 288343.79662623204, + "ub_ns": 307105.6393947541, + "series": "static_per_shard:4w" + }, + { + "policy": "static_per_shard", + "workers": "4w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/static_per_shard_4w/10000/new/estimates.json", + "mean_ns": 3076581.0265688575, + "lb_ns": 2788832.930038881, + "ub_ns": 3400977.1287433044, + "series": "static_per_shard:4w" + }, + { + "policy": "static_per_worker", + "workers": "4w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/static_per_worker_4w/100/new/estimates.json", + "mean_ns": 82828.97430229129, + "lb_ns": 79154.03257528166, + "ub_ns": 87389.61768361439, + "series": "static_per_worker:4w" + }, + { + "policy": "static_per_worker", + "workers": "4w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/static_per_worker_4w/1000/new/estimates.json", + "mean_ns": 380211.0759995546, + "lb_ns": 327855.16356549604, + "ub_ns": 462963.92953876726, + "series": "static_per_worker:4w" + }, + { + "policy": "static_per_worker", + "workers": "4w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/static_per_worker_4w/10000/new/estimates.json", + "mean_ns": 4262819.759565299, + "lb_ns": 3869786.074998774, + "ub_ns": 4673047.668490899, + 
"series": "static_per_worker:4w" + }, + { + "policy": "dynamic_per_shard", + "workers": "8w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_shard_8w/100/new/estimates.json", + "mean_ns": 152460.101521985, + "lb_ns": 142689.2895253969, + "ub_ns": 169578.32318578326, + "series": "dynamic_per_shard:8w" + }, + { + "policy": "dynamic_per_shard", + "workers": "8w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_shard_8w/1000/new/estimates.json", + "mean_ns": 427597.4980816598, + "lb_ns": 380942.18165343726, + "ub_ns": 493478.6946695924, + "series": "dynamic_per_shard:8w" + }, + { + "policy": "dynamic_per_shard", + "workers": "8w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_shard_8w/10000/new/estimates.json", + "mean_ns": 2876309.416375992, + "lb_ns": 2594111.144678711, + "ub_ns": 3223226.433645327, + "series": "dynamic_per_shard:8w" + }, + { + "policy": "dynamic_per_worker", + "workers": "8w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_worker_8w/100/new/estimates.json", + "mean_ns": 157455.9425359909, + "lb_ns": 153942.67719904362, + "ub_ns": 161920.5141127526, + "series": "dynamic_per_worker:8w" + }, + { + "policy": "dynamic_per_worker", + "workers": "8w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_worker_8w/1000/new/estimates.json", + "mean_ns": 339257.17203846463, + "lb_ns": 315584.75279452157, + "ub_ns": 380663.61674345355, + "series": "dynamic_per_worker:8w" + }, + { + "policy": "dynamic_per_worker", + "workers": "8w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/dynamic_per_worker_8w/10000/new/estimates.json", + "mean_ns": 2621356.7823611028, + "lb_ns": 2493971.8226580755, + "ub_ns": 2774790.902435505, + "series": "dynamic_per_worker:8w" + }, + { + "policy": "static_per_shard", + "workers": "8w", + "load": 100, + "path": 
"target/criterion/parallel_policy_matrix/static_per_shard_8w/100/new/estimates.json", + "mean_ns": 236985.7440476028, + "lb_ns": 221566.6802469599, + "ub_ns": 256156.75997772816, + "series": "static_per_shard:8w" + }, + { + "policy": "static_per_shard", + "workers": "8w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/static_per_shard_8w/1000/new/estimates.json", + "mean_ns": 477808.37468245544, + "lb_ns": 403653.5950145256, + "ub_ns": 583335.7173446632, + "series": "static_per_shard:8w" + }, + { + "policy": "static_per_shard", + "workers": "8w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/static_per_shard_8w/10000/new/estimates.json", + "mean_ns": 2710449.0420398023, + "lb_ns": 2546788.7868638574, + "ub_ns": 2953026.784321096, + "series": "static_per_shard:8w" + }, + { + "policy": "static_per_worker", + "workers": "8w", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/static_per_worker_8w/100/new/estimates.json", + "mean_ns": 172220.2781279374, + "lb_ns": 158988.24858006404, + "ub_ns": 188894.3418611982, + "series": "static_per_worker:8w" + }, + { + "policy": "static_per_worker", + "workers": "8w", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/static_per_worker_8w/1000/new/estimates.json", + "mean_ns": 332053.05171778303, + "lb_ns": 323383.93442205345, + "ub_ns": 341832.6875972533, + "series": "static_per_worker:8w" + }, + { + "policy": "static_per_worker", + "workers": "8w", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/static_per_worker_8w/10000/new/estimates.json", + "mean_ns": 5234556.768329204, + "lb_ns": 4974722.401272965, + "ub_ns": 5536259.659252154, + "series": "static_per_worker:8w" + }, + { + "policy": "dedicated_per_shard", + "workers": "dedicated", + "load": 100, + "path": "target/criterion/parallel_policy_matrix/dedicated_per_shard/100/new/estimates.json", + "mean_ns": 1911916.2158310344, + "lb_ns": 1770192.7824185104, + "ub_ns": 
2076102.2267788248, + "series": "dedicated_per_shard:dedicated" + }, + { + "policy": "dedicated_per_shard", + "workers": "dedicated", + "load": 1000, + "path": "target/criterion/parallel_policy_matrix/dedicated_per_shard/1000/new/estimates.json", + "mean_ns": 5056303.960415298, + "lb_ns": 4869380.740892483, + "ub_ns": 5269361.096465565, + "series": "dedicated_per_shard:dedicated" + }, + { + "policy": "dedicated_per_shard", + "workers": "dedicated", + "load": 10000, + "path": "target/criterion/parallel_policy_matrix/dedicated_per_shard/10000/new/estimates.json", + "mean_ns": 7810389.708333333, + "lb_ns": 7603201.088333334, + "ub_ns": 8024619.369652779, + "series": "dedicated_per_shard:dedicated" + } + ] +} diff --git a/docs/benchmarks/report-inline.html b/docs/benchmarks/report-inline.html index debdd7b8..166d1c2f 100644 --- a/docs/benchmarks/report-inline.html +++ b/docs/benchmarks/report-inline.html @@ -2,467 +2,4746 @@ - - - - Echo Benchmarks Dashboard - - - -
-

Echo Benchmarks

-

What we're measuring: Deterministic scheduler overhead for executing n rewrites per transaction. Lower is better.

-
Loading benchmark data...
-

Why this matters: The scheduler maintains O(n) linear scaling through adaptive sorting—comparison sort for small batches, radix sort beyond n=1024.

-
-
-
- - - -
-
-
-
-
- - - - - + + + + Echo Benchmarks + + + + + +
+
+

Single-page engineering readout

+

Echo Runtime Benchmarks

+

+ This report answers two practical questions: does the + deterministic core stay cheap as load grows, and which + parallel shard-policy shape actually wins when the workload + is held constant? +

+
+
+
+

Core overhead

+

Stable costs, visible at a glance

+

+ Snapshot hashing and scheduler drain stay on one tab, + with both budget and complexity views. +

+
+
+

Policy study

+

Five shapes, one workload

+

+ The policy matrix holds the executor constant and only + changes shard claiming, worker shape, and delta + grouping. +

+
+
+

Baked artifact

+

Offline-friendly and provenance-aware

+

+ The generated page carries its data, styling, and run + metadata with it so the story survives outside the live + Criterion directory. +

+
+
+ +
+ +
+
+
+

Core overhead

+

+ This tab tracks the costs the engine pays every tick: + hashing the reachable snapshot and draining the + deterministic scheduler. Budget view keeps the frame + line visible. Complexity view makes the scaling shape + easier to inspect. +

+
+ Loading benchmark data... +
+
+ + +
+
+
+
+
+
+ +
+
+

Per-benchmark tables

+
+
+
+

Capture status

+
+
+
+
+ + +
+ + + + + + + diff --git a/docs/benchmarks/vendor/.gitignore b/docs/benchmarks/vendor/.gitignore index 5339c258..e2227562 100644 --- a/docs/benchmarks/vendor/.gitignore +++ b/docs/benchmarks/vendor/.gitignore @@ -4,3 +4,5 @@ *.css *.css.map !.gitkeep +!open-props.min.css +!normalize.dark.min.css diff --git a/docs/benchmarks/vendor/normalize.dark.min.css b/docs/benchmarks/vendor/normalize.dark.min.css new file mode 100644 index 00000000..f9de8ac4 --- /dev/null +++ b/docs/benchmarks/vendor/normalize.dark.min.css @@ -0,0 +1 @@ +:where(html){--csstools-color-scheme--light: ;--link:var(--indigo-3);--link-visited:var(--purple-3);--text-1:var(--gray-0);--text-2:var(--gray-4);--surface-1:var(--gray-9);--surface-2:var(--gray-8);--surface-3:var(--gray-7);--surface-4:var(--gray-6);--scrollthumb-color:var(--gray-6);-webkit-text-size-adjust:none;--shadow-strength:10%;--shadow-color:220 40% 2%;--inner-shadow-highlight:inset 0 -.5px 0 0 hsla(0,0%,100%,.067),inset 0 .5px 0 0 rgba(0,0,0,.467);accent-color:var(--brand,var(--link));background-color:var(--surface-1);block-size:100%;caret-color:var(--brand,var(--link));color:var(--text-1);color-scheme:dark;font-family:var(--font-system-ui);line-height:var(--font-lineheight-3);scrollbar-color:var(--scrollthumb-color) transparent}:where(html) :where(dialog){background-color:var(--surface-2)}:where(html) :where(button,.btn){--_highlight:var(--_highlight-dark);--_bg:var(--_bg-dark);--_ink-shadow:var(--_ink-shadow-dark)}:where(html) :where(button,.btn):where([type=reset]){--_text:var(--red-2);--_border:var(--surface-3)}:where(html) [disabled]:where(button,input[type=button],.btn){--_text:var(--gray-5)}:where(html) [disabled]:where(button,input[type=submit],.btn){--_text:var(--gray-5)}:where(html) [disabled]:where(button,input[type=reset],.btn){--_text:var(--gray-5)}:where(html) :where(textarea,select,input:not([type=button],[type=submit],[type=reset])){background-color:#171a1c}:where(html) :where([disabled]),:where(html) 
:where([type=reset]),:where(html) :where([type=submit]),:where(html) :where(form button:not([type=button])){--_bg:var(--surface-1)}:where(a[href]){color:var(--brand,var(--link))}:where(a[href]):where(:visited){color:var(--link-visited)}:focus-visible{outline-color:var(--brand,var(--link))}*,:after,:before{box-sizing:border-box}:where(:not(dialog)){margin:0}:where(:not(fieldset,progress,meter)){background-origin:border-box;background-repeat:no-repeat;border-style:solid;border-width:0}@media (prefers-reduced-motion:no-preference){:where(html){scroll-behavior:smooth}}@media (prefers-reduced-motion:no-preference){:where(:focus-visible){transition:outline-offset 145ms var(--ease-2)}:where(:not(:active):focus-visible){transition-duration:.25s}}:where(:not(:active):focus-visible){outline-offset:5px}:where(body){min-block-size:100%}:where(h1,h2,h3,h4,h5,h6){text-wrap:balance;font-weight:var(--font-weight-9);line-height:var(--font-lineheight-1)}:where(h1){font-size:var(--font-size-8);max-inline-size:var(--size-header-1)}:where(h2){font-size:var(--font-size-6);max-inline-size:var(--size-header-2)}:where(h3){font-size:var(--font-size-5)}:where(h4){font-size:var(--font-size-4)}:where(h5){font-size:var(--font-size-3)}:where(h3,h4,h5,h6,dt){max-inline-size:var(--size-header-3)}:where(p,ul,ol,dl,h6){font-size:var(--font-size-2)}:where(a,u,ins,abbr){text-underline-offset:1px}@supports 
(-moz-appearance:none){:where(a,u,ins,abbr){text-underline-offset:2px}}:where(a[href],area,button,input:not([type=text],[type=email],[type=number],[type=password],[type=""],[type=tel],[type=url]),label[for],select,summary,[tabindex]:not([tabindex*="-"],pre)){cursor:pointer}:where(a[href],area,button,input,label[for],select,summary,textarea,[tabindex]:not([tabindex*="-"])){-webkit-tap-highlight-color:transparent;touch-action:manipulation}:where(a):where([href]){text-decoration-color:var(--indigo-2)}:where(a):where([href]):where(:visited){text-decoration-color:var(--purple-2)}:where(a):where(:not(:hover)){text-decoration:inherit}:where(img,svg,video,canvas,audio,iframe,embed,object){display:block}:where(img,svg,video){block-size:auto;max-inline-size:100%}:where(input,button,textarea,select),:where(input[type=file])::-webkit-file-upload-button{color:inherit;font:inherit;font-size:inherit;letter-spacing:inherit}::placeholder{color:var(--gray-7);color:var(--gray-6);opacity:.75}:where(input:not([type=range]),textarea){padding-block:var(--size-1);padding-inline:var(--size-2)}:where(select){field-sizing:content;padding-block:.75ch;padding-inline:var(--size-relative-4) 0}:where(textarea,select,input:not([type=button],[type=submit],[type=reset])){background-color:var(--surface-2);background-color:var(--gray-10);border-radius:var(--radius-2)}:where(textarea){field-sizing:content;min-block-size:2lh;min-inline-size:var(--size-content-1);resize:vertical}:where(input[type=checkbox],input[type=radio]){block-size:var(--size-3);inline-size:var(--size-3)}:where(svg:not([width])){inline-size:var(--size-10)}:where(code,kbd,samp,pre){font-family:var(--font-monospace-code),monospace}:where(:not(pre)>code,kbd){white-space:nowrap}:where(pre){direction:ltr;max-inline-size:max-content;min-inline-size:0;white-space:pre;writing-mode:lr}:where(:not(pre)>code){background:var(--surface-2);border-radius:var(--radius-2);padding:var(--size-1) 
var(--size-2);writing-mode:lr}:where(kbd,var){border-color:var(--surface-4);border-radius:var(--radius-2);border-width:var(--border-size-1);padding:var(--size-1) var(--size-2)}:where(mark){border-radius:var(--radius-2);padding-inline:var(--size-1)}:where(ol,ul){padding-inline-start:var(--size-8)}:where(li){padding-inline-start:var(--size-2)}:where(li,dd,figcaption){max-inline-size:var(--size-content-2)}:where(p){text-wrap:pretty;max-inline-size:var(--size-content-3)}:where(dt,summary){font-weight:var(--font-weight-7)}:where(dt:not(:first-of-type)){margin-block-start:var(--size-5)}:where(small){font-size:max(.5em,var(--font-size-0));max-inline-size:var(--size-content-1)}:where(hr){background-color:var(--surface-3);height:var(--border-size-2);margin-block:var(--size-fluid-5)}:where(figure){display:grid;gap:var(--size-2);place-items:center}:where(figure)>:where(figcaption){text-wrap:balance;font-size:var(--font-size-1)}:where(blockquote,:not(blockquote)>cite){border-inline-start-width:var(--border-size-3)}:where(blockquote){display:grid;gap:var(--size-3);max-inline-size:var(--size-content-2);padding-block:var(--size-3);padding-inline:var(--size-4)}:where(:not(blockquote)>cite){padding-inline-start:var(--size-2)}:where(summary){background:var(--surface-3);border-radius:var(--radius-2);margin:calc(var(--size-2)*-1) calc(var(--size-3)*-1);padding:var(--size-2) var(--size-3)}:where(details){background:var(--surface-2);border-radius:var(--radius-2);padding-block:var(--size-2);padding-inline:var(--size-3)}:where(details[open]>summary){border-end-end-radius:0;border-end-start-radius:0;margin-bottom:var(--size-2)}:where(fieldset){border:var(--border-size-1) solid 
var(--surface-4);border-radius:var(--radius-2)}:where(del){background:var(--red-9);color:var(--red-2)}:where(ins){background:var(--green-9);color:var(--green-1)}:where(abbr){text-decoration-color:var(--blue-5)}:where(dialog){background-color:var(--surface-1);background-color:var(--surface-2);border-radius:var(--radius-3);box-shadow:var(--shadow-6);color:inherit}:where(menu){display:flex;gap:var(--size-3);padding-inline-start:0}:where(sup){font-size:.5em}:where(table){--nice-inner-radius:calc(var(--radius-3) - 2px);background:var(--surface-2);border:1px solid var(--surface-2);border-radius:var(--radius-3);width:fit-content}:where(table:not(:has(tfoot)) tr:last-child td:first-child){border-end-start-radius:var(--nice-inner-radius)}:where(table:not(:has(tfoot)) tr:last-child td:last-child){border-end-end-radius:var(--nice-inner-radius)}:where(table thead tr:first-child th:first-child){border-start-start-radius:var(--nice-inner-radius)}:where(table thead tr:first-child th:last-child){border-start-end-radius:var(--nice-inner-radius)}:where(tfoot tr:last-child th:first-of-type){border-end-start-radius:var(--nice-inner-radius)}:where(tfoot tr:last-child td:first-of-type){border-end-start-radius:var(--nice-inner-radius)}:where(tfoot tr:last-child th:last-of-type){border-end-end-radius:var(--nice-inner-radius)}:where(tfoot tr:last-child td:last-of-type){border-end-end-radius:var(--nice-inner-radius)}:where(th){background-color:var(--surface-2);color:var(--text-1)}:where(table a:not(.does-not-exist):focus-visible){outline-offset:-2px}:where(table button:not(.does-not-exist):focus-visible){outline-offset:-2px}:where(table [contenteditable]:focus-visible){outline-offset:-2px}:where(td){text-wrap:pretty;background:var(--surface-1);max-inline-size:var(--size-content-2)}:where(td,th){padding:var(--size-2);text-align:left}:where(td:not([align])){text-align:center}:where(th:not([align])){text-align:center}:where(thead){border-collapse:collapse}:where(table tr:hover td),:where(tbody 
tr:nth-child(2n):hover td){background-color:var(--surface-3)}:where(table>caption){margin:var(--size-3)}:where(tfoot button){padding-block:var(--size-1);padding-inline:var(--size-3)} \ No newline at end of file diff --git a/docs/benchmarks/vendor/open-props.min.css b/docs/benchmarks/vendor/open-props.min.css new file mode 100644 index 00000000..177cb68c --- /dev/null +++ b/docs/benchmarks/vendor/open-props.min.css @@ -0,0 +1 @@ +:where(html){--font-system-ui:system-ui,-apple-system,Segoe UI,Roboto,Ubuntu,Cantarell,Noto Sans,sans-serif;--font-transitional:Charter,Bitstream Charter,Sitka Text,Cambria,serif;--font-old-style:Iowan Old Style,Palatino Linotype,URW Palladio L,P052,serif;--font-humanist:Seravek,Gill Sans Nova,Ubuntu,Calibri,DejaVu Sans,source-sans-pro,sans-serif;--font-geometric-humanist:Avenir,Montserrat,Corbel,URW Gothic,source-sans-pro,sans-serif;--font-classical-humanist:Optima,Candara,Noto Sans,source-sans-pro,sans-serif;--font-neo-grotesque:Inter,Roboto,Helvetica Neue,Arial Nova,Nimbus Sans,Arial,sans-serif;--font-monospace-slab-serif:Nimbus Mono PS,Courier New,monospace;--font-monospace-code:Dank Mono,Operator Mono,Inconsolata,Fira Mono,ui-monospace,SF Mono,Monaco,Droid Sans Mono,Source Code Pro,Cascadia Code,Menlo,Consolas,DejaVu Sans Mono,monospace;--font-industrial:Bahnschrift,DIN Alternate,Franklin Gothic Medium,Nimbus Sans Narrow,sans-serif-condensed,sans-serif;--font-rounded-sans:ui-rounded,Hiragino Maru Gothic ProN,Quicksand,Comfortaa,Manjari,Arial Rounded MT,Arial Rounded MT Bold,Calibri,source-sans-pro,sans-serif;--font-slab-serif:Rockwell,Rockwell Nova,Roboto Slab,DejaVu Serif,Sitka Small,serif;--font-antique:Superclarendon,Bookman Old Style,URW Bookman,URW Bookman L,Georgia Pro,Georgia,serif;--font-didone:Didot,Bodoni MT,Noto Serif Display,URW Palladio L,P052,Sylfaen,serif;--font-handwritten:Segoe Print,Bradley 
Hand,Chilanka,TSCu_Comic,casual,cursive;--font-sans:var(--font-system-ui);--font-serif:ui-serif,serif;--font-mono:var(--font-monospace-code);--font-weight-1:100;--font-weight-2:200;--font-weight-3:300;--font-weight-4:400;--font-weight-5:500;--font-weight-6:600;--font-weight-7:700;--font-weight-8:800;--font-weight-9:900;--font-lineheight-00:.95;--font-lineheight-0:1.1;--font-lineheight-1:1.25;--font-lineheight-2:1.375;--font-lineheight-3:1.5;--font-lineheight-4:1.75;--font-lineheight-5:2;--font-letterspacing-0:-.05em;--font-letterspacing-1:.025em;--font-letterspacing-2:.050em;--font-letterspacing-3:.075em;--font-letterspacing-4:.150em;--font-letterspacing-5:.500em;--font-letterspacing-6:.750em;--font-letterspacing-7:1em;--font-size-00:.5rem;--font-size-0:.75rem;--font-size-1:1rem;--font-size-2:1.1rem;--font-size-3:1.25rem;--font-size-4:1.5rem;--font-size-5:2rem;--font-size-6:2.5rem;--font-size-7:3rem;--font-size-8:3.5rem;--font-size-fluid-0:max(.75rem,min(2vw,1rem));--font-size-fluid-1:max(1rem,min(4vw,1.5rem));--font-size-fluid-2:max(1.5rem,min(6vw,2.5rem));--font-size-fluid-3:max(2rem,min(9vw,3.5rem));--size-000:-.5rem;--size-00:-.25rem;--size-1:.25rem;--size-2:.5rem;--size-3:1rem;--size-4:1.25rem;--size-5:1.5rem;--size-6:1.75rem;--size-7:2rem;--size-8:3rem;--size-9:4rem;--size-10:5rem;--size-11:7.5rem;--size-12:10rem;--size-13:15rem;--size-14:20rem;--size-15:30rem;--size-px-000:-8px;--size-px-00:-4px;--size-px-1:4px;--size-px-2:8px;--size-px-3:16px;--size-px-4:20px;--size-px-5:24px;--size-px-6:28px;--size-px-7:32px;--size-px-8:48px;--size-px-9:64px;--size-px-10:80px;--size-px-11:120px;--size-px-12:160px;--size-px-13:240px;--size-px-14:320px;--size-px-15:480px;--size-fluid-1:max(.5rem,min(1vw,1rem));--size-fluid-2:max(1rem,min(2vw,1.5rem));--size-fluid-3:max(1.5rem,min(3vw,2rem));--size-fluid-4:max(2rem,min(4vw,3rem));--size-fluid-5:max(4rem,min(5vw,5rem));--size-fluid-6:max(5rem,min(7vw,7.5rem));--size-fluid-7:max(7.5rem,min(10vw,10rem));--size-fluid-8:max(10rem,m
in(20vw,15rem));--size-fluid-9:max(15rem,min(30vw,20rem));--size-fluid-10:max(20rem,min(40vw,30rem));--size-content-1:20ch;--size-content-2:45ch;--size-content-3:60ch;--size-header-1:20ch;--size-header-2:25ch;--size-header-3:35ch;--size-xxs:240px;--size-xs:360px;--size-sm:480px;--size-md:768px;--size-lg:1024px;--size-xl:1440px;--size-xxl:1920px;--size-relative-000:-.5ch;--size-relative-00:-.25ch;--size-relative-1:.25ch;--size-relative-2:.5ch;--size-relative-3:1ch;--size-relative-4:1.25ch;--size-relative-5:1.5ch;--size-relative-6:1.75ch;--size-relative-7:2ch;--size-relative-8:3ch;--size-relative-9:4ch;--size-relative-10:5ch;--size-relative-11:7.5ch;--size-relative-12:10ch;--size-relative-13:15ch;--size-relative-14:20ch;--size-relative-15:30ch;--ease-1:cubic-bezier(.25,0,.5,1);--ease-2:cubic-bezier(.25,0,.4,1);--ease-3:cubic-bezier(.25,0,.3,1);--ease-4:cubic-bezier(.25,0,.2,1);--ease-5:cubic-bezier(.25,0,.1,1);--ease-in-1:cubic-bezier(.25,0,1,1);--ease-in-2:cubic-bezier(.50,0,1,1);--ease-in-3:cubic-bezier(.70,0,1,1);--ease-in-4:cubic-bezier(.90,0,1,1);--ease-in-5:cubic-bezier(1,0,1,1);--ease-out-1:cubic-bezier(0,0,.75,1);--ease-out-2:cubic-bezier(0,0,.50,1);--ease-out-3:cubic-bezier(0,0,.3,1);--ease-out-4:cubic-bezier(0,0,.1,1);--ease-out-5:cubic-bezier(0,0,0,1);--ease-in-out-1:cubic-bezier(.1,0,.9,1);--ease-in-out-2:cubic-bezier(.3,0,.7,1);--ease-in-out-3:cubic-bezier(.5,0,.5,1);--ease-in-out-4:cubic-bezier(.7,0,.3,1);--ease-in-out-5:cubic-bezier(.9,0,.1,1);--ease-elastic-out-1:cubic-bezier(.5,.75,.75,1.25);--ease-elastic-out-2:cubic-bezier(.5,1,.75,1.25);--ease-elastic-out-3:cubic-bezier(.5,1.25,.75,1.25);--ease-elastic-out-4:cubic-bezier(.5,1.5,.75,1.25);--ease-elastic-out-5:cubic-bezier(.5,1.75,.75,1.25);--ease-elastic-in-1:cubic-bezier(.5,-0.25,.75,1);--ease-elastic-in-2:cubic-bezier(.5,-0.50,.75,1);--ease-elastic-in-3:cubic-bezier(.5,-0.75,.75,1);--ease-elastic-in-4:cubic-bezier(.5,-1.00,.75,1);--ease-elastic-in-5:cubic-bezier(.5,-1.25,.75,1);--ease-elastic-in-o
ut-1:cubic-bezier(.5,-.1,.1,1.5);--ease-elastic-in-out-2:cubic-bezier(.5,-.3,.1,1.5);--ease-elastic-in-out-3:cubic-bezier(.5,-.5,.1,1.5);--ease-elastic-in-out-4:cubic-bezier(.5,-.7,.1,1.5);--ease-elastic-in-out-5:cubic-bezier(.5,-.9,.1,1.5);--ease-step-1:steps(2);--ease-step-2:steps(3);--ease-step-3:steps(4);--ease-step-4:steps(7);--ease-step-5:steps(10);--ease-elastic-1:var(--ease-elastic-out-1);--ease-elastic-2:var(--ease-elastic-out-2);--ease-elastic-3:var(--ease-elastic-out-3);--ease-elastic-4:var(--ease-elastic-out-4);--ease-elastic-5:var(--ease-elastic-out-5);--ease-squish-1:var(--ease-elastic-in-out-1);--ease-squish-2:var(--ease-elastic-in-out-2);--ease-squish-3:var(--ease-elastic-in-out-3);--ease-squish-4:var(--ease-elastic-in-out-4);--ease-squish-5:var(--ease-elastic-in-out-5);--ease-spring-1:linear(0,0.006,0.025 2.8%,0.101 6.1%,0.539 18.9%,0.721 25.3%,0.849 31.5%,0.937 38.1%,0.968 41.8%,0.991 45.7%,1.006 50.1%,1.015 55%,1.017 63.9%,1.001);--ease-spring-2:linear(0,0.007,0.029 2.2%,0.118 4.7%,0.625 14.4%,0.826 19%,0.902,0.962,1.008 26.1%,1.041 28.7%,1.064 32.1%,1.07 36%,1.061 40.5%,1.015 53.4%,0.999 61.6%,0.995 71.2%,1);--ease-spring-3:linear(0,0.009,0.035 2.1%,0.141 4.4%,0.723 12.9%,0.938 16.7%,1.017,1.077,1.121,1.149 24.3%,1.159,1.163,1.161,1.154 29.9%,1.129 32.8%,1.051 39.6%,1.017 43.1%,0.991,0.977 51%,0.974 53.8%,0.975 57.1%,0.997 69.8%,1.003 76.9%,1);--ease-spring-4:linear(0,0.009,0.037 1.7%,0.153 3.6%,0.776 10.3%,1.001,1.142 16%,1.185,1.209 19%,1.215 19.9% 20.8%,1.199,1.165 25%,1.056 30.3%,1.008 33%,0.973,0.955 39.2%,0.953 41.1%,0.957 43.3%,0.998 53.3%,1.009 59.1% 63.7%,0.998 78.9%,1);--ease-spring-5:linear(0,0.01,0.04 1.6%,0.161 3.3%,0.816 9.4%,1.046,1.189 14.4%,1.231,1.254 17%,1.259,1.257 18.6%,1.236,1.194 22.3%,1.057 27%,0.999 29.4%,0.955 32.1%,0.942,0.935 34.9%,0.933,0.939 38.4%,1 47.3%,1.011,1.017 52.6%,1.016 56.4%,1 65.2%,0.996 70.2%,1.001 87.2%,1);--ease-bounce-1:linear(0,0.004,0.016,0.035,0.063,0.098,0.141,0.191,0.25,0.316,0.391 
36.8%,0.563,0.766,1 58.8%,0.946,0.908 69.1%,0.895,0.885,0.879,0.878,0.879,0.885,0.895,0.908 89.7%,0.946,1);--ease-bounce-2:linear(0,0.004,0.016,0.035,0.063,0.098,0.141 15.1%,0.25,0.391,0.562,0.765,1,0.892 45.2%,0.849,0.815,0.788,0.769,0.757,0.753,0.757,0.769,0.788,0.815,0.85,0.892 75.2%,1 80.2%,0.973,0.954,0.943,0.939,0.943,0.954,0.973,1);--ease-bounce-3:linear(0,0.004,0.016,0.035,0.062,0.098,0.141 11.4%,0.25,0.39,0.562,0.764,1 30.3%,0.847 34.8%,0.787,0.737,0.699,0.672,0.655,0.65,0.656,0.672,0.699,0.738,0.787,0.847 61.7%,1 66.2%,0.946,0.908,0.885 74.2%,0.879,0.878,0.879,0.885 79.5%,0.908,0.946,1 87.4%,0.981,0.968,0.96,0.957,0.96,0.968,0.981,1);--ease-bounce-4:linear(0,0.004,0.016 3%,0.062,0.141,0.25,0.391,0.562 18.2%,1 24.3%,0.81,0.676 32.3%,0.629,0.595,0.575,0.568,0.575,0.595,0.629,0.676 48.2%,0.811,1 56.2%,0.918,0.86,0.825,0.814,0.825,0.86,0.918,1 77.2%,0.94 80.6%,0.925,0.92,0.925,0.94 87.5%,1 90.9%,0.974,0.965,0.974,1);--ease-bounce-5:linear(0,0.004,0.016 2.5%,0.063,0.141,0.25 10.1%,0.562,1 20.2%,0.783,0.627,0.534 30.9%,0.511,0.503,0.511,0.534 38%,0.627,0.782,1 48.7%,0.892,0.815,0.769 56.3%,0.757,0.753,0.757,0.769 61.3%,0.815,0.892,1 68.8%,0.908 72.4%,0.885,0.878,0.885,0.908 79.4%,1 83%,0.954 85.5%,0.943,0.939,0.943,0.954 90.5%,1 
93%,0.977,0.97,0.977,1);--ease-circ-in:cubic-bezier(.6,.04,.98,.335);--ease-circ-in-out:cubic-bezier(.785,.135,.15,.86);--ease-circ-out:cubic-bezier(.075,.82,.165,1);--ease-cubic-in:cubic-bezier(.55,.055,.675,.19);--ease-cubic-in-out:cubic-bezier(.645,.045,.355,1);--ease-cubic-out:cubic-bezier(.215,.61,.355,1);--ease-expo-in:cubic-bezier(.95,.05,.795,.035);--ease-expo-in-out:cubic-bezier(1,0,0,1);--ease-expo-out:cubic-bezier(.19,1,.22,1);--ease-quad-in:cubic-bezier(.55,.085,.68,.53);--ease-quad-in-out:cubic-bezier(.455,.03,.515,.955);--ease-quad-out:cubic-bezier(.25,.46,.45,.94);--ease-quart-in:cubic-bezier(.895,.03,.685,.22);--ease-quart-in-out:cubic-bezier(.77,0,.175,1);--ease-quart-out:cubic-bezier(.165,.84,.44,1);--ease-quint-in:cubic-bezier(.755,.05,.855,.06);--ease-quint-in-out:cubic-bezier(.86,0,.07,1);--ease-quint-out:cubic-bezier(.23,1,.32,1);--ease-sine-in:cubic-bezier(.47,0,.745,.715);--ease-sine-in-out:cubic-bezier(.445,.05,.55,.95);--ease-sine-out:cubic-bezier(.39,.575,.565,1);--layer-1:1;--layer-2:2;--layer-3:3;--layer-4:4;--layer-5:5;--layer-important:2147483647;--shadow-color:220 3% 15%;--shadow-strength:1%;--inner-shadow-highlight:inset 0 -.5px 0 0 #fff,inset 0 .5px 0 0 rgba(0,0,0,.067);--shadow-1:0 1px 2px -1px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 9%));--shadow-2:0 3px 5px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 3%)),0 7px 14px -5px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 5%));--shadow-3:0 -1px 3px 0 hsl(var(--shadow-color)/calc(var(--shadow-strength) + 2%)),0 1px 2px -5px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 2%)),0 2px 5px -5px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 4%)),0 4px 12px -5px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 5%)),0 12px 15px -5px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 7%));--shadow-4:0 -2px 5px 0 hsl(var(--shadow-color)/calc(var(--shadow-strength) + 2%)),0 1px 1px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) 
+ 3%)),0 2px 2px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 3%)),0 5px 5px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 4%)),0 9px 9px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 5%)),0 16px 16px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 6%));--shadow-5:0 -1px 2px 0 hsl(var(--shadow-color)/calc(var(--shadow-strength) + 2%)),0 2px 1px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 3%)),0 5px 5px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 3%)),0 10px 10px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 4%)),0 20px 20px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 5%)),0 40px 40px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 7%));--shadow-6:0 -1px 2px 0 hsl(var(--shadow-color)/calc(var(--shadow-strength) + 2%)),0 3px 2px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 3%)),0 7px 5px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 3%)),0 12px 10px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 4%)),0 22px 18px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 5%)),0 41px 33px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 6%)),0 100px 80px -2px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 7%));--inner-shadow-0:inset 0 0 0 1px hsl(var(--shadow-color)/calc(var(--shadow-strength) + 9%));--inner-shadow-1:inset 0 1px 2px 0 hsl(var(--shadow-color)/calc(var(--shadow-strength) + 9%)),var(--inner-shadow-highlight);--inner-shadow-2:inset 0 1px 4px 0 hsl(var(--shadow-color)/calc(var(--shadow-strength) + 9%)),var(--inner-shadow-highlight);--inner-shadow-3:inset 0 2px 8px 0 hsl(var(--shadow-color)/calc(var(--shadow-strength) + 9%)),var(--inner-shadow-highlight);--inner-shadow-4:inset 0 2px 14px 0 hsl(var(--shadow-color)/calc(var(--shadow-strength) + 
9%)),var(--inner-shadow-highlight);--ratio-square:1;--ratio-landscape:4/3;--ratio-portrait:3/4;--ratio-widescreen:16/9;--ratio-ultrawide:18/5;--ratio-golden:1.6180/1;--gray-0:#f8f9fa;--gray-1:#f1f3f5;--gray-2:#e9ecef;--gray-3:#dee2e6;--gray-4:#ced4da;--gray-5:#adb5bd;--gray-6:#868e96;--gray-7:#495057;--gray-8:#343a40;--gray-9:#212529;--gray-10:#16191d;--gray-11:#0d0f12;--gray-12:#030507;--stone-0:#f8fafb;--stone-1:#f2f4f6;--stone-2:#ebedef;--stone-3:#e0e4e5;--stone-4:#d1d6d8;--stone-5:#b1b6b9;--stone-6:#979b9d;--stone-7:#7e8282;--stone-8:#666968;--stone-9:#50514f;--stone-10:#3a3a37;--stone-11:#252521;--stone-12:#121210;--red-0:#fff5f5;--red-1:#ffe3e3;--red-2:#ffc9c9;--red-3:#ffa8a8;--red-4:#ff8787;--red-5:#ff6b6b;--red-6:#fa5252;--red-7:#f03e3e;--red-8:#e03131;--red-9:#c92a2a;--red-10:#b02525;--red-11:#962020;--red-12:#7d1a1a;--pink-0:#fff0f6;--pink-1:#ffdeeb;--pink-2:#fcc2d7;--pink-3:#faa2c1;--pink-4:#f783ac;--pink-5:#f06595;--pink-6:#e64980;--pink-7:#d6336c;--pink-8:#c2255c;--pink-9:#a61e4d;--pink-10:#8c1941;--pink-11:#731536;--pink-12:#59102a;--purple-0:#f8f0fc;--purple-1:#f3d9fa;--purple-2:#eebefa;--purple-3:#e599f7;--purple-4:#da77f2;--purple-5:#cc5de8;--purple-6:#be4bdb;--purple-7:#ae3ec9;--purple-8:#9c36b5;--purple-9:#862e9c;--purple-10:#702682;--purple-11:#5a1e69;--purple-12:#44174f;--violet-0:#f3f0ff;--violet-1:#e5dbff;--violet-2:#d0bfff;--violet-3:#b197fc;--violet-4:#9775fa;--violet-5:#845ef7;--violet-6:#7950f2;--violet-7:#7048e8;--violet-8:#6741d9;--violet-9:#5f3dc4;--violet-10:#5235ab;--violet-11:#462d91;--violet-12:#3a2578;--indigo-0:#edf2ff;--indigo-1:#dbe4ff;--indigo-2:#bac8ff;--indigo-3:#91a7ff;--indigo-4:#748ffc;--indigo-5:#5c7cfa;--indigo-6:#4c6ef5;--indigo-7:#4263eb;--indigo-8:#3b5bdb;--indigo-9:#364fc7;--indigo-10:#2f44ad;--indigo-11:#283a94;--indigo-12:#21307a;--blue-0:#e7f5ff;--blue-1:#d0ebff;--blue-2:#a5d8ff;--blue-3:#74c0fc;--blue-4:#4dabf7;--blue-5:#339af0;--blue-6:#228be6;--blue-7:#1c7ed6;--blue-8:#1971c2;--blue-9:#1864ab;--blue-10:#145591;
--blue-11:#114678;--blue-12:#0d375e;--cyan-0:#e3fafc;--cyan-1:#c5f6fa;--cyan-2:#99e9f2;--cyan-3:#66d9e8;--cyan-4:#3bc9db;--cyan-5:#22b8cf;--cyan-6:#15aabf;--cyan-7:#1098ad;--cyan-8:#0c8599;--cyan-9:#0b7285;--cyan-10:#095c6b;--cyan-11:#074652;--cyan-12:#053038;--teal-0:#e6fcf5;--teal-1:#c3fae8;--teal-2:#96f2d7;--teal-3:#63e6be;--teal-4:#38d9a9;--teal-5:#20c997;--teal-6:#12b886;--teal-7:#0ca678;--teal-8:#099268;--teal-9:#087f5b;--teal-10:#066649;--teal-11:#054d37;--teal-12:#033325;--green-0:#ebfbee;--green-1:#d3f9d8;--green-2:#b2f2bb;--green-3:#8ce99a;--green-4:#69db7c;--green-5:#51cf66;--green-6:#40c057;--green-7:#37b24d;--green-8:#2f9e44;--green-9:#2b8a3e;--green-10:#237032;--green-11:#1b5727;--green-12:#133d1b;--lime-0:#f4fce3;--lime-1:#e9fac8;--lime-2:#d8f5a2;--lime-3:#c0eb75;--lime-4:#a9e34b;--lime-5:#94d82d;--lime-6:#82c91e;--lime-7:#74b816;--lime-8:#66a80f;--lime-9:#5c940d;--lime-10:#4c7a0b;--lime-11:#3c6109;--lime-12:#2c4706;--yellow-0:#fff9db;--yellow-1:#fff3bf;--yellow-2:#ffec99;--yellow-3:#ffe066;--yellow-4:#ffd43b;--yellow-5:#fcc419;--yellow-6:#fab005;--yellow-7:#f59f00;--yellow-8:#f08c00;--yellow-9:#e67700;--yellow-10:#b35c00;--yellow-11:#804200;--yellow-12:#663500;--orange-0:#fff4e6;--orange-1:#ffe8cc;--orange-2:#ffd8a8;--orange-3:#ffc078;--orange-4:#ffa94d;--orange-5:#ff922b;--orange-6:#fd7e14;--orange-7:#f76707;--orange-8:#e8590c;--orange-9:#d9480f;--orange-10:#bf400d;--orange-11:#99330b;--orange-12:#802b09;--choco-0:#fff8dc;--choco-1:#fce1bc;--choco-2:#f7ca9e;--choco-3:#f1b280;--choco-4:#e99b62;--choco-5:#df8545;--choco-6:#d46e25;--choco-7:#bd5f1b;--choco-8:#a45117;--choco-9:#8a4513;--choco-10:#703a13;--choco-11:#572f12;--choco-12:#3d210d;--brown-0:#faf4eb;--brown-1:#ede0d1;--brown-2:#e0cab7;--brown-3:#d3b79e;--brown-4:#c5a285;--brown-5:#b78f6d;--brown-6:#a87c56;--brown-7:#956b47;--brown-8:#825b3a;--brown-9:#6f4b2d;--brown-10:#5e3a21;--brown-11:#4e2b15;--brown-12:#422412;--sand-0:#f8fafb;--sand-1:#e6e4dc;--sand-2:#d5cfbd;--sand-3:#c2b9a0;--sand-4:#aea
58c;--sand-5:#9a9178;--sand-6:#867c65;--sand-7:#736a53;--sand-8:#5f5746;--sand-9:#4b4639;--sand-10:#38352d;--sand-11:#252521;--sand-12:#121210;--camo-0:#f9fbe7;--camo-1:#e8ed9c;--camo-2:#d2df4e;--camo-3:#c2ce34;--camo-4:#b5bb2e;--camo-5:#a7a827;--camo-6:#999621;--camo-7:#8c851c;--camo-8:#7e7416;--camo-9:#6d6414;--camo-10:#5d5411;--camo-11:#4d460e;--camo-12:#36300a;--jungle-0:#ecfeb0;--jungle-1:#def39a;--jungle-2:#d0e884;--jungle-3:#c2dd6e;--jungle-4:#b5d15b;--jungle-5:#a8c648;--jungle-6:#9bbb36;--jungle-7:#8fb024;--jungle-8:#84a513;--jungle-9:#7a9908;--jungle-10:#658006;--jungle-11:#516605;--jungle-12:#3d4d04;--gradient-space: ;--gradient-1:linear-gradient(to bottom right var(--gradient-space),#1f005c,#5b0060,#870160,#ac255e,#ca485c,#e16b5c,#f39060,#ffb56b);--gradient-2:linear-gradient(to bottom right var(--gradient-space),#48005c,#8300e2,#a269ff);--gradient-3:radial-gradient(circle at top right var(--gradient-space),#0ff,rgba(0,255,255,0)),radial-gradient(circle at bottom left var(--gradient-space),#ff1492,rgba(255,20,146,0));--gradient-4:linear-gradient(to bottom right var(--gradient-space),#00f5a0,#00d9f5);--gradient-5:conic-gradient(from -270deg at 75% 110% var(--gradient-space),#f0f,#fffaf0);--gradient-6:conic-gradient(from -90deg at top left var(--gradient-space),#000,#fff);--gradient-7:linear-gradient(to bottom right var(--gradient-space),#72c6ef,#004e8f);--gradient-8:conic-gradient(from 90deg at 50% 0% var(--gradient-space),#111,50%,#222,#111);--gradient-9:conic-gradient(from .5turn at bottom center var(--gradient-space),#add8e6,#fff);--gradient-10:conic-gradient(from 90deg at 40% -25% var(--gradient-space),gold,#f79d03,#ee6907,#e6390a,#de0d0d,#d61039,#cf1261,#c71585,#cf1261,#d61039,#de0d0d,#ee6907,#f79d03,gold,gold,gold);--gradient-11:conic-gradient(at bottom left var(--gradient-space),#ff1493,cyan);--gradient-12:conic-gradient(from 90deg at 25% -10% var(--gradient-space),#ff4500,#d3f340,#7bee85,#afeeee,#7bee85);--gradient-13:radial-gradient(circle at 50% 
200% var(--gradient-space),#000142,#3b0083,#b300c3,#ff059f,#ff4661,#ffad86,#fff3c7);--gradient-14:conic-gradient(at top right var(--gradient-space),lime,cyan);--gradient-15:linear-gradient(to bottom right var(--gradient-space),#c7d2fe,#fecaca,#fef3c7);--gradient-16:radial-gradient(circle at 50% -250% var(--gradient-space),#374151,#111827,#000);--gradient-17:conic-gradient(from -90deg at 50% -25% var(--gradient-space),blue,#8a2be2);--gradient-18:linear-gradient(0deg var(--gradient-space),rgba(255,0,0,.8),rgba(255,0,0,0) 75%),linear-gradient(60deg var(--gradient-space),rgba(255,255,0,.8),rgba(255,255,0,0) 75%),linear-gradient(120deg var(--gradient-space),rgba(0,255,0,.8),rgba(0,255,0,0) 75%),linear-gradient(180deg var(--gradient-space),rgba(0,255,255,.8),rgba(0,255,255,0) 75%),linear-gradient(240deg var(--gradient-space),rgba(0,0,255,.8),rgba(0,0,255,0) 75%),linear-gradient(300deg var(--gradient-space),rgba(255,0,255,.8),rgba(255,0,255,0) 75%);--gradient-19:linear-gradient(to bottom right var(--gradient-space),#ffe259,#ffa751);--gradient-20:conic-gradient(from -135deg at -10% center var(--gradient-space),orange,#ff7715,#ff522a,#ff3f47,#ff5482,#ff69b4);--gradient-21:conic-gradient(from -90deg at 25% 115% var(--gradient-space),red,#f06,#f0c,#c0f,#60f,#00f,#00f,#00f,#00f);--gradient-22:linear-gradient(to bottom right var(--gradient-space),#acb6e5,#86fde8);--gradient-23:linear-gradient(to bottom right var(--gradient-space),#536976,#292e49);--gradient-24:conic-gradient(from .5turn at 0% 0% var(--gradient-space),#00c476,10%,#82b0ff,90%,#00c476);--gradient-25:conic-gradient(at 125% 50% var(--gradient-space),#b78cf7,#ff7c94,#ffcf0d,#ff7c94,#b78cf7);--gradient-26:linear-gradient(to bottom right var(--gradient-space),#9796f0,#fbc7d4);--gradient-27:conic-gradient(from .5turn at bottom left var(--gradient-space),#ff1493,#639);--gradient-28:conic-gradient(from -90deg at 50% 105% var(--gradient-space),#fff,orchid);--gradient-29:radial-gradient(circle at top right 
var(--gradient-space),#bfb3ff,rgba(191,179,255,0)),radial-gradient(circle at bottom left var(--gradient-space),#86acf9,rgba(134,172,249,0));--gradient-30:radial-gradient(circle at top right var(--gradient-space),#00ff80,rgba(0,255,128,0)),radial-gradient(circle at bottom left var(--gradient-space),#adffd6,rgba(173,255,214,0));--noise-1:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 200 200' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='a'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='.005' numOctaves='2' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23a)'/%3E%3C/svg%3E");--noise-2:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 300 300' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='a'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='.05' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23a)'/%3E%3C/svg%3E");--noise-3:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 1024 1024' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='a'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='.25' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23a)'/%3E%3C/svg%3E");--noise-4:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 2056 2056' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='a'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='.5' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23a)'/%3E%3C/svg%3E");--noise-5:url("data:image/svg+xml;charset=utf-8,%3Csvg viewBox='0 0 2056 2056' xmlns='http://www.w3.org/2000/svg'%3E%3Cfilter id='a'%3E%3CfeTurbulence type='fractalNoise' baseFrequency='.75' stitchTiles='stitch'/%3E%3C/filter%3E%3Crect width='100%25' height='100%25' filter='url(%23a)'/%3E%3C/svg%3E");--noise-filter-1:contrast(300%) brightness(100%);--noise-filter-2:contrast(200%) brightness(150%);--noise-filter-3:contrast(200%) 
brightness(250%);--noise-filter-4:contrast(200%) brightness(500%);--noise-filter-5:contrast(200%) brightness(1000%);--animation-fade-in:fade-in .5s var(--ease-3);--animation-fade-in-bloom:fade-in-bloom 2s var(--ease-3);--animation-fade-out:fade-out .5s var(--ease-3);--animation-fade-out-bloom:fade-out-bloom 2s var(--ease-3);--animation-scale-up:scale-up .5s var(--ease-3);--animation-scale-down:scale-down .5s var(--ease-3);--animation-slide-out-up:slide-out-up .5s var(--ease-3);--animation-slide-out-down:slide-out-down .5s var(--ease-3);--animation-slide-out-right:slide-out-right .5s var(--ease-3);--animation-slide-out-left:slide-out-left .5s var(--ease-3);--animation-slide-in-up:slide-in-up .5s var(--ease-3);--animation-slide-in-down:slide-in-down .5s var(--ease-3);--animation-slide-in-right:slide-in-right .5s var(--ease-3);--animation-slide-in-left:slide-in-left .5s var(--ease-3);--animation-shake-x:shake-x .75s var(--ease-out-5);--animation-shake-y:shake-y .75s var(--ease-out-5);--animation-shake-z:shake-z 1s var(--ease-in-out-3);--animation-spin:spin 2s linear infinite;--animation-ping:ping 5s var(--ease-out-3) infinite;--animation-blink:blink 1s var(--ease-out-3) infinite;--animation-float:float 3s var(--ease-in-out-3) infinite;--animation-bounce:bounce 2s var(--ease-squish-2) infinite;--animation-pulse:pulse 2s var(--ease-out-3) infinite;--border-size-1:1px;--border-size-2:2px;--border-size-3:5px;--border-size-4:10px;--border-size-5:25px;--radius-1:2px;--radius-2:5px;--radius-3:1rem;--radius-4:2rem;--radius-5:4rem;--radius-6:8rem;--radius-drawn-1:255px 15px 225px 15px/15px 225px 15px 255px;--radius-drawn-2:125px 10px 20px 185px/25px 205px 205px 25px;--radius-drawn-3:15px 255px 15px 225px/225px 15px 255px 15px;--radius-drawn-4:15px 25px 155px 25px/225px 150px 25px 115px;--radius-drawn-5:250px 25px 15px 20px/15px 80px 105px 115px;--radius-drawn-6:28px 100px 20px 15px/150px 30px 205px 225px;--radius-round:1e5px;--radius-blob-1:30% 70% 70% 30%/53% 30% 70% 
47%;--radius-blob-2:53% 47% 34% 66%/63% 46% 54% 37%;--radius-blob-3:37% 63% 56% 44%/49% 56% 44% 51%;--radius-blob-4:63% 37% 37% 63%/43% 37% 63% 57%;--radius-blob-5:49% 51% 48% 52%/57% 44% 56% 43%;--radius-conditional-1:clamp(0px,calc(100vw - 100%) * 1e5,var(--radius-1));--radius-conditional-2:clamp(0px,calc(100vw - 100%) * 1e5,var(--radius-2));--radius-conditional-3:clamp(0px,calc(100vw - 100%) * 1e5,var(--radius-3));--radius-conditional-4:clamp(0px,calc(100vw - 100%) * 1e5,var(--radius-4));--radius-conditional-5:clamp(0px,calc(100vw - 100%) * 1e5,var(--radius-5));--radius-conditional-6:clamp(0px,calc(100vw - 100%) * 1e5,var(--radius-6))}@media (prefers-color-scheme:dark){:where(html){--shadow-color:220 40% 2%;--shadow-strength:25%;--inner-shadow-highlight:inset 0 -.5px 0 0 hsla(0,0%,100%,.067),inset 0 .5px 0 0 rgba(0,0,0,.467)}}@supports (background:linear-gradient(to right in oklab,#000,#fff)){:where(html){--gradient-space:in oklab}}@keyframes fade-in{to{opacity:1}}@keyframes fade-in-bloom{0%{filter:brightness(1) blur(20px);opacity:0}10%{filter:brightness(2) blur(10px);opacity:1}to{filter:brightness(1) blur(0);opacity:1}}@keyframes fade-out{to{opacity:0}}@keyframes fade-out-bloom{to{filter:brightness(1) blur(20px);opacity:0}10%{filter:brightness(2) blur(10px);opacity:1}0%{filter:brightness(1) blur(0);opacity:1}}@keyframes scale-up{to{transform:scale(1.25)}}@keyframes scale-down{to{transform:scale(.75)}}@keyframes slide-out-up{to{transform:translateY(-100%)}}@keyframes slide-out-down{to{transform:translateY(100%)}}@keyframes slide-out-right{to{transform:translateX(100%)}}@keyframes slide-out-left{to{transform:translateX(-100%)}}@keyframes slide-in-up{0%{transform:translateY(100%)}}@keyframes slide-in-down{0%{transform:translateY(-100%)}}@keyframes slide-in-right{0%{transform:translateX(-100%)}}@keyframes slide-in-left{0%{transform:translateX(100%)}}@keyframes 
shake-x{0%,to{transform:translateX(0)}20%{transform:translateX(-5%)}40%{transform:translateX(5%)}60%{transform:translateX(-5%)}80%{transform:translateX(5%)}}@keyframes shake-y{0%,to{transform:translateY(0)}20%{transform:translateY(-5%)}40%{transform:translateY(5%)}60%{transform:translateY(-5%)}80%{transform:translateY(5%)}}@keyframes shake-z{0%,to{transform:rotate(0deg)}20%{transform:rotate(-2deg)}40%{transform:rotate(2deg)}60%{transform:rotate(-2deg)}80%{transform:rotate(2deg)}}@keyframes spin{to{transform:rotate(1turn)}}@keyframes ping{90%,to{opacity:0;transform:scale(2)}}@keyframes blink{0%,to{opacity:1}50%{opacity:.5}}@keyframes float{50%{transform:translateY(-25%)}}@keyframes bounce{25%{transform:translateY(-20%)}40%{transform:translateY(-3%)}0%,60%,to{transform:translateY(0)}}@keyframes pulse{50%{transform:scale(.9)}}@media (prefers-color-scheme:dark){@keyframes fade-in-bloom{0%{filter:brightness(1) blur(20px);opacity:0}10%{filter:brightness(.5) blur(10px);opacity:1}to{filter:brightness(1) blur(0);opacity:1}}}@media (prefers-color-scheme:dark){@keyframes fade-out-bloom{to{filter:brightness(1) blur(20px);opacity:0}10%{filter:brightness(.5) blur(10px);opacity:1}0%{filter:brightness(1) blur(0);opacity:1}}} \ No newline at end of file diff --git a/docs/plans/parallel-merge-and-footprint-design-review.md b/docs/plans/parallel-merge-and-footprint-design-review.md new file mode 100644 index 00000000..91df08f0 --- /dev/null +++ b/docs/plans/parallel-merge-and-footprint-design-review.md @@ -0,0 +1,362 @@ + + + +# Parallel Merge & Footprint Optimization Design Review + +- **Status:** Review complete; no implementation approved yet +- **Date:** 2026-03-28 +- **Idea Note:** [Parallel Merge & Footprint Scheduling Optimizations](parallel-merge-and-footprint-optimizations.md) + +## Purpose + +The earlier optimization note records two attractive ideas: + +1. replace parallel delta flatten-and-sort with a k-way merge, and +2. skip footprint checks for cross-shard rewrites. 
+ +This document answers a narrower question: + +- what is actually true in Echo today, +- what would have to be proven before either optimization is safe, +- which idea is still worth investigating, and +- which idea should be treated as suspect until stronger evidence exists. + +## Executive Summary + +1. The **k-way merge** idea is still plausible, but the current note overstates + why it works. The current executor returns **per-worker unsorted deltas**, + not per-shard canonical runs, so the required sorted-run invariant is not + yet established. +2. The **shard-aware footprint skip** idea is much weaker against the current + implementation. The default scheduler is already the `GenSet`-based + `RadixScheduler`, whose reservation path scales like `O(m)` in candidate + footprint size rather than `O(k×m)` in the number of previously admitted + rewrites. +3. The cross-shard independence claim is **not currently proven**. Shard routing + is by the scope node's `NodeId`, while footprint conflicts are checked over + warp-scoped nodes, edges, attachments, and ports. Those are not the same + keyspace, and current runtime enforcement does not prove they always align. +4. Recommendation: + - keep investigating k-way merge, but only behind an explicit sorted-run + proof obligation and benchmark plan + - do **not** implement cross-shard footprint skipping until a stronger + locality invariant is proven + +## Current Code Reality + +### Parallel delta merge + +Today the merge path is: + +1. execute work in parallel +2. collect one `TickDelta` per worker +3. flatten all worker deltas +4. 
sort globally before hashing or validation paths compare results + +Relevant code: + +- `execute_parallel_sharded()` returns one `TickDelta` per worker in + `crates/warp-core/src/parallel/exec.rs` +- the live engine path in `crates/warp-core/src/engine_impl.rs` merges parallel + deltas by canonical `WarpOpKey` +- `merge_deltas()` in `crates/warp-core/src/parallel/merge.rs` and the + `delta_validate` helpers flatten all worker outputs and sort the combined + vector by `(WarpOpKey, OpOrigin)` +- `TickDelta::into_parts_unsorted()` in `crates/warp-core/src/tick_delta.rs` + explicitly exposes unsorted emission order + +So the current implementation does **not** already materialize the +"per-shard pre-sorted run" structure that the idea note assumes, and the +ordering key differs slightly between the live engine path and the stricter +validation/test merge path. + +### Scheduler complexity + +The default scheduler is the `RadixScheduler`, not the legacy +`Vec` frontier scan. + +- `RadixScheduler::reserve()` uses generation-stamped sets (`GenSet`) for + membership checks in `crates/warp-core/src/scheduler.rs` +- `docs/scheduler-warp-core.md` already documents the default path as `O(m)`, + where `m` is the candidate footprint size +- the legacy pairwise frontier scan still exists as `LegacyScheduler`, but it + is not the default hot path + +This matters because the shard-aware footprint idea primarily helps pairwise +all-frontier overlap checks. That is no longer the main scheduler algorithm. 
+ +### Footprint keys vs shard keys + +Shard routing and footprint conflict detection are based on different data: + +- shard routing uses the scoped node's `NodeId` low bits in + `crates/warp-core/src/parallel/shard.rs` +- footprint conflicts are checked over warp-scoped nodes, edges, + attachments, and ports in `crates/warp-core/src/footprint.rs` and + `crates/warp-core/src/scheduler.rs` + +Current runtime enforcement proves footprints are **warp-local**, not +**shard-local**: + +- `FootprintGuard::new()` in `crates/warp-core/src/footprint_guard.rs` + asserts against cross-warp entries +- it does not assert that every touched slot maps to the same shard as + `scope(r)` + +That distinction is exactly why the cross-shard skip needs a proof instead of a +performance intuition. + +## 1. K-Way Merge Assessment + +### What would have to be true + +For a k-way merge to be a correct replacement for the current global sort, we +need a family of runs `R1..Rk` such that: + +- each `Ri` is already sorted by the exact canonical order required by the path + being replaced, and +- the current merged output is `sort(flatten(R1..Rk))` under that same key + +For the `delta_validate` and test helpers that key is `(WarpOpKey, OpOrigin)`. +For the live engine path, the proof obligation must be stated against the +engine's canonical `WarpOpKey` ordering instead of assuming the stricter +validation key. + +Under those conditions, a standard heap-based merge is correct: + +```text +kway_merge(R1..Rk) == sort(flatten(R1..Rk)) +``` + +This is the real proof obligation. The current note skips straight to the +conclusion without establishing that the merge inputs satisfy the premise. 
+ +### What is true today + +What we actually have today is weaker: + +- the executor returns one `TickDelta` per worker, not per shard +- workers may process many shards +- `TickDelta` collects operations in emission order, not canonical order +- canonical ordering is imposed later by `merge_deltas()` + +So "shard assignment exists" does **not** imply "merge inputs are sorted runs." + +### What about the 1-core / 1-worker case? + +That case is important because it exposes the missing invariant cleanly. + +If `k = 1`: + +- a k-way merge only helps if the single input run is already sorted by the + canonical key +- otherwise we still need to sort, and the optimization collapses back into a + normal sort path + +So the "what if 1 shard because 1 CPU core" question has a direct answer: + +- if the single worker delta is unsorted, the k-way merge idea provides no + algorithmic win +- if the single worker delta is already canonically sorted, then the merge is + effectively a linear pass, but that is a stronger invariant than the current + implementation documents + +### Recommendation + +The k-way merge idea remains worth investigating, but only in this order: + +1. decide whether Echo should produce **per-shard canonical runs** or + **per-worker canonical runs** +2. prove or enforce that each run is already sorted by `(WarpOpKey, OpOrigin)` +3. benchmark: + - current flatten-and-sort + - sort-each-run-plus-merge + - true pre-sorted k-way merge +4. only keep the optimization if the canonical-output equality is explicit and + the benchmark win survives review + +## 2. Shard-Aware Footprint Skip Assessment + +### The claimed invariant + +The idea note assumes: + +```text +shard(r1) != shard(r2) => footprint(r1) and footprint(r2) are disjoint +``` + +That is the key claim. Without it, skipping cross-shard overlap checks is not +conservative, and the optimization is unsound. 
+ +### Why the claim is not currently established + +The current code only guarantees: + +- the rewrite has a scoped node +- shard routing is a deterministic function of that scope node +- footprint slots are warp-scoped +- footprint guards reject cross-warp entries + +It does **not** currently prove: + +- every node touched by the rewrite hashes to the same shard as the scope node +- every edge touched by the rewrite hashes to that same shard +- every attachment touched by the rewrite belongs to resources in that same shard +- every boundary port touched by the rewrite belongs to that same shard + +That is enough to reject the note's current "structurally disjoint by +construction" wording. + +### Could there still be overlapping footprints? + +Yes. Unless we prove a stronger locality invariant, the answer is plainly yes. + +The dangerous pattern is: + +```text +rewrite r has scope node A +shard(scope(A)) = s1 +rewrite body touches some other slot X +shard(slot(X)) = s2 +``` + +If `X` can differ from the scope shard, then two rewrites can land in different +scope shards and still overlap through some footprinted slot. + +That possibility is enough to block the optimization until the invariant is +settled. + +### Does the "1 / shard_count" math still matter? + +Only for a pairwise overlap algorithm. + +If we were still using the old pairwise frontier scan, then under a uniform +distribution: + +- probability of same-shard pair: `1 / S` +- expected candidate pairs surviving the shard gate: + `C(N, 2) / S` + +That math is fine as a performance estimate for the **legacy** pairwise model. + +But the current default scheduler is already `O(m)` with `GenSet`s, not +`O(k×m)` over frontier size, so there is no honest shard-count crossover to +claim against today's default path without fresh benchmarks. + +### What about the bloom-filter idea? + +The bloom-style same-shard prefilter is the least controversial part. 
+ +If two footprints share a real slot and the filter is built from those slots, +then they must share at least one set bit. Therefore: + +- `filter_a & filter_b == 0` implies no shared slot represented in the filter +- false positives are possible +- false negatives are not acceptable + +So the same-shard prefilter is conceptually fine as a conservative +implementation detail. The problem is the earlier step: the current note has +not earned the right to skip **cross-shard** checks yet. + +## Proof Obligations + +### K-way merge + +Before implementation, prove or enforce: + +1. every merge input run is sorted by `(WarpOpKey, OpOrigin)` +2. the k-way merge produces byte-for-byte identical canonical output to the + current flatten-and-sort path +3. dedupe and conflict detection semantics are unchanged + +Acceptable proof styles: + +- a direct design proof over sorted runs +- property tests comparing `kway_merge` against `sort(flatten(...))` +- deterministic regression tests across worker counts and shard layouts + +### Cross-shard footprint skip + +Before implementation, prove: + +```text +for every rewrite r and every footprinted slot x in r, +shard(x) == shard(scope(r)) +``` + +Then derive: + +```text +shard(r1) != shard(r2) => independent(r1, r2) +``` + +Until that implication is proved, the optimization should be treated as unsafe. 
+ +Acceptable proof styles: + +- a written invariant tied to the rewrite API and enforced by runtime guards +- property tests that generate rewrites and verify footprint slots stay on the + scope shard +- bounded model checking if a sufficiently small executable model exists + +Formal-methods note: + +- a tool like Kani or a separate executable model could help for bounded cases +- but the first useful step is still to write down the invariant precisely +- without that, "use a formal tool" just formalizes an ambiguous claim + +## Benchmark Plan + +### K-way merge + +Benchmark only after the sorted-run invariant is explicit. + +Compare: + +1. current flatten-and-sort +2. sort-each-run then k-way merge +3. true pre-sorted k-way merge + +Measure: + +- total merge wall time +- allocation count / bytes +- sensitivity to worker count +- sensitivity to skewed shard distributions + +### Shard-aware footprint skipping + +Do not benchmark first. Prove the invariant first. + +If the invariant is ever proven, then benchmark against the current +`RadixScheduler`, not against the legacy pairwise scheduler alone. + +## Kill Criteria + +### K-way merge + +Reject the optimization if any of these are true: + +- the merge inputs cannot be made individually canonical without an equivalent + sorting cost +- the implementation complicates determinism reasoning materially +- benchmarks do not show a real win on representative shard distributions + +### Shard-aware footprint skip + +Reject the optimization if any of these are true: + +- a rewrite can touch any slot outside the scope shard +- runtime enforcement cannot cheaply verify the required locality invariant +- the benchmark only beats the legacy scheduler but not the current default + `GenSet` scheduler + +## Final Recommendation + +Treat the two ideas differently. + +- **K-way merge:** keep alive as a plausible optimization candidate, but convert + it into a real design with explicit sorted-run obligations. 
+- **Shard-aware footprint skip:** downgrade from "optimization candidate" to + "hypothesis requiring a proof." Until the stronger shard-locality invariant is + stated and enforced, it should not move toward implementation. diff --git a/docs/plans/parallel-merge-and-footprint-optimizations.md b/docs/plans/parallel-merge-and-footprint-optimizations.md index 37aa2842..4a31884e 100644 --- a/docs/plans/parallel-merge-and-footprint-optimizations.md +++ b/docs/plans/parallel-merge-and-footprint-optimizations.md @@ -5,6 +5,18 @@ **Status:** Ideas — not yet designed or scheduled +See also the stricter review note: +[Parallel Merge & Footprint Optimization Design Review](parallel-merge-and-footprint-design-review.md). + +Current disposition after code review: + +- k-way merge remains plausible, but only if merge inputs can be proven or + enforced to be individually sorted by the canonical `(WarpOpKey, OpOrigin)` + order +- shard-aware cross-shard footprint skipping is **not** currently proven safe + against the default scheduler and should be treated as a hypothesis, not an + implementation-ready optimization + Two optimization opportunities for the parallel execution pipeline, both exploiting structure that already exists in the shard-based architecture. @@ -14,10 +26,11 @@ exploiting structure that already exists in the shard-based architecture. ### Observation -Per-shard deltas are already partially sorted by construction: shards are -assigned by `lowbits(NodeId) & (SHARDS - 1)`, so ops within a shard share -a bit prefix. If ops are accumulated in insertion order within each shard, -the per-worker deltas are pre-sorted runs. +Per-shard deltas may be partially sorted only under stricter preconditions. +Shard assignment by `lowbits(NodeId) & (SHARDS - 1)` alone does not prove +canonical run ordering, and the current code does not yet enforce individually +sorted runs. Treat "pre-sorted runs" as a hypothesis until the design review's +proof obligations are satisfied. 
### Idea @@ -27,10 +40,10 @@ O(n log n), where k = worker count (bounded, typically 4–16). ### Why It Works -The canonical merge must produce ops in `(WarpOpKey, OpOrigin)` order for -deterministic hashing. A k-way merge of pre-sorted runs produces the same -canonical order — it's just cheaper when the runs are already partially -ordered, which they are by shard assignment. +If the merge inputs can be proven or enforced to be pre-sorted by the canonical +ordering required by the target path, then a k-way merge would produce the same +deterministic output at lower cost. That proof does not exist yet, so this +remains a design candidate rather than an implementation-ready optimization. ### Constraints @@ -45,14 +58,15 @@ ordered, which they are by shard assignment. ### Current Behavior Footprint overlap testing currently considers all rule pairs. But rules -whose footprints land in different shards are structurally disjoint by -construction — the shard assignment (`lowbits(NodeId)`) already proves -non-overlap. +whose footprints land in different shards may be structurally disjoint under +additional scheduler invariants. The current review note does **not** treat +cross-shard independence as proven against the default scheduler. ### Proposed Change -Only perform footprint overlap checks for rules within the same shard. -Cross-shard pairs are guaranteed independent and skip the check entirely. +Only perform footprint overlap checks for rules within the same shard if a +stronger locality invariant can first prove cross-shard independence. Until +then, cross-shard skip remains a hypothesis and not a safe implementation step. For the remaining same-shard pairs, use a bloom filter over read/write slots: hash each footprint's slots into a small bit vector, AND two @@ -62,10 +76,11 @@ the real overlap check. ### Rationale -In a well-distributed workload, most rules land in different shards, making -the overlap test set much smaller than the full rule set. 
The bloom filter -further prunes within-shard pairs at near-zero cost. False positives just -mean conservative serialization, which is safe — never incorrect. +If the stronger locality invariant holds, then in a well-distributed workload +most rules would land in different shards, making the overlap test set much +smaller than the full rule set. The bloom filter would then further prune +within-shard pairs at near-zero cost. False positives would just mean +conservative serialization, which is safe — never incorrect. ### Bounds diff --git a/scripts/bench_bake.py b/scripts/bench_bake.py deleted file mode 100755 index c3bde5f0..00000000 --- a/scripts/bench_bake.py +++ /dev/null @@ -1,143 +0,0 @@ -#!/usr/bin/env python3 -# SPDX-License-Identifier: Apache-2.0 -# © James Ross Ω FLYING•ROBOTS - -""" -Bake Criterion results into a self-contained HTML report that works over file:// - -Reads estimates from target/criterion for known groups and injects them into -docs/benchmarks/index.html, producing docs/benchmarks/report-inline.html with -`window.__CRITERION_DATA__` and `window.__CRITERION_MISSING__` prepopulated. 
- -Usage: - python3 scripts/bench_bake.py [--out docs/benchmarks/report-inline.html] -""" -from __future__ import annotations - -import argparse -import json -import sys -from pathlib import Path - -ROOT = Path(__file__).resolve().parents[1] -CRITERION = ROOT / "target" / "criterion" -TEMPLATE = ROOT / "docs" / "benchmarks" / "index.html" -DEFAULT_OUT = ROOT / "docs" / "benchmarks" / "report-inline.html" - -# Only bake groups the dashboard renders by default -GROUPS = [ - ("snapshot_hash", "Snapshot Hash"), - ("scheduler_drain", "Scheduler Drain"), - ("scheduler_drain/enqueue", "Scheduler Enqueue"), - ("scheduler_drain/drain", "Scheduler Drain Phase"), -] -INPUTS = [10, 100, 1000, 3000, 10000, 30000] - - -def load_estimate(group: str, n: int): - base = CRITERION / group / str(n) - for kind in ("new", "base", "change"): - p = base / kind / "estimates.json" - if p.exists(): - try: - obj = json.loads(p.read_text()) - mean = ( - obj.get("mean", {}).get("point_estimate") - if isinstance(obj.get("mean"), dict) - else None - ) - if mean is None and isinstance(obj.get("Mean"), dict): - mean = obj["Mean"].get("point_estimate") - lb = ( - obj.get("mean", {}) - .get("confidence_interval", {}) - .get("lower_bound") - ) - ub = ( - obj.get("mean", {}) - .get("confidence_interval", {}) - .get("upper_bound") - ) - if mean is None: - return { - "ok": False, - "path": str(p.relative_to(ROOT)), - "error": "missing mean.point_estimate", - } - return { - "ok": True, - "path": str(p.relative_to(ROOT)), - "mean": float(mean), - "lb": float(lb) if lb is not None else None, - "ub": float(ub) if ub is not None else None, - } - except (json.JSONDecodeError, KeyError, TypeError, ValueError) as e: - return { - "ok": False, - "path": str(p.relative_to(ROOT)), - "error": f"parse error: {e}", - } - return { - "ok": False, - "path": str((base / "new" / "estimates.json").relative_to(ROOT)), - "error": "not found (tried new/base/change)", - } - - -def build_inline_script(results, missing) -> str: - 
data_json = json.dumps(results, separators=(",", ":")) - missing_json = json.dumps(missing, separators=(",", ":")) - return ( - f"\n" - ) - - -def bake_html(out_path: Path): - if not TEMPLATE.exists(): - sys.exit(f"Template not found: {TEMPLATE}") - - results = [] - missing = [] - for key, _label in GROUPS: - for n in INPUTS: - r = load_estimate(key, n) - if r["ok"]: - results.append({ - "group": key, - "n": n, - "mean": r["mean"], - "lb": r.get("lb"), - "ub": r.get("ub"), - }) - else: - missing.append({"group": key, "n": n, "path": r["path"], "error": r["error"]}) - - html = TEMPLATE.read_text() - # Inject inline data just before the main logic script that defines GROUPS - marker = "\n" + )) +} + +fn display_repo_relative(path: &Path, repo_root: &Path) -> String { + path.strip_prefix(repo_root) + .unwrap_or(path) + .display() + .to_string() + .replace('\\', "/") +} + +fn git_short_head_sha() -> Result { + let output = Command::new("git") + .args(["rev-parse", "--short", "HEAD"]) + .output() + .context("failed to run `git rev-parse --short HEAD`")?; + if !output.status.success() { + bail!("git rev-parse --short HEAD failed with {}", output.status); + } + Ok(String::from_utf8_lossy(&output.stdout).trim().to_owned()) +} + +fn local_benchmark_machine_descriptor() -> BenchMachineDescriptor { + let hostname = std::env::var("HOSTNAME") + .ok() + .filter(|value| !value.trim().is_empty()) + .or_else(|| { + std::env::var("COMPUTERNAME") + .ok() + .filter(|value| !value.trim().is_empty()) + }); + let os = std::env::consts::OS.to_owned(); + let arch = std::env::consts::ARCH.to_owned(); + let label = hostname.as_ref().map_or_else( + || format!("{os}/{arch}"), + |host| format!("{os}/{arch} on {host}"), + ); + + BenchMachineDescriptor { + os, + arch, + hostname, + label, + } +} + fn run_pr_threads_list(selector: Option<&str>) -> Result<()> { let overview = fetch_pr_overview(selector)?; let threads = fetch_unresolved_review_threads(&overview)?; @@ -6373,4 +7126,94 @@ mod tests { 
assert!(is_gh_auth_error("you must authenticate with GitHub")); assert!(is_gh_auth_error("bad credentials")); } + + #[test] + fn parse_policy_case_handles_worker_suffix_form() { + let Some(case) = parse_policy_case(Path::new("dynamic_per_worker_4w/1000")) else { + unreachable!("expected worker suffix policy case"); + }; + + assert_eq!( + case, + ParsedPolicyCase { + policy: "dynamic_per_worker".to_owned(), + workers: "4w".to_owned(), + load: 1000, + } + ); + } + + #[test] + fn parse_policy_case_handles_unlisted_worker_suffix_form() { + let Some(case) = parse_policy_case(Path::new("static_per_worker_16w/1000")) else { + unreachable!("expected generic worker suffix policy case"); + }; + + assert_eq!( + case, + ParsedPolicyCase { + policy: "static_per_worker".to_owned(), + workers: "16w".to_owned(), + load: 1000, + } + ); + } + + #[test] + fn parse_policy_case_handles_dedicated_two_segment_form() { + let Some(case) = parse_policy_case(Path::new("dedicated_per_shard/100")) else { + unreachable!("expected dedicated policy case"); + }; + + assert_eq!( + case, + ParsedPolicyCase { + policy: "dedicated_per_shard".to_owned(), + workers: "dedicated".to_owned(), + load: 100, + } + ); + } + + #[test] + fn benchmark_inline_script_embeds_policy_payload_metadata() { + let script = assert_ok( + build_benchmark_inline_script( + &[], + &[], + &PolicyMatrixPayload { + group: BENCH_POLICY_GROUP.to_owned(), + baked_at: Some("2026-03-28T22:40:30Z".to_owned()), + baked_git_sha: Some("deadbeef".to_owned()), + baked_source_digest: Some("cafebabe".to_owned()), + template_path: Some("docs/benchmarks/index.html".to_owned()), + machine: Some(BenchMachineDescriptor { + os: "macos".to_owned(), + arch: "aarch64".to_owned(), + hostname: None, + label: "macos/aarch64".to_owned(), + }), + criterion_root: Some("target/criterion/parallel_policy_matrix".to_owned()), + results: vec![PolicyMatrixRow { + policy: "static_per_worker".to_owned(), + workers: "4w".to_owned(), + load: 1000, + path: 
"target/criterion/parallel_policy_matrix/static_per_worker_4w/1000/new/estimates.json".to_owned(), + mean_ns: 138309.37, + lb_ns: Some(137130.76), + ub_ns: Some(139395.20), + series: "static_per_worker:4w".to_owned(), + }], + }, + ), + "inline benchmark script should serialize", + ); + + assert!(script.contains("window.__POLICY_MATRIX__ =")); + assert!(script.contains("\"baked_at\":\"2026-03-28T22:40:30Z\"")); + assert!(script.contains("\"baked_git_sha\":\"deadbeef\"")); + assert!(script.contains("\"baked_source_digest\":\"cafebabe\"")); + assert!(script.contains("\"template_path\":\"docs/benchmarks/index.html\"")); + assert!(script.contains("\"criterion_root\":\"target/criterion/parallel_policy_matrix\"")); + } }