diff --git a/.gitlab/pipeline/publish.yml b/.gitlab/pipeline/publish.yml
index d8f5d5832291f..68712610ad236 100644
--- a/.gitlab/pipeline/publish.yml
+++ b/.gitlab/pipeline/publish.yml
@@ -74,6 +74,8 @@ publish-subsystem-benchmarks:
artifacts: true
- job: subsystem-benchmark-availability-distribution
artifacts: true
+ - job: subsystem-benchmark-approval-voting
+ artifacts: true
- job: publish-rustdoc
artifacts: false
script:
@@ -115,6 +117,8 @@ trigger_workflow:
artifacts: true
- job: subsystem-benchmark-availability-distribution
artifacts: true
+ - job: subsystem-benchmark-approval-voting
+ artifacts: true
script:
- echo "Triggering workflow"
- >
diff --git a/.gitlab/pipeline/test.yml b/.gitlab/pipeline/test.yml
index 1d6efd7b9fd1a..c17a3ce35eaf5 100644
--- a/.gitlab/pipeline/test.yml
+++ b/.gitlab/pipeline/test.yml
@@ -511,7 +511,7 @@ test-syscalls:
fi
allow_failure: false # this rarely triggers in practice
-subsystem-benchmark-availability-recovery:
+.subsystem-benchmark-template:
stage: test
artifacts:
name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
@@ -523,26 +523,26 @@ subsystem-benchmark-availability-recovery:
- .docker-env
- .common-refs
- .run-immediately
- script:
- - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks
tags:
- benchmark
+
+subsystem-benchmark-availability-recovery:
+ extends:
+ - .subsystem-benchmark-template
+ script:
+ - cargo bench -p polkadot-availability-recovery --bench availability-recovery-regression-bench --features subsystem-benchmarks
allow_failure: true
subsystem-benchmark-availability-distribution:
- stage: test
- artifacts:
- name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}"
- when: always
- expire_in: 1 hour
- paths:
- - charts/
extends:
- - .docker-env
- - .common-refs
- - .run-immediately
+ - .subsystem-benchmark-template
script:
- cargo bench -p polkadot-availability-distribution --bench availability-distribution-regression-bench --features subsystem-benchmarks
- tags:
- - benchmark
+ allow_failure: true
+
+subsystem-benchmark-approval-voting:
+ extends:
+ - .subsystem-benchmark-template
+ script:
+ - cargo bench -p polkadot-node-core-approval-voting --bench approval-voting-regression-bench --features subsystem-benchmarks
allow_failure: true
diff --git a/Cargo.lock b/Cargo.lock
index 7bf5215b6deca..aafe8a46e7824 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -13018,6 +13018,7 @@ dependencies = [
"polkadot-overseer",
"polkadot-primitives",
"polkadot-primitives-test-helpers",
+ "polkadot-subsystem-bench",
"rand 0.8.5",
"rand_chacha 0.3.1",
"rand_core 0.6.4",
diff --git a/polkadot/node/core/approval-voting/Cargo.toml b/polkadot/node/core/approval-voting/Cargo.toml
index ced7706c40a28..473bc67923b6f 100644
--- a/polkadot/node/core/approval-voting/Cargo.toml
+++ b/polkadot/node/core/approval-voting/Cargo.toml
@@ -53,3 +53,14 @@ kvdb-memorydb = "0.13.0"
test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../../primitives/test-helpers" }
log = { workspace = true, default-features = true }
env_logger = "0.11"
+
+polkadot-subsystem-bench = { path = "../../subsystem-bench" }
+
+[[bench]]
+name = "approval-voting-regression-bench"
+path = "benches/approval-voting-regression-bench.rs"
+harness = false
+required-features = ["subsystem-benchmarks"]
+
+[features]
+subsystem-benchmarks = []
diff --git a/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs
new file mode 100644
index 0000000000000..cad45dc64d2e6
--- /dev/null
+++ b/polkadot/node/core/approval-voting/benches/approval-voting-regression-bench.rs
@@ -0,0 +1,94 @@
+// Copyright (C) Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! approval-voting throughput test
+//!
+//! Approval Voting benchmark based on Kusama parameters and scale.
+//!
+//! Subsystems involved:
+//! - approval-distribution
+//! - approval-voting
+
+use polkadot_subsystem_bench::{
+ self,
+ approval::{bench_approvals, prepare_test, ApprovalsOptions},
+ configuration::TestConfiguration,
+ usage::BenchmarkUsage,
+ utils::save_to_file,
+};
+use std::io::Write;
+
+const BENCH_COUNT: usize = 10;
+
+fn main() -> Result<(), String> {
+ let mut messages = vec![];
+ let mut config = TestConfiguration::default();
+ config.n_cores = 100;
+ config.n_validators = 500;
+ config.num_blocks = 10;
+ config.peer_bandwidth = 524288000000;
+ config.bandwidth = 524288000000;
+ config.latency = None;
+ config.connectivity = 100;
+ config.generate_pov_sizes();
+ let options = ApprovalsOptions {
+ last_considered_tranche: 89,
+ coalesce_mean: 3.0,
+ coalesce_std_dev: 1.0,
+ coalesce_tranche_diff: 12,
+ enable_assignments_v2: true,
+ stop_when_approved: false,
+ workdir_prefix: "/tmp".to_string(),
+ num_no_shows_per_candidate: 0,
+ };
+
+ println!("Benchmarking...");
+	let usages: Vec<BenchmarkUsage> = (0..BENCH_COUNT)
+ .map(|n| {
+ print!("\r[{}{}]", "#".repeat(n), "_".repeat(BENCH_COUNT - n));
+ std::io::stdout().flush().unwrap();
+ let (mut env, state) = prepare_test(config.clone(), options.clone(), false);
+ env.runtime().block_on(bench_approvals("approvals_throughput", &mut env, state))
+ })
+ .collect();
+ println!("\rDone!{}", " ".repeat(BENCH_COUNT));
+
+ let average_usage = BenchmarkUsage::average(&usages);
+ save_to_file(
+		"charts/approval-voting-regression-bench.json",
+ average_usage.to_chart_json().map_err(|e| e.to_string())?,
+ )
+ .map_err(|e| e.to_string())?;
+ println!("{}", average_usage);
+
+ // We expect no variance for received and sent
+ // but use 0.001 because we operate with floats
+ messages.extend(average_usage.check_network_usage(&[
+ ("Received from peers", 52944.7000, 0.001),
+ ("Sent to peers", 63532.2000, 0.001),
+ ]));
+ messages.extend(average_usage.check_cpu_usage(&[
+ ("approval-distribution", 7.7883, 0.1),
+ ("approval-voting", 10.4655, 0.1),
+ ]));
+
+ if messages.is_empty() {
+ Ok(())
+ } else {
+ eprintln!("{}", messages.join("\n"));
+ Err("Regressions found".to_string())
+ }
+}