// This file is part of Substrate.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Benchmarking for `pallet-example-basic`.
// Only enable this module for benchmarking.
#![cfg(feature = "runtime-benchmarks")]

use crate::*;
use frame_benchmarking::v2::*;
use frame_system::RawOrigin;

// To actually run this benchmark on pallet-example-basic, we need to put this pallet into the
// runtime and compile it with the `runtime-benchmarks` feature. The detailed procedure is
// documented at:
// https://docs.substrate.io/reference/how-to-guides/weights/add-benchmarks/
//
// The auto-generated weight estimate of this pallet is copied over to the `weights.rs` file.
// The exact command used to generate the estimate is printed at the top of that file.

// Details on using the benchmarks macro can be seen at:
// https://paritytech.github.io/substrate/master/frame_benchmarking/trait.Benchmarking.html#tymethod.benchmarks
#[benchmarks]
mod benchmarks {
	use super::*;

	// This will measure the execution time of `set_dummy`.
	#[benchmark]
	fn set_dummy_benchmark() {
		// This is the benchmark setup phase.
		// `set_dummy` is a constant time function, hence we hard-code some random value here.
		let value = 1000u32.into();

		// The execution phase is just running the `set_dummy` extrinsic call. Only the
		// statement under `#[extrinsic_call]` is timed by the benchmarking framework.
		#[extrinsic_call]
		set_dummy(RawOrigin::Root, value);

		// This is the optional benchmark verification phase, asserting certain states.
		// It runs after timing, so it does not affect the measured weight.
		assert_eq!(Pallet::<T>::dummy(), Some(value))
	}

	// An example method that returns a `Result` that can be called within a benchmark.
	// Always succeeds; it exists only to demonstrate `?`-propagation below.
	fn example_result_method() -> Result<(), BenchmarkError> {
		Ok(())
	}

	// This will measure the execution time of `accumulate_dummy`.
	// The benchmark execution phase is shorthanded: when the name of the benchmark case is
	// the same as the extrinsic call, `_(...)` is used to represent the extrinsic name.
	// The benchmark verification phase is omitted.
	#[benchmark]
	fn accumulate_dummy() -> Result<(), BenchmarkError> {
		let value = 1000u32.into();
		// The caller account is whitelisted for DB reads/write by the benchmarking macro.
		let caller: T::AccountId = whitelisted_caller();

		// An example of calling something result-based within a benchmark using the `?`
		// operator. This necessitates specifying the `Result<(), BenchmarkError>` return
		// type on the benchmark function itself.
		example_result_method()?;

		// You can use `_` if the name of the Call matches the benchmark name.
		#[extrinsic_call]
		_(RawOrigin::Signed(caller), value);

		// Need this to be compatible with the `Result` return type.
		Ok(())
	}

	/// You can write helper functions in here since it's a normal Rust module.
	/// Builds a length-`len` vector in descending order (worst case for sorting).
	fn setup_vector(len: u32) -> Vec<u32> {
		let mut vector = Vec::<u32>::new();
		for i in (0..len).rev() {
			vector.push(i);
		}
		vector
	}

	// This will measure the execution time of sorting a vector.
	//
	// Define `x` as a linear component with range `[0, 10_000]`. This means that the
	// benchmarking will assume that the weight grows at a linear rate depending on `x`.
	#[benchmark]
	fn sort_vector(x: Linear<0, 10_000>) {
		let mut vector = setup_vector(x);

		// The benchmark execution phase could also be a closure with custom code:
		// everything inside `#[block]` is what gets timed.
		#[block]
		{
			vector.sort();
		}

		// Check that it was sorted correctly. This will not be benchmarked and is just for
		// verification.
		vector.windows(2).for_each(|w| assert!(w[0] <= w[1]));
	}

	// This line generates test cases for benchmarking, and could be run by:
	// `cargo test -p pallet-example-basic --all-features`, you will see one line per case:
	// `test benchmarking::bench_sort_vector ... ok`
	// `test benchmarking::bench_accumulate_dummy ... ok`
	// `test benchmarking::bench_set_dummy_benchmark ... ok` in the result.
	//
	// The line generates three steps per benchmark, with repeat=1 and the three steps are
	// [low, mid, high] of the range.
	impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test);
}