diff --git a/Cargo.lock b/Cargo.lock index 25e1bc9a6..a6bbc1811 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1606,7 +1606,7 @@ dependencies = [ "bitflags 2.6.0", "cexpr", "clang-sys", - "itertools 0.12.1", + "itertools 0.13.0", "log", "prettyplease", "proc-macro2", @@ -3055,7 +3055,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -3984,7 +3984,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.10", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -4503,7 +4503,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -5938,7 +5938,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.13.0", "proc-macro2", "quote", "syn 2.0.95", @@ -6632,7 +6632,6 @@ dependencies = [ [[package]] name = "reth-blockchain-tree-api" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -6644,7 +6643,6 @@ dependencies = [ [[package]] name = "reth-chainspec" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-chains", "alloy-eips 0.1.4", @@ -6663,7 +6661,6 @@ dependencies = [ [[package]] name = "reth-codecs" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-consensus 0.1.4", "alloy-eips 0.1.4", @@ -6678,7 +6675,6 @@ dependencies = [ [[package]] name = "reth-codecs-derive" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "convert_case 0.6.0", "proc-macro2", @@ -6689,7 +6685,6 @@ dependencies = [ [[package]] name = "reth-consensus" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "auto_impl", "reth-primitives", @@ -6699,7 +6694,6 @@ dependencies = [ [[package]] name = "reth-consensus-common" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "reth-chainspec", "reth-consensus", @@ -6709,7 +6703,6 @@ dependencies = [ [[package]] name = "reth-db" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "bytes", "derive_more 0.99.18", @@ -6735,7 +6728,6 @@ dependencies = [ [[package]] name = "reth-db-api" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "bytes", "derive_more 0.99.18", @@ -6754,7 +6746,6 @@ dependencies = [ [[package]] name = "reth-discv4" version = "1.0.0-rc.2" -source = 
"git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-primitives 0.7.7", "alloy-rlp", @@ -6778,7 +6769,6 @@ dependencies = [ [[package]] name = "reth-ecies" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "aes 0.8.4", "alloy-primitives 0.7.7", @@ -6809,7 +6799,6 @@ dependencies = [ [[package]] name = "reth-errors" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "reth-blockchain-tree-api", "reth-consensus", @@ -6822,7 +6811,6 @@ dependencies = [ [[package]] name = "reth-eth-wire" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-rlp", "bytes", @@ -6849,7 +6837,6 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-rlp", "bytes", @@ -6864,7 +6851,6 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "reth-chainspec", "reth-consensus", @@ -6876,7 +6862,6 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-chains", "alloy-primitives 0.7.7", @@ -6889,7 +6874,6 @@ dependencies = [ [[package]] name = "reth-evm" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "auto_impl", "futures-util", @@ -6906,7 +6890,6 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-eips 0.1.4", "alloy-sol-types 0.7.7", @@ -6921,12 +6904,12 @@ dependencies = [ "reth-prune-types", "reth-revm", "revm-primitives", + "tracing", ] [[package]] name = "reth-execution-errors" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-eips 0.1.4", "alloy-primitives 0.7.7", @@ -6940,7 +6923,6 @@ dependencies = [ [[package]] name = "reth-execution-types" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "reth-execution-errors", "reth-primitives", @@ -6951,7 +6933,6 @@ dependencies = [ [[package]] name = "reth-fs-util" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "serde_json", "thiserror 1.0.63", @@ -6960,7 +6941,6 @@ dependencies = [ [[package]] name = "reth-metrics" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "metrics", 
"reth-metrics-derive", @@ -6969,7 +6949,6 @@ dependencies = [ [[package]] name = "reth-metrics-derive" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "once_cell", "proc-macro2", @@ -6981,7 +6960,6 @@ dependencies = [ [[package]] name = "reth-net-common" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-primitives 0.7.7", "tokio", @@ -6990,7 +6968,6 @@ dependencies = [ [[package]] name = "reth-net-nat" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "futures-util", "reqwest 0.12.5", @@ -7002,7 +6979,6 @@ dependencies = [ [[package]] name = "reth-network-api" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-primitives 0.7.7", "enr 0.12.1", @@ -7017,7 +6993,6 @@ dependencies = [ [[package]] name = "reth-network-p2p" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "auto_impl", "futures", @@ -7035,7 +7010,6 @@ dependencies = [ [[package]] name = "reth-network-peers" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-primitives 0.7.7", "alloy-rlp", @@ -7050,7 +7024,6 @@ dependencies = [ [[package]] name = "reth-nippy-jar" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "anyhow", "bincode", @@ -7070,7 +7043,6 @@ dependencies = [ [[package]] name = "reth-primitives" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-consensus 0.1.4", "alloy-eips 0.1.4", @@ -7104,7 +7076,6 @@ dependencies = [ [[package]] name = "reth-primitives-traits" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-consensus 0.1.4", "alloy-eips 0.1.4", @@ -7123,7 +7094,6 @@ dependencies = [ [[package]] name = "reth-provider" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-rpc-types-engine", "auto_impl", @@ -7161,7 +7131,6 @@ dependencies = [ [[package]] name = "reth-prune-types" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-primitives 0.7.7", "bytes", @@ -7175,7 +7144,6 @@ dependencies = [ [[package]] name = "reth-revm" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-eips 0.1.4", "alloy-rlp", @@ -7193,7 +7161,6 @@ dependencies = [ [[package]] name = "reth-rpc-types" version = "1.0.0-rc.2" -source = 
"git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-primitives 0.7.7", "alloy-rpc-types", @@ -7212,7 +7179,6 @@ dependencies = [ [[package]] name = "reth-stages-types" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-primitives 0.7.7", "bytes", @@ -7225,7 +7191,6 @@ dependencies = [ [[package]] name = "reth-static-file-types" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-primitives 0.7.7", "derive_more 0.99.18", @@ -7236,7 +7201,6 @@ dependencies = [ [[package]] name = "reth-storage-api" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "auto_impl", "reth-chainspec", @@ -7253,7 +7217,6 @@ dependencies = [ [[package]] name = "reth-storage-errors" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "reth-fs-util", "reth-primitives", @@ -7263,7 +7226,6 @@ dependencies = [ [[package]] name = "reth-tracing" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "clap 4.5.9", "eyre", @@ -7278,7 +7240,6 @@ dependencies = [ [[package]] name = "reth-trie" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-rlp", "auto_impl", @@ -7299,7 +7260,6 @@ dependencies = [ [[package]] name = "reth-trie-common" version = "1.0.0-rc.2" -source = "git+https://github.com/taikoxyz/taiko-reth.git?branch=v1.0.0-rc.2-taiko#f124fe7bbf636ec53fbb1c8efb95ff1275bfd711" dependencies = [ "alloy-consensus 0.1.4", "alloy-genesis", @@ -7319,7 +7279,6 @@ dependencies = [ [[package]] name = "revm" version = "9.0.0" -source = "git+https://github.com/taikoxyz/revm.git?branch=v36-taiko#337a98f982e165c5a84cfc25d965344bfd4081bc" dependencies = [ "auto_impl", "cfg-if", @@ -7333,7 +7292,6 @@ dependencies = [ [[package]] name = "revm-interpreter" version = "5.0.0" -source = "git+https://github.com/taikoxyz/revm.git?branch=v36-taiko#337a98f982e165c5a84cfc25d965344bfd4081bc" dependencies = [ "revm-primitives", "serde", @@ -7342,7 +7300,6 @@ dependencies = [ [[package]] name = "revm-precompile" version = "7.0.0" -source = "git+https://github.com/taikoxyz/revm.git?branch=v36-taiko#337a98f982e165c5a84cfc25d965344bfd4081bc" dependencies = [ "aurora-engine-modexp", "blst", @@ -7359,7 +7316,6 @@ dependencies = [ [[package]] name = "revm-primitives" version = "4.0.0" -source = "git+https://github.com/taikoxyz/revm.git?branch=v36-taiko#337a98f982e165c5a84cfc25d965344bfd4081bc" dependencies = [ "alloy-primitives 0.7.7", "auto_impl", @@ -7964,7 +7920,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -9405,7 +9361,7 @@ dependencies = [ "getrandom 0.2.15", "once_cell", "rustix", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 298abcd98..02559a71b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -188,9 +188,21 @@ dotenv = "0.15.0" backoff = "0.4.0" 
[patch.crates-io] -revm = { git = "https://github.com/taikoxyz/revm.git", branch = "v36-taiko" } -revm-primitives = { git = "https://github.com/taikoxyz/revm.git", branch = "v36-taiko" } -revm-precompile = { git = "https://github.com/taikoxyz/revm.git", branch = "v36-taiko" } +# revm = { git = "https://github.com/taikoxyz/revm.git", branch = "v36-taiko" } +# revm-primitives = { git = "https://github.com/taikoxyz/revm.git", branch = "v36-taiko" } +# revm-precompile = { git = "https://github.com/taikoxyz/revm.git", branch = "v36-taiko" } +revm = { path = "../../taiko/revm/crates/revm" } +revm-primitives = { path = "../../taiko/revm/crates/primitives" } +revm-precompile = { path = "../../taiko/revm/crates/precompile" } secp256k1 = { git = "https://github.com/CeciliaZ030/rust-secp256k1", branch = "sp1-patch" } blst = { git = "https://github.com/CeciliaZ030/blst.git", branch = "v0.3.12-serialize" } alloy-serde = { git = "https://github.com/CeciliaZ030/alloy.git", branch = "v0.1.4-fix" } + +[patch."https://github.com/taikoxyz/taiko-reth.git"] +reth-primitives = { path = "../../taiko/taiko-reth/crates/primitives" } +reth-evm-ethereum = { path = "../../taiko/taiko-reth/crates/ethereum/evm" } +reth-evm = { path = "../../taiko/taiko-reth/crates/evm" } +reth-rpc-types = { path = "../../taiko/taiko-reth/crates/rpc/rpc-types" } +reth-revm = { path = "../../taiko/taiko-reth/crates/revm" } +reth-chainspec = { path = "../../taiko/taiko-reth/crates/chainspec" } +reth-provider = { path = "../../taiko/taiko-reth/crates/storage/provider" } diff --git a/core/src/interfaces.rs index cd3a66e01..9d19a4cdb 100644 --- a/core/src/interfaces.rs +++ b/core/src/interfaces.rs @@ -3,8 +3,8 @@ use alloy_primitives::{Address, B256}; use clap::Args; use raiko_lib::{ input::{ - AggregationGuestInput, AggregationGuestOutput, BlobProofType, GuestBatchInput, GuestInput, - GuestOutput, + AggregationGuestInput, AggregationGuestOutput, BlobProofType, GuestBatchInput, + GuestBatchOutput, GuestInput, GuestOutput, }, proof_type::ProofType, prover::{IdStore, IdWrite, Proof, ProofKey, Prover, ProverError}, @@ -124,13 +124,12 @@ pub async fn run_prover( pub async fn run_batch_prover( proof_type: ProofType, input: GuestBatchInput, - output: &GuestOutput, + output: &GuestBatchOutput, config: &Value, store: Option<&mut dyn IdWrite>, -) -> RaikoResult<Vec<Proof>> { - let shared_store = store.map(|s| Arc::new(Mutex::new(s))); +) -> RaikoResult<Proof> { match proof_type { - ProofType::Native => NativeProver::batch_run(input.clone(), output, config, shared_store) + ProofType::Native => NativeProver::batch_run(input.clone(), output, config, store) .await .map_err(<ProverError as Into<RaikoError>>::into), ProofType::Sp1 => { @@ -151,7 +150,7 @@ } ProofType::Sgx => { #[cfg(feature = "sgx")] - return sgx_prover::SgxProver::run(input.clone(), output, config, store) + return sgx_prover::SgxProver::batch_run(input.clone(), output, config, store) .await .map_err(|e| e.into()); #[cfg(not(feature = "sgx"))] @@ -244,6 +243,8 @@ pub async fn cancel_proof( pub struct ProofRequest { /// The block number for the block to generate a proof for. pub block_number: u64, + /// The id of the batch to generate a proof for. + pub batch_id: u64, /// The l1 block number of the l2 block be proposed. pub l1_inclusion_block_number: u64, /// The l2_l1 block pairs for batch proof generation. @@ -274,6 +275,9 @@ pub struct ProofRequestOpt { /// The block number for the block to generate a proof for. 
pub block_number: Option<u64>, #[arg(long, require_equals = true)] + /// The batch id for the batch of blocks to generate a proof for. + pub batch_id: Option<u64>, + #[arg(long, require_equals = true)] /// The block number for the l2 block to be proposed. /// in hekla, it is the anchored l1 block height - 1 /// in ontake, it is the anchored l1 block height - (1..64) @@ -362,6 +366,7 @@ impl TryFrom<ProofRequestOpt> for ProofRequest { block_number: value.block_number.ok_or(RaikoError::InvalidRequestConfig( "Missing block number".to_string(), ))?, + batch_id: value.batch_id.unwrap_or_default(), l1_inclusion_block_number: value.l1_inclusion_block_number.unwrap_or_default(), network: value.network.ok_or(RaikoError::InvalidRequestConfig( "Missing network".to_string(), @@ -445,6 +450,7 @@ impl From<AggregationRequest> for Vec<ProofRequestOpt> { .map( |&(block_number, l1_inclusion_block_number)| ProofRequestOpt { block_number: Some(block_number), + batch_id: None, l1_inclusion_block_number, l2_l1_block_pairs: Vec::new(), network: value.network.clone(), @@ -464,6 +470,7 @@ impl From<AggregationRequest> for ProofRequestOpt { fn from(value: AggregationRequest) -> Self { ProofRequestOpt { block_number: None, + batch_id: None, l1_inclusion_block_number: None, l2_l1_block_pairs: value.block_numbers.iter().map(|(id, _)| *id).collect(), network: value.network, diff --git a/core/src/lib.rs index c2b8333f1..c6e9772db 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -6,11 +6,12 @@ use interfaces::{cancel_proof, run_batch_prover, run_prover}; use raiko_lib::{ builder::{create_mem_db, RethBlockBuilder}, consts::ChainSpec, - input::{GuestBatchInput, GuestInput, GuestOutput, TaikoProverData}, + input::{GuestBatchInput, GuestBatchOutput, GuestInput, GuestOutput, TaikoProverData}, protocol_instance::ProtocolInstance, prover::{IdStore, IdWrite, Proof, ProofKey}, + utils::{generate_batch_transactions, generate_transactions}, }; -use reth_primitives::Header; +use reth_primitives::{Block, Header}; use serde_json::Value; use tracing::{debug, error, info, warn}; @@ -62,13 +63,14 @@ impl Raiko { fn get_batch_preflight_data(&self) -> BatchPreflightData { BatchPreflightData { + batch_id: self.request.batch_id, block_numbers: self .request .l2_l1_block_pairs .iter() .map(|(b, _)| *b) .collect(), - l1_inclusion_block_numbers: None, // todo: user input + l1_inclusion_block_number: self.request.l1_inclusion_block_number, // todo: user input l1_chain_spec: self.l1_chain_spec.to_owned(), taiko_chain_spec: self.taiko_chain_spec.to_owned(), prover_data: TaikoProverData { @@ -97,7 +99,10 @@ ) -> RaikoResult<GuestBatchInput> { //TODO: read fork from config let preflight_data = self.get_batch_preflight_data(); - info!("Generating input for block {}", self.request.block_number); + info!( "Generating batch input for block {}", self.request.block_number ); batch_preflight(provider, preflight_data) .await .map_err(Into::<RaikoError>::into) } pub fn get_output(&self, input: &GuestInput) -> RaikoResult<GuestOutput> { let db = create_mem_db(&mut input.clone()).unwrap(); let mut builder = RethBlockBuilder::new(input, db); - builder.execute_transactions(false).expect("execute"); + let pool_tx = generate_transactions( &input.chain_spec, &input.taiko.block_proposed, &input.taiko.tx_data, &input.taiko.anchor_tx, ); + builder .execute_transactions(pool_tx, false) .expect("execute"); let result = builder.finalize(); match result { @@ -135,22 +148,74 @@ } } - pub fn get_batch_output(&self, input: &GuestBatchInput) -> RaikoResult<GuestOutput> { - let outputs = input - .inputs - .iter() 
- .map(|input| self.get_output(input)) - .collect::<Vec<RaikoResult<GuestOutput>>>(); - assert!( - !outputs.iter().any(|output| output.is_err()), - "Error in generating batch output: {:?}", - outputs - ); - let result = - outputs.last().unwrap().as_ref().map_err(|e| { - RaikoError::Preflight(format!("Error in generating batch output: {e}")) - })?; - Ok(result.clone()) + pub fn get_batch_output(&self, batch_input: &GuestBatchInput) -> RaikoResult<GuestBatchOutput> { + let pool_txs_list = + generate_batch_transactions(&batch_input.taiko.chain_spec, &batch_input.taiko); + let blocks = batch_input.inputs.iter().zip(pool_txs_list).try_fold( + Vec::new(), + |mut acc, input_and_txs| -> RaikoResult<Vec<Block>> { + let (input, pool_txs) = input_and_txs; + let output = self.single_output_for_batch(pool_txs, input)?; + acc.push(output); + Ok(acc) + }, + )?; + + blocks.windows(2).try_for_each(|window| { + let parent = &window[0]; + let current = &window[1]; + if parent.header.hash_slow() != current.header.parent_hash { + return Err(RaikoError::Guest( + raiko_lib::prover::ProverError::GuestError("Parent hash mismatch".to_string()), + )); + } + Ok(()) + })?; + + Ok(GuestBatchOutput { + blocks: blocks.clone(), + hash: ProtocolInstance::new_batch(batch_input, blocks, self.request.proof_type)? + .instance_hash(), + }) + } + + fn single_output_for_batch( + &self, + origin_pool_txs: Vec<TransactionSigned>, + input: &GuestInput, + ) -> RaikoResult<Block> { + let db = create_mem_db(&mut input.clone()).unwrap(); + let mut builder = RethBlockBuilder::new(input, db); + + let mut pool_txs = vec![input.taiko.anchor_tx.clone().unwrap()]; + pool_txs.extend_from_slice(&origin_pool_txs); + + builder + .execute_transactions(pool_txs, false) + .expect("execute"); + let result = builder.finalize_block(); + + match result { + Ok(block) => { + let header = block.header.clone(); + info!("Verifying final state using provider data ..."); + info!( + "Final block hash derived successfully. {}", + header.hash_slow() + ); + debug!("Final block derived successfully. 
{block:?}"); + // Check if the header is the expected one + check_header(&input.block.header, &header)?; + + Ok(block.clone()) + } + Err(e) => { + warn!("Proving bad block construction!"); + Err(RaikoError::Guest( + raiko_lib::prover::ProverError::GuestError(e.to_string()), + )) + } + } } pub async fn prove( @@ -166,9 +231,9 @@ impl Raiko { pub async fn batch_prove( &self, input: GuestBatchInput, - output: &GuestOutput, + output: &GuestBatchOutput, store: Option<&mut dyn IdWrite>, - ) -> RaikoResult> { + ) -> RaikoResult { let config = serde_json::to_value(&self.request)?; run_batch_prover(self.request.proof_type, input, output, &config, store).await } @@ -274,6 +339,7 @@ pub fn merge(a: &mut Value, b: &Value) { #[cfg(test)] mod tests { use crate::interfaces::aggregate_proofs; + use crate::preflight::parse_l1_batch_proposal_tx_for_pacaya_fork; use crate::{interfaces::ProofRequest, provider::rpc::RpcBlockDataProvider, ChainSpec, Raiko}; use alloy_primitives::Address; use alloy_provider::Provider; @@ -287,7 +353,7 @@ mod tests { }; use serde_json::{json, Value}; use std::{collections::HashMap, env, str::FromStr}; - use tracing::debug; + use tracing::{debug, trace}; fn get_proof_type_from_env() -> ProofType { let proof_type = env::var("TARGET").unwrap_or("native".to_string()); @@ -369,11 +435,18 @@ mod tests { l1_chain_spec: ChainSpec, taiko_chain_spec: ChainSpec, proof_request: ProofRequest, - ) -> Vec { - let batch_parent_block_number = proof_request.l2_l1_block_pairs.first().unwrap().0 - 1; - let provider_target_blocks = (batch_parent_block_number - ..=proof_request.l2_l1_block_pairs.last().unwrap().0) - .collect(); + ) -> Proof { + let all_prove_blocks = parse_l1_batch_proposal_tx_for_pacaya_fork( + &l1_chain_spec, + &taiko_chain_spec, + proof_request.l1_inclusion_block_number, + proof_request.batch_id, + ) + .await + .expect("Could not parse L1 batch proposal tx"); + // provider target blocks are all blocks in the batch and the parent block of block[0] + let provider_target_blocks = + (all_prove_blocks[0] - 1..=*all_prove_blocks.last().unwrap()).collect(); let provider = RpcBlockDataProvider::new_batch(&taiko_chain_spec.rpc, provider_target_blocks) .await @@ -383,7 +456,7 @@ mod tests { .generate_batch_input(provider) .await .expect("input generation failed"); - debug!("batch guest input: {input:?}"); + trace!("batch guest input: {input:?}"); let output = raiko .get_batch_output(&input) .expect("output generation failed"); @@ -396,13 +469,11 @@ mod tests { #[ignore] #[tokio::test(flavor = "multi_thread")] - async fn test_prove_block_taiko_dev() { + async fn test_prove_batch_block_taiko_dev() { + env_logger::init(); let proof_type = get_proof_type_from_env(); let l1_network = "taiko_dev_l1".to_owned(); let network = "taiko_dev".to_owned(); - // Give the CI an simpler block to test because it doesn't have enough memory. - // Unfortunately that also means that kzg is not getting fully verified by CI. 
- let block_number = 20; let chain_specs = SupportedChainSpecs::merge_from_file( "../host/config/chain_spec_list_devnet.json".into(), ) .unwrap(); let taiko_chain_spec = chain_specs.get_chain_spec(&network).unwrap(); let l1_chain_spec = chain_specs.get_chain_spec(&l1_network).unwrap(); let proof_request = ProofRequest { - block_number, - l1_inclusion_block_number: 80, - l2_l1_block_pairs: Vec::new(), + block_number: 0, + batch_id: 911, + l1_inclusion_block_number: 1836, + l2_l1_block_pairs: vec![(911, None)], network, graffiti: B256::ZERO, prover: Address::ZERO, @@ -422,7 +494,7 @@ blob_proof_type: BlobProofType::ProofOfEquivalence, prover_args: test_proof_params(false), }; - prove_block(l1_chain_spec, taiko_chain_spec, proof_request).await; + batch_prove_block(l1_chain_spec, taiko_chain_spec, proof_request).await; } #[tokio::test(flavor = "multi_thread")] @@ -449,8 +521,9 @@ .unwrap(); let proof_request = ProofRequest { - block_number, - l1_inclusion_block_number: 0, + block_number: 0, + batch_id: 1, + l1_inclusion_block_number: 1000, l2_l1_block_pairs: vec![(block_number, None)], network, graffiti: B256::ZERO, @@ -481,8 +554,9 @@ let proof_request = ProofRequest { block_number, + batch_id: 0, l1_inclusion_block_number: 0, - l2_l1_block_pairs: vec![(block_number, None)], + l2_l1_block_pairs: vec![], network, graffiti: B256::ZERO, prover: Address::ZERO, @@ -521,6 +595,7 @@ ); let proof_request = ProofRequest { block_number, + batch_id: 0, l1_inclusion_block_number: 0, l2_l1_block_pairs: Vec::new(), network, @@ -555,6 +630,7 @@ ); let proof_request = ProofRequest { block_number, + batch_id: 0, l1_inclusion_block_number: 0, l2_l1_block_pairs: Vec::new(), network, @@ -586,6 +662,7 @@ let proof_request = ProofRequest { block_number, + batch_id: 0, l1_inclusion_block_number: 0, l2_l1_block_pairs: Vec::new(), network, diff --git a/core/src/preflight/mod.rs index 5a9c6fe67..dceef2e3f 100644 --- a/core/src/preflight/mod.rs +++ b/core/src/preflight/mod.rs @@ -1,14 +1,17 @@ use std::collections::HashSet; use alloy_primitives::Bytes; +use anyhow::bail; // use alloy_rpc_types::Block; use raiko_lib::{ builder::RethBlockBuilder, consts::ChainSpec, input::{BlobProofType, GuestBatchInput, GuestInput, TaikoGuestInput, TaikoProverData}, primitives::mpt::proofs_to_tries, + utils::{generate_batch_transactions, generate_transactions}, Measurement, }; +use reth_primitives::TransactionSigned; use crate::{ interfaces::{RaikoError, RaikoResult}, @@ -21,6 +24,8 @@ use util::{ prepare_taiko_chain_batch_input, prepare_taiko_chain_input, }; +pub use util::parse_l1_batch_proposal_tx_for_pacaya_fork; + mod util; pub struct PreflightData { @@ -33,10 +38,9 @@ } pub struct BatchPreflightData { + pub batch_id: u64, pub block_numbers: Vec<u64>, - // in real batch, we will have only 1 inclusion block number - // here use vec for back compatbiility - pub l1_inclusion_block_numbers: Option<Vec<u64>>, + pub l1_inclusion_block_number: u64, pub l1_chain_spec: ChainSpec, pub taiko_chain_spec: ChainSpec, pub prover_data: TaikoProverData, @@ -117,8 +121,15 @@ pub async fn preflight( // Now re-execute the transactions in the block to collect all required data let mut builder = RethBlockBuilder::new(&input, provider_db); + let pool_tx = generate_transactions( &input.chain_spec, &input.taiko.block_proposed, &input.taiko.tx_data, &input.taiko.anchor_tx, ); + // Optimize data gathering by executing the transactions multiple times so data can be requested in batches 
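+ // The pool txs are now decoded from the proposal's DA data once, up front, and handed + // to execute_txs explicitly instead of being re-derived inside the builder on every + // optimistic iteration.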
- execute_txs(&mut builder).await?; + execute_txs(&mut builder, pool_tx).await?; let Some(db) = builder.db.as_mut() else { return Err(RaikoError::Preflight("No db in builder".to_owned())); }; @@ -172,12 +183,13 @@ pub async fn batch_preflight( provider: BDP, BatchPreflightData { + batch_id, block_numbers, l1_chain_spec, taiko_chain_spec, prover_data, blob_proof_type, - l1_inclusion_block_numbers, + l1_inclusion_block_number, }: BatchPreflightData, ) -> RaikoResult<GuestBatchInput> { let measurement = Measurement::start("Fetching block data...", false); ... .iter() .map(|(block, _)| block.clone()) .collect::<Vec<_>>(); - let taiko_guest_inputs = if taiko_chain_spec.is_taiko() { + let taiko_guest_batch_input = if taiko_chain_spec.is_taiko() { prepare_taiko_chain_batch_input( &l1_chain_spec, &taiko_chain_spec, - l2_l1_block_pairs, + l1_inclusion_block_number, + batch_id, &all_prove_blocks, prover_data, &blob_proof_type, ) .await? } else { - // For Ethereum blocks we just convert the block transactions in a tx_list - // so that we don't have to supports separate paths. - all_prove_blocks - .iter() - .map(|block| { - TaikoGuestInput::try_from(block.body.clone()) - .map_err(|e| RaikoError::Conversion(e.0)) - }) - .collect::<RaikoResult<Vec<_>>>()? + return Err(RaikoError::Preflight( "Batch preflight is only used for Taiko chains".to_owned(), )); }; measurement.stop(); - info!("taiko_guest_inputs.len(): {:?}", taiko_guest_inputs.len()); info!("block_parent_pairs.len(): {:?}", block_parent_pairs.len()); + + // distribute txs to each block + let pool_txs_list: Vec<Vec<TransactionSigned>> = + generate_batch_transactions(&l1_chain_spec, &taiko_guest_batch_input); + + assert_eq!(block_parent_pairs.len(), pool_txs_list.len()); + let mut batch_guest_input = Vec::new(); - for ((prove_block, parent_block), taiko_input) in - block_parent_pairs.iter().zip(taiko_guest_inputs.iter()) + for ((prove_block, parent_block), pure_pool_txs) in + block_parent_pairs.iter().zip(pool_txs_list.iter()) { let parent_header: reth_primitives::Header = parent_block.header.clone().try_into().map_err(|e| { @@ -228,6 +241,18 @@ })?; let parent_block_number = parent_header.number; + let anchor_tx = prove_block.body.first().unwrap().clone(); + let taiko_input = TaikoGuestInput { + l1_header: taiko_guest_batch_input.l1_header.clone(), + tx_data: Vec::new(), + anchor_tx: Some(anchor_tx.clone()), + block_proposed: taiko_guest_batch_input.batch_proposed.clone(), + prover_data: taiko_guest_batch_input.prover_data.clone(), + blob_commitment: None, + blob_proof: None, + blob_proof_type: taiko_guest_batch_input.blob_proof_type.clone(), + }; + // Create the guest input let input = GuestInput { block: prove_block.clone(), @@ ... let mut builder = RethBlockBuilder::new(&input, provider_db); // Optimize data gathering by executing the transactions multiple times so data can be requested in batches - execute_txs(&mut builder).await?; + let mut pool_txs = vec![anchor_tx.clone()]; + pool_txs.extend_from_slice(pure_pool_txs); + execute_txs(&mut builder, pool_txs).await?; let Some(db) = builder.db.as_mut() else { return Err(RaikoError::Preflight("No db in builder".to_owned())); }; @@ ... Ok(GuestBatchInput { inputs: batch_guest_input, + taiko: taiko_guest_batch_input, }) } diff --git a/core/src/preflight/util.rs index 418c98e05..2d5655ad6 100644 --- a/core/src/preflight/util.rs +++ 
b/core/src/preflight/util.rs @@ -15,13 +15,14 @@ use raiko_lib::{ inplace_print, input::{ ontake::{BlockProposedV2, CalldataTxList}, - proposeBlockCall, BlobProofType, BlockProposed, BlockProposedFork, TaikoGuestInput, - TaikoProverData, + pacaya::BatchProposed, + proposeBlockCall, BlobProofType, BlockProposed, BlockProposedFork, TaikoGuestBatchInput, + TaikoGuestInput, TaikoProverData, }, primitives::eip4844::{self, commitment_to_version_hash, KZG_SETTINGS}, }; -use reth_evm_ethereum::taiko::{decode_anchor, decode_anchor_ontake}; -use reth_primitives::Block as RethBlock; +use reth_evm_ethereum::taiko::{decode_anchor, decode_anchor_ontake, decode_anchor_pacaya}; +use reth_primitives::{Block as RethBlock, TransactionSigned}; use reth_revm::primitives::SpecId; use serde::{Deserialize, Serialize}; use std::iter; @@ -36,6 +37,7 @@ use crate::{ /// Optimize data gathering by executing the transactions multiple times so data can be requested in batches pub async fn execute_txs<'a, BDP>( builder: &mut RethBlockBuilder<ProviderDb<'a, BDP>>, + pool_txs: Vec<TransactionSigned>, ) -> RaikoResult<()> where BDP: BlockDataProvider, @@ -50,7 +52,7 @@ db.optimistic = num_iterations + 1 < max_iterations; builder - .execute_transactions(num_iterations + 1 < max_iterations) + .execute_transactions(pool_txs.clone(), num_iterations + 1 < max_iterations) .map_err(|e| { RaikoError::Preflight(format!("Executing transactions in builder failed: {e}")) })?; @@ -87,6 +89,13 @@ pub async fn prepare_taiko_chain_input( // get anchor block num and state root let fork = taiko_chain_spec.active_fork(block.number, block.timestamp)?; let (anchor_block_height, anchor_state_root) = match fork { + SpecId::PACAYA => { + warn!("pacaya fork does not support prepare_taiko_chain_input for single block"); + return Err(RaikoError::Preflight( "pacaya fork does not support prepare_taiko_chain_input for single block" .to_owned(), )); + } SpecId::ONTAKE => { let anchor_call = decode_anchor_ontake(anchor_tx.input())?; (anchor_call._anchorBlockId, anchor_call._anchorStateRoot) } @@ ... @@ -156,7 +165,7 @@ ), )?; - get_tx_data( + get_tx_blob( expected_blob_hash, l1_inclusion_header.timestamp, l1_chain_spec, @@ ... .await? } else { match fork { + SpecId::PACAYA => { + warn!("pacaya fork does not support prepare_taiko_chain_input for single block"); + return Err(RaikoError::Preflight( "pacaya fork does not support prepare_taiko_chain_input for single block" .to_owned(), )); + } SpecId::ONTAKE => { // Get the tx list data directly from the propose block CalldataTxList event let (_, CalldataTxList { txList, .. 
}) = get_calldata_txlist_event( @@ -200,43 +216,161 @@ }) } +// Get the fork-specific anchor block height and state root from the anchor tx +fn get_anchor_tx_info_by_fork( + fork: SpecId, + anchor_tx: &TransactionSigned, +) -> RaikoResult<(u64, B256)> { + match fork { + SpecId::PACAYA => { + let anchor_call = decode_anchor_pacaya(anchor_tx.input())?; + Ok((anchor_call._anchorBlockId, anchor_call._anchorStateRoot)) + } + SpecId::ONTAKE => { + let anchor_call = decode_anchor_ontake(anchor_tx.input())?; + Ok((anchor_call._anchorBlockId, anchor_call._anchorStateRoot)) + } + _ => { + let anchor_call = decode_anchor(anchor_tx.input())?; + Ok((anchor_call.l1BlockId, anchor_call.l1StateRoot)) + } + } +} + +/// A problem here is that we need to know the fork of the batch proposal tx, +/// but in batch mode there is no block number in the proof request, +/// so we hard-code the fork to Pacaya here. +/// Returns the block numbers of the batch, i.e. [start(lastBlockId - len() + 1), end(lastBlockId)] +pub async fn parse_l1_batch_proposal_tx_for_pacaya_fork( + l1_chain_spec: &ChainSpec, + taiko_chain_spec: &ChainSpec, + l1_inclusion_block_number: u64, + batch_id: u64, +) -> RaikoResult<Vec<u64>> { + let provider_l1 = RpcBlockDataProvider::new(&l1_chain_spec.rpc, 0).await?; + let (l1_inclusion_height, _tx, batch_proposed_fork) = get_block_proposed_event_by_height( provider_l1.provider(), taiko_chain_spec.clone(), l1_inclusion_block_number, batch_id, SpecId::PACAYA, ) .await?; + + assert!( + l1_inclusion_block_number == l1_inclusion_height, + "proposal tx inclusion block != proof_request block" + ); + if let BlockProposedFork::Pacaya(batch_proposed) = batch_proposed_fork { + let batch_info = &batch_proposed.info; + Ok( + ((batch_info.lastBlockId - (batch_info.blocks.len() as u64 - 1)) + ..=batch_info.lastBlockId) + .collect(), + ) + } else { + Err(RaikoError::Preflight( + "BatchProposedFork is not Pacaya".to_owned(), + )) + } +} + /// Prepare the input for a Taiko chain pub async fn prepare_taiko_chain_batch_input( l1_chain_spec: &ChainSpec, taiko_chain_spec: &ChainSpec, - l2_l1_block_pairs: Vec<(u64, Option<u64>)>, - blocks: &[RethBlock], + l1_inclusion_block_number: u64, + batch_id: u64, + batch_blocks: &[RethBlock], prover_data: TaikoProverData, blob_proof_type: &BlobProofType, -) -> RaikoResult<Vec<TaikoGuestInput>> { - let mut batch_inputs = Vec::with_capacity(l2_l1_block_pairs.len()); - for (l2_block_number, l1_inclusion_block_number) in l2_l1_block_pairs { - let block = blocks - .iter() - .find(|block| block.number == l2_block_number) - .ok_or_else(|| { - RaikoError::Preflight("No block for requested block number".to_owned()) - })?; +) -> RaikoResult<TaikoGuestBatchInput> { + // Get the L1 block in which the L2 block was included so we can fetch the DA data. + // Also get the L1 state block header so that we can prove the L1 state root. 
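+ // Every block in a Pacaya batch must anchor to the same L1 block: each block's anchor tx + // is decoded and the resulting (anchor_block_height, anchor_state_root) pairs are asserted + // identical below before any L1 data is fetched.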
+ // Decode the anchor tx to find out which L1 blocks we need to fetch + let batch_anchor_tx_info = batch_blocks.iter().try_fold(Vec::new(), |mut acc, block| { + let anchor_tx = block + .body + .first() + .ok_or_else(|| RaikoError::Preflight("No anchor tx in the block".to_owned()))?; + let fork = taiko_chain_spec.active_fork(block.number, block.timestamp)?; + ensure!(fork == SpecId::PACAYA, "Only pacaya fork supports batch"); + let anchor_info = get_anchor_tx_info_by_fork(fork, anchor_tx)?; + acc.push(anchor_info); + Ok(acc) + })?; - let input = prepare_taiko_chain_input( - l1_chain_spec, - taiko_chain_spec, - l2_block_number, + assert!( + batch_anchor_tx_info.windows(2).all(|w| w[0] == w[1]), + "batch anchor tx info mismatch" + ); + + let (anchor_block_height, anchor_state_root) = batch_anchor_tx_info[0]; + let fork = taiko_chain_spec.active_fork(batch_blocks[0].number, batch_blocks[0].timestamp)?; + let provider_l1 = RpcBlockDataProvider::new(&l1_chain_spec.rpc, 0).await?; + // todo: duplicate code with parse_l1_batch_proposal_tx_for_pacaya_fork(), better to make these values fn parameters + let (l1_inclusion_height, batch_proposal_tx, batch_proposed_fork) = + get_block_proposed_event_by_height( provider_l1.provider(), taiko_chain_spec.clone(), l1_inclusion_block_number, + batch_id, + fork, + ) + .await?; + assert_eq!(l1_inclusion_block_number, l1_inclusion_height); + let (l1_inclusion_header, l1_state_header) = get_headers( &provider_l1, (l1_inclusion_block_number, anchor_block_height), ) .await?; + assert_eq!(anchor_state_root, l1_state_header.state_root); + + if let BlockProposedFork::Pacaya(batch_proposed) = batch_proposed_fork { + let batch_info = &batch_proposed.info; + let batch_meta = &batch_proposed.meta; + let blob_hashes = batch_info.blobHashes.clone(); + let blob_byte_offset = batch_info.blobByteOffset as usize; + let blob_byte_size = batch_info.blobByteSize as usize; + let blob_tx_buffers = get_batch_tx_data_with_proofs( blob_hashes, l1_inclusion_header.timestamp, l1_chain_spec, blob_proof_type, ) .await?; - info!( "Prepared batch input for block number: {l2_block_number:?}, L1 inclusion block number: {l1_inclusion_block_number:?}" ); - batch_inputs.push(input); + + // todo: extract tx from tx calldata + let tx_data_from_calldata = Vec::new(); + return Ok(TaikoGuestBatchInput { + batch_id, + batch_proposed: BlockProposedFork::Pacaya(batch_proposed), + l1_header: l1_state_header.try_into().unwrap(), + chain_spec: taiko_chain_spec.clone(), + prover_data, + tx_data_from_blob: blob_tx_buffers + .iter() + .map(|(tx, _, _)| tx.clone()) + .collect(), + tx_data_from_calldata, + blob_commitments: blob_tx_buffers + .iter() + .map(|(_, commit, _)| commit.clone()) + .collect(), + blob_proofs: blob_tx_buffers + .iter() + .map(|(_, _, proof)| proof.clone()) + .collect(), + blob_proof_type: blob_proof_type.clone(), + }); + } else { + Err(RaikoError::Preflight( + "BatchProposedFork is not Pacaya".to_owned(), + )) } - Ok(batch_inputs) } -pub async fn get_tx_data( +pub async fn get_tx_blob( blob_hash: B256, timestamp: u64, chain_spec: &ChainSpec, @@ -276,6 +410,22 @@ Ok((blob, Some(commitment.to_vec()), blob_proof)) } +/// Get the tx data (blob data) for each blob hash, +/// along with the proof for each blob +pub async fn get_batch_tx_data_with_proofs( + blob_hashes: Vec<B256>, + timestamp: u64, + chain_spec: &ChainSpec, + blob_proof_type: &BlobProofType, +) -> RaikoResult<Vec<(Vec<u8>, Option<Vec<u8>>, Option<Vec<u8>>)>> { + let mut 
tx_data = Vec::new(); + for hash in blob_hashes { + let data = get_tx_blob(hash, timestamp, chain_spec, blob_proof_type).await?; + tx_data.push(data); + } + Ok(tx_data) +} + pub async fn filter_blockchain_event( provider: &ReqwestProvider, gen_block_event_filter: impl Fn() -> Filter, @@ ... pub async fn filter_block_proposed_event( provider: &ReqwestProvider, chain_spec: ChainSpec, filter_condition: EventFilterConditioin, - l2_block_number: u64, + block_num_or_batch_id: u64, fork: SpecId, ) -> Result<(u64, AlloyRpcTransaction, BlockProposedFork)> { // Get the address that emitted the event @@ ... // Get the event signature (value can differ between chains) let event_signature = match fork { + SpecId::PACAYA => BatchProposed::SIGNATURE_HASH, SpecId::ONTAKE => BlockProposedV2::SIGNATURE_HASH, _ => BlockProposed::SIGNATURE_HASH, }; @@ ... ) else { bail!("Could not create log") }; - let (block_id, block_propose_event) = match fork { + let (block_or_batch_id, block_propose_event) = match fork { + SpecId::PACAYA => { + let event = BatchProposed::decode_log(&log_struct, false) + .map_err(|_| RaikoError::Anyhow(anyhow!("Could not decode log")))?; + ( + raiko_lib::primitives::U256::from(event.meta.batchId), + BlockProposedFork::Pacaya(event.data), + ) + } SpecId::ONTAKE => { let event = BlockProposedV2::decode_log(&log_struct, false) .map_err(|_| RaikoError::Anyhow(anyhow!("Could not decode log")))?; @@ -398,7 +557,7 @@ } }; - if block_id == raiko_lib::primitives::U256::from(l2_block_number) { + if block_or_batch_id == raiko_lib::primitives::U256::from(block_num_or_batch_id) { let Some(log_tx_hash) = log.transaction_hash else { bail!("No transaction hash in the log") }; @@ -412,7 +571,7 @@ } Err(anyhow!( - "No BlockProposed event found for block {l2_block_number}" + "No BlockProposed event found for block {block_num_or_batch_id}" )) } @@ -437,14 +596,14 @@ pub async fn get_block_proposed_event_by_height( provider: &ReqwestProvider, chain_spec: ChainSpec, l1_inclusion_block_number: u64, - l2_block_number: u64, + block_num_or_batch_id: u64, fork: SpecId, ) -> Result<(u64, AlloyRpcTransaction, BlockProposedFork)> { filter_block_proposed_event( provider, chain_spec, EventFilterConditioin::Height(l1_inclusion_block_number), - l2_block_number, + block_num_or_batch_id, fork, ) .await diff --git a/core/src/prover.rs index aa1ca8242..a5cf4a075 100644 --- a/core/src/prover.rs +++ b/core/src/prover.rs @@ -3,11 +3,12 @@ use std::sync::{Arc, Mutex}; use std::{ops::Deref, path::Path}; use raiko_lib::{ - input::{GuestInput, GuestOutput}, + input::{GuestBatchInput, GuestBatchOutput, GuestInput, GuestOutput}, proof_type::ProofType, protocol_instance::ProtocolInstance, prover::{IdStore, IdWrite, Proof, ProofKey, Prover, ProverConfig, ProverError, ProverResult}, }; +use reth_primitives::B256; use serde::{de::Error, Deserialize, Serialize}; use serde_with::serde_as; use tracing::trace; @@ -69,24 +70,49 @@ impl Prover for NativeProver { } async fn batch_run( - input: raiko_lib::input::GuestBatchInput, - output: &GuestOutput, + batch_input: GuestBatchInput, + batch_output: &GuestBatchOutput, config: &ProverConfig, - store: Option<Arc<Mutex<&mut dyn IdWrite>>>, - ) -> ProverResult<Vec<Proof>> { - let mut batch_proofs = Vec::new(); - for input in input.inputs { - if store.is_some() { - let mut guard = 
store.as_ref().unwrap().lock().unwrap(); - let proof = - Self::run(input, output, config, Some(guard.deref_mut().deref_mut())).await?; - batch_proofs.push(proof); - } else { - let proof = Self::run(input, output, config, None).await?; - batch_proofs.push(proof); + _store: Option<&mut dyn IdWrite>, + ) -> ProverResult<Proof> { + let param = + config + .get("native") + .map(NativeParam::deserialize) + .ok_or(ProverError::Param(serde_json::Error::custom( + "native param not provided", + )))??; + + if let Some(path) = param.json_guest_input { + let path = Path::new(&path); + if let Some(parent) = path.parent() { + std::fs::create_dir_all(parent)?; } + let json = serde_json::to_string(&batch_input)?; + std::fs::write(path, json)?; + } + + trace!("Running the native prover for input {batch_input:?}"); + + let pi = ProtocolInstance::new_batch( + &batch_input, + batch_output.blocks.clone(), + ProofType::Native, + ) + .map_err(|e| ProverError::GuestError(e.to_string()))?; + if pi.instance_hash() != batch_output.hash { + return Err(ProverError::GuestError( + "Protocol Instance hash not matched".to_string(), + )); } - Ok(batch_proofs) + + Ok(Proof { + input: Some(batch_output.hash), + proof: None, + quote: None, + uuid: None, + kzg_proof: None, + }) } async fn cancel(_proof_key: ProofKey, _read: Box<&mut dyn IdStore>) -> ProverResult<()> { diff --git a/core/src/provider/rpc.rs index 855a6a6e1..1a3424e57 100644 --- a/core/src/provider/rpc.rs +++ b/core/src/provider/rpc.rs @@ -3,7 +3,6 @@ use alloy_provider::{ProviderBuilder, ReqwestProvider, RootProvider}; use alloy_rpc_client::{ClientBuilder, RpcClient}; use alloy_rpc_types::{Block, BlockId, BlockNumberOrTag, EIP1186AccountProofResponse}; use alloy_transport_http::Http; -use ethers_core::k256::sha2::digest::block_buffer; use raiko_lib::clear_line; use reqwest_alloy::Client; use reth_primitives::revm_primitives::{AccountInfo, Bytecode}; diff --git a/host/config/chain_spec_list_devnet.json index f5daf3628..49da0139f 100644 --- a/host/config/chain_spec_list_devnet.json +++ b/host/config/chain_spec_list_devnet.json @@ -31,20 +31,23 @@ "RISC0": null } }, - "genesis_time": 1728872400, + "genesis_time": 1738916500, "seconds_per_slot": 12, "is_taiko": false }, { "name": "taiko_dev", "chain_id": 167001, - "max_spec_id": "ONTAKE", + "max_spec_id": "PACAYA", "hard_forks": { "HEKLA": { "Block": 0 }, "ONTAKE": { - "Block": 500 + "Block": 0 + }, + "PACAYA": { + "Block": 0 }, "CANCUN": "TBD" }, @@ -54,7 +57,7 @@ "base_fee_max_decrease_denominator": "0x8", "elasticity_multiplier": "0x2" }, - "l1_contract": "0xA4702E22F8807Df82Fe5B6dDdd99eB3Fcb0237B0", + "l1_contract": "0x8096ba9b2ba1bf0C34777309a58eF88926A009a8", "l2_contract": "0x1670010000000000000000000000000000010001", "rpc": "https://rpc.internal.taiko.xyz", "beacon_rpc": null, @@ -68,10 +71,15 @@ "SGX": "0xebB0DA61818F639f460F67940EB269b36d1F104E", "SP1": "0x748d4a7e3a49adEbA2157B2d581434A6Cc226D1F", "RISC0": "0xDf8038e9f4535040D7421A89ead398b3A38366EC" + }, + "PACAYA": { + "SGX": "0xebB0DA61818F639f460F67940EB269b36d1F104E", + "SP1": "0x748d4a7e3a49adEbA2157B2d581434A6Cc226D1F", + "RISC0": "0xDf8038e9f4535040D7421A89ead398b3A38366EC" } }, "genesis_time": 0, "seconds_per_slot": 1, "is_taiko": true } -] +] \ No newline at end of file diff --git a/host/src/proof.rs index c111fe0f2..76d97e14b 100644 --- a/host/src/proof.rs +++ b/host/src/proof.rs @@ -503,7 +503,6 @@ pub async fn handle_proof( // Execute the proof generation. 
let total_time = Measurement::start("", false); - let batch_mode = proof_request.l2_l1_block_pairs.is_some(); let raiko = Raiko::new( l1_chain_spec.clone(), taiko_chain_spec.clone(), @@ -596,6 +595,7 @@ mod tests { // Add some pending tasks actor.pending_tasks.lock().await.push_back(ProofRequest { block_number: 1, + batch_id: 0, l1_inclusion_block_number: 1, l2_l1_block_pairs: None, network: "test".to_string(), @@ -670,6 +670,7 @@ mod tests { { actor.pending_tasks.lock().await.push_back(ProofRequest { block_number: 1, + batch_id: 0, l1_inclusion_block_number: 1, l2_l1_block_pairs: None, network: "test".to_string(), diff --git a/host/src/server/api/v3/proof/mod.rs b/host/src/server/api/v3/proof/mod.rs index a47861cae..ff8246ef4 100644 --- a/host/src/server/api/v3/proof/mod.rs +++ b/host/src/server/api/v3/proof/mod.rs @@ -47,8 +47,6 @@ async fn proof_handler( // options with the request from the client. aggregation_request.merge(&prover_state.request_config())?; - let mut tasks = Vec::with_capacity(aggregation_request.block_numbers.len()); - let proof_request_opt: ProofRequestOpt = aggregation_request.clone().into(); // Construct the actual proof request from the available configs. @@ -58,7 +56,7 @@ async fn proof_handler( let (chain_id, blockhash) = get_batch_task_data( &proof_request.network, - proof_request.block_numbers.last().unwrap().0, + &vec![proof_request.batch_id], &prover_state.chain_specs, ) .await?; diff --git a/lib/src/builder.rs b/lib/src/builder.rs index 0148533dd..4e8700d2c 100644 --- a/lib/src/builder.rs +++ b/lib/src/builder.rs @@ -3,11 +3,11 @@ use std::sync::Arc; use crate::primitives::keccak::keccak; use crate::primitives::mpt::StateAccount; -use crate::utils::generate_transactions; +use crate::utils::{generate_batch_transactions, generate_transactions}; use crate::{ consts::{ChainSpec, MAX_BLOCK_HASH_AGE}, guest_mem_forget, - input::GuestInput, + input::{GuestBatchInput, GuestInput}, mem_db::{AccountState, DbAccount, MemDb}, CycleTracker, }; @@ -24,7 +24,9 @@ use reth_primitives::revm_primitives::db::{Database, DatabaseCommit}; use reth_primitives::revm_primitives::{ Account, AccountInfo, AccountStatus, Bytecode, Bytes, HashMap, SpecId, }; -use reth_primitives::{Address, BlockWithSenders, Header, B256, KECCAK_EMPTY, U256}; +use reth_primitives::{ + Address, Block, BlockWithSenders, Header, TransactionSigned, B256, KECCAK_EMPTY, U256, +}; use tracing::{debug, error}; pub fn calculate_block_header(input: &GuestInput) -> Header { @@ -33,9 +35,17 @@ pub fn calculate_block_header(input: &GuestInput) -> Header { cycle_tracker.end(); let mut builder = RethBlockBuilder::new(input, db); + let pool_tx = generate_transactions( + &input.chain_spec, + &input.taiko.block_proposed, + &input.taiko.tx_data, + &input.taiko.anchor_tx, + ); let cycle_tracker = CycleTracker::start("execute_transactions"); - builder.execute_transactions(false).expect("execute"); + builder + .execute_transactions(pool_tx, false) + .expect("execute"); cycle_tracker.end(); let cycle_tracker = CycleTracker::start("finalize"); @@ -45,22 +55,24 @@ pub fn calculate_block_header(input: &GuestInput) -> Header { header } -pub fn calculate_batch_blocks_final_header(input: &GuestInput) -> Header { - let cycle_tracker = CycleTracker::start("initialize_database"); - let db = create_mem_db(&mut input.clone()).unwrap(); - cycle_tracker.end(); - - let mut builder = RethBlockBuilder::new(input, db); - - let cycle_tracker = CycleTracker::start("execute_transactions"); - builder.execute_transactions(false).expect("execute"); - 
cycle_tracker.end(); - - let cycle_tracker = CycleTracker::start("finalize"); - let header = builder.finalize().expect("execute"); - cycle_tracker.end(); - - header +pub fn calculate_batch_blocks_final_header(input: &GuestBatchInput) -> Vec<Block> { + let pool_txs_list = generate_batch_transactions(&input.taiko.chain_spec, &input.taiko); + let mut final_blocks = Vec::new(); + for (i, pool_txs) in pool_txs_list.iter().enumerate() { + let mut builder = RethBlockBuilder::new( + &input.inputs[i], + create_mem_db(&mut input.inputs[i].clone()).unwrap(), + ); + builder + .execute_transactions(pool_txs.clone(), false) + .expect("execute"); + final_blocks.push( + builder + .finalize_block() + .expect("execute single batched block"), + ); + } + final_blocks } /// Optimistic database @@ -93,7 +105,11 @@ impl<DB: Database<Error: Debug> + DatabaseCommit + OptimisticDatabase> } /// Executes all input transactions. - pub fn execute_transactions(&mut self, optimistic: bool) -> Result<()> { + pub fn execute_transactions( + &mut self, + pool_txs: Vec<TransactionSigned>, + optimistic: bool, + ) -> Result<()> { // Get the chain spec let chain_spec = &self.input.chain_spec; let total_difficulty = U256::ZERO; @@ -130,7 +146,7 @@ reth_chain_spec .fork(Hardfork::Hekla) .active_at_block(block_num), - "evm fork is not active, please update the chain spec" + "evm fork HEKLA is not active, please update the chain spec" ); } SpecId::ONTAKE => { assert!( reth_chain_spec .fork(Hardfork::Ontake) .active_at_block(block_num), - "evm fork is not active, please update the chain spec" + "evm fork ONTAKE is not active, please update the chain spec" ); } + SpecId::PACAYA => { + assert!( + reth_chain_spec + .fork(Hardfork::Pacaya) + .active_at_block(block_num), + "evm fork PACAYA is not active, please update the chain spec" + ); + } _ => unimplemented!(), } // Generate the transactions from the tx list let mut block = self.input.block.clone(); - block.body = generate_transactions( - &self.input.chain_spec, - &self.input.taiko.block_proposed, - &self.input.taiko.tx_data, - &self.input.taiko.anchor_tx, - ); + block.body = pool_txs; // Recover senders let mut block = block .with_recovered_senders() @@ -249,6 +268,13 @@ Ok(self.input.block.header.clone()) } + /// Finalizes the block building and returns the finalized block + pub fn finalize_block(&mut self) -> Result<Block> { + let state_root = self.calculate_state_root()?; + ensure!(self.input.block.state_root == state_root); + Ok(self.input.block.clone()) + } + /// Calculates the state root of the block pub fn calculate_state_root(&mut self) -> Result<B256> { let mut account_touched = 0; diff --git a/lib/src/input.rs index dd7b57643..2db426f50 100644 --- a/lib/src/input.rs +++ b/lib/src/input.rs @@ -2,6 +2,7 @@ use core::{fmt::Debug, str::FromStr}; use anyhow::{anyhow, Error, Result}; use ontake::BlockProposedV2; +use pacaya::{BatchInfo, BatchProposed}; use reth_evm_ethereum::taiko::ProtocolBaseFeeConfig; use reth_primitives::{ revm_primitives::{Address, Bytes, HashMap, B256, U256}, @@ -43,11 +44,28 @@ pub struct GuestInput { pub taiko: TaikoGuestInput, } +/// Batch-level Taiko guest input shared by all blocks in a proposed batch. 
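+/// Unlike the per-block `TaikoGuestInput`, this carries the data common to the whole batch: +/// the BatchProposed event, the anchor L1 header, and the raw tx data from blobs and calldata +/// together with their blob commitments and proofs.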
+#[serde_as] +#[derive(Debug, Clone, Default, Deserialize, Serialize)] +pub struct TaikoGuestBatchInput { + pub batch_id: u64, + pub l1_header: Header, + pub batch_proposed: BlockProposedFork, + pub chain_spec: ChainSpec, + pub prover_data: TaikoProverData, + pub tx_data_from_blob: Vec<Vec<u8>>, + pub tx_data_from_calldata: Vec<u8>, + pub blob_commitments: Option<Vec<Vec<u8>>>, + pub blob_proofs: Option<Vec<Vec<u8>>>, + pub blob_proof_type: BlobProofType, +} + /// External block input. #[serde_as] #[derive(Debug, Clone, Default, Deserialize, Serialize)] pub struct GuestBatchInput { pub inputs: Vec<GuestInput>, + pub taiko: TaikoGuestBatchInput, } /// External aggregation input. @@ -93,6 +111,7 @@ pub enum BlockProposedFork { Nothing, Hekla(BlockProposed), Ontake(BlockProposedV2), + Pacaya(BatchProposed), } impl BlockProposedFork { @@ -100,6 +119,7 @@ match self { BlockProposedFork::Hekla(block) => block.meta.blobUsed, BlockProposedFork::Ontake(block) => block.meta.blobUsed, + BlockProposedFork::Pacaya(batch) => !batch.info.blobHashes.is_empty(), _ => false, } } @@ -108,6 +128,7 @@ match self { BlockProposedFork::Hekla(block) => block.meta.id, BlockProposedFork::Ontake(block) => block.meta.id, + BlockProposedFork::Pacaya(_batch) => 0, _ => 0, } } @@ -116,6 +137,7 @@ match self { BlockProposedFork::Hekla(block) => block.meta.timestamp, BlockProposedFork::Ontake(block) => block.meta.timestamp, + BlockProposedFork::Pacaya(_batch) => 0, _ => 0, } } @@ -129,6 +151,13 @@ min_gas_excess: block.meta.baseFeeConfig.minGasExcess, max_gas_issuance_per_block: block.meta.baseFeeConfig.maxGasIssuancePerBlock, }, + BlockProposedFork::Pacaya(batch) => ProtocolBaseFeeConfig { + adjustment_quotient: batch.info.baseFeeConfig.adjustmentQuotient, + sharing_pctg: batch.info.baseFeeConfig.sharingPctg, + gas_issuance_per_second: batch.info.baseFeeConfig.gasIssuancePerSecond, + min_gas_excess: batch.info.baseFeeConfig.minGasExcess, + max_gas_issuance_per_block: batch.info.baseFeeConfig.maxGasIssuancePerBlock, + }, _ => ProtocolBaseFeeConfig::default(), } } @@ -139,6 +168,10 @@ block.meta.blobTxListOffset as usize, block.meta.blobTxListLength as usize, )), + BlockProposedFork::Pacaya(batch) => Some(( + batch.info.blobByteOffset as usize, + batch.info.blobByteSize as usize, + )), _ => None, } } @@ -147,9 +180,25 @@ match self { BlockProposedFork::Hekla(block) => block.meta.blobHash, BlockProposedFork::Ontake(block) => block.meta.blobHash, + // meaningless for pacaya + BlockProposedFork::Pacaya(_batch) => B256::default(), _ => B256::default(), } } + + pub fn blob_hashes(&self) -> &[B256] { + match self { + BlockProposedFork::Pacaya(batch) => &batch.info.blobHashes, + _ => &[], + } + } + + pub fn batch_info(&self) -> Option<&BatchInfo> { + match self { + BlockProposedFork::Pacaya(batch) => Some(&batch.info), + _ => None, + } + } } #[serde_as] @@ -223,6 +272,13 @@ pub struct GuestOutput { pub hash: B256, } +#[serde_as] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct GuestBatchOutput { + pub blocks: Vec<Block>, + pub hash: B256, +} + #[cfg(feature = "std")] use std::path::Path; #[cfg(feature = "std")] @@ -235,5 +291,6 @@ pub fn get_input_path(dir: &Path, block_number: u64, network: &str) -> PathBuf { mod hekla; pub mod ontake; +pub mod pacaya; pub use hekla::*; diff --git a/lib/src/input/pacaya.rs new file mode 100644 index 000000000..d752849ab --- /dev/null +++ 
b/lib/src/input/pacaya.rs @@ -0,0 +1,128 @@ +use alloy_sol_types::sol; +use core::fmt::Debug; +use serde::{Deserialize, Serialize}; + +sol! { + #[derive(Debug, Default, Deserialize, Serialize)] + struct BaseFeeConfig { + uint8 adjustmentQuotient; + uint8 sharingPctg; + uint32 gasIssuancePerSecond; + uint64 minGasExcess; + uint32 maxGasIssuancePerBlock; + } + + #[derive(Debug, Default, Deserialize, Serialize)] + struct BlockParams { + // The max number of transactions in this block. Note that if there are not enough + // transactions in calldata or blobs, the block will contain as many transactions as + // possible. + uint16 numTransactions; + // For the first block in a batch, the block timestamp is the batch params' `timestamp` + // plus this time shift value; + // For all other blocks in the same batch, the block timestamp is its parent block's + // timestamp plus this time shift value. + uint8 timeShift; + } + + #[derive(Debug, Default, Deserialize, Serialize)] + struct BlobParams { + // The hashes of the blobs. Note that if this array is not empty, `firstBlobIndex` and + // `numBlobs` must be 0. + bytes32[] blobHashes; + // The index of the first blob in this batch. + uint8 firstBlobIndex; + // The number of blobs in this batch. Blobs are initially concatenated and subsequently + // decompressed via Zlib. + uint8 numBlobs; + // The byte offset of the blob in the batch. + uint32 byteOffset; + // The byte size of the blob. + uint32 byteSize; + } + + #[derive(Debug, Default, Deserialize, Serialize)] + struct BatchParams { + address proposer; + address coinbase; + bytes32 parentMetaHash; + uint64 anchorBlockId; + bytes32 anchorInput; + uint64 lastBlockTimestamp; + bool revertIfNotFirstProposal; + bytes32[] signalSlots; + BlobParams blobParams; + // Specifies the number of blocks to be generated from this batch. + BlockParams[] blocks; + } + + #[derive(Debug, Default, Deserialize, Serialize)] + /// @dev This struct holds batch information essential for constructing blocks offchain, but it + /// does not include data necessary for batch proving. + struct BatchInfo { + bytes32 txsHash; + // Data to build L2 blocks + BlockParams[] blocks; + bytes32[] blobHashes; + bytes32 extraData; + address coinbase; + uint64 proposedIn; // Used by node/client + uint32 blobByteOffset; + uint32 blobByteSize; + uint32 gasLimit; + uint64 lastBlockId; + uint64 lastBlockTimestamp; + // Data for the L2 anchor transaction, shared by all blocks in the batch + uint64 anchorBlockId; + // Corresponds to the `_anchorStateRoot` parameter in the anchor transaction. + // The batch's validity proof shall verify the integrity of these two values. + bytes32 anchorBlockHash; + bytes32 anchorInput; + BaseFeeConfig baseFeeConfig; + bytes32[] signalSlots; + } + + #[derive(Debug, Default, Deserialize, Serialize)] + /// @dev This struct holds batch metadata essential for proving the batch. + struct BatchMetadata { + bytes32 infoHash; + address proposer; + uint64 batchId; + uint64 proposedAt; // Used by node/client + } + + /// @notice Struct representing transition to be proven. + struct Transition { + bytes32 parentHash; + bytes32 blockHash; + bytes32 stateRoot; + } + + /// @notice Emitted when a batch is proposed. + /// @param info The info of the proposed batch. + /// @param meta The metadata of the proposed batch. + /// @param txList The tx list in calldata.
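+ /// @dev A single BatchProposed event covers the whole batch; per-block parameters live in `info.blocks`.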
+ #[derive(Debug, Default, Deserialize, Serialize)] + event BatchProposed(BatchInfo info, BatchMetadata meta, bytes txList); + + #[derive(Debug)] + /// @notice Proposes a batch of blocks. + /// @param _params ABI-encoded BatchParams. + /// @param _txList The transaction list in calldata. If the txList is empty, blob will be used + /// for data availability. + /// @return info_ The info of the proposed batch. + /// @return meta_ The metadata of the proposed batch. + function proposeBatch( + bytes calldata _params, + bytes calldata _txList + ) + external + returns (BatchInfo memory info_, BatchMetadata memory meta_); + + /// @notice Proves state transitions for multiple batches with a single aggregated proof. + /// @param _params ABI-encoded parameter containing: + /// - metas: Array of metadata for each batch being proved. + /// - transitions: Array of batch transitions to be proved. + /// @param _proof The aggregated cryptographic proof proving the batches transitions. + function proveBatches(bytes calldata _params, bytes calldata _proof) external; +} diff --git a/lib/src/protocol_instance.rs index 051fa04b6..3b274a09d 100644 --- a/lib/src/protocol_instance.rs +++ b/lib/src/protocol_instance.rs @@ -1,7 +1,7 @@ use alloy_primitives::{Address, TxHash, B256}; use alloy_sol_types::SolValue; use anyhow::{ensure, Error, Result}; -use reth_primitives::Header; +use reth_primitives::{Block, Header}; #[cfg(not(feature = "std"))] use crate::no_std::*; @@ -9,6 +9,7 @@ use crate::{ consts::SupportedChainSpecs, input::{ ontake::{BlockMetadataV2, BlockProposedV2}, + pacaya::{BatchInfo, BatchMetadata, BlockParams}, BlobProofType, BlockMetadata, BlockProposed, BlockProposedFork, EthDeposit, GuestBatchInput, GuestInput, Transition, }, @@ -27,6 +28,7 @@ pub enum BlockMetaDataFork { None, Hekla(BlockMetadata), Ontake(BlockMetadataV2), + Pacaya(BatchMetadata), } impl From<(&GuestInput, &Header, B256, &BlockProposed)> for BlockMetadata { @@ -113,6 +115,117 @@ BlockProposedFork::Ontake(block_proposed_v2) => { Self::Ontake((input, header, tx_list_hash, block_proposed_v2).into()) } + BlockProposedFork::Pacaya(_batch_proposed) => { + unimplemented!("single block signature is not supported for pacaya fork") + } + } + } + + fn calculate_pacaya_txs_hash(tx_list_hash: B256, blob_hashes: &Vec<B256>) -> B256 { + debug!( + "calculate_pacaya_txs_hash from tx_list_hash: {:?}, blob_hashes: {:?}", + tx_list_hash, blob_hashes + ); + + let abi_encode_data: Vec<u8> = ( + tx_list_hash, + blob_hashes.iter().collect::<Vec<_>>(), + ) + .abi_encode() + .split_off(32); + debug!("abi_encode_data: {:?}", hex::encode(&abi_encode_data)); + keccak(abi_encode_data).into() + } + + fn from_batch_inputs(batch_input: &GuestBatchInput, final_blocks: Vec<Block>) -> Self { + match &batch_input.taiko.batch_proposed { + BlockProposedFork::Pacaya(batch_proposed) => { + // TODO: review the calculations one by one to make sure all of them are rooted in a trustworthy source + let txs_hash = Self::calculate_pacaya_txs_hash( + keccak(batch_input.taiko.tx_data_from_calldata.as_slice()).into(), + &batch_proposed.info.blobHashes, + ); + assert_eq!( + txs_hash, batch_proposed.info.txsHash, + "txs hash mismatch, expected: {:?}, got: {:?}", + txs_hash, batch_proposed.info.txsHash, + ); + let blocks = final_blocks + .iter() + .map(|block| BlockParams { + numTransactions: block.body.len() as u16 - 1, // exclude anchor tx + timeShift: (block.timestamp - batch_proposed.meta.proposedAt) as u8, + }) + .collect::<Vec<_>>(); +
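// Cross-check the locally rebuilt per-block params against the proposed batch params. +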
assert!( + blocks + .iter() + .zip(batch_proposed.info.blocks.iter()) + .all(|(a, b)| a.numTransactions == b.numTransactions + && a.timeShift == b.timeShift), + "blocks mismatch, expected: {:?}, got: {:?}", + blocks, + batch_proposed.info.blocks, + ); + let blob_hashes = batch_proposed.info.blobHashes.clone(); + let extra_data = batch_proposed.info.extraData; + let coinbase = batch_proposed.info.coinbase; + let proposed_in = batch_proposed.info.proposedIn; + let blob_byte_offset = batch_proposed.info.blobByteOffset; + let blob_byte_size = batch_proposed.info.blobByteSize; + let gas_limit = batch_proposed.info.gasLimit; + let last_block_id = final_blocks.last().unwrap().header.number; + assert!( + last_block_id == batch_proposed.info.lastBlockId, + "last block id mismatch, expected: {:?}, got: {:?}", + last_block_id, + batch_proposed.info.lastBlockId, + ); + let last_block_timestamp = final_blocks.last().unwrap().header.timestamp; + assert!( + last_block_timestamp == batch_proposed.info.lastBlockTimestamp, + "last block timestamp mismatch, expected: {:?}, got: {:?}", + last_block_timestamp, + batch_proposed.info.lastBlockTimestamp, + ); + // checked in anchor_check() + let anchor_block_id = batch_proposed.info.anchorBlockId; + let anchor_block_hash = batch_proposed.info.anchorBlockHash; + let anchor_input = batch_proposed.info.anchorInput; + let base_fee_config = batch_proposed.info.baseFeeConfig.clone(); + let signal_slots = batch_proposed.info.signalSlots.clone(); + BlockMetaDataFork::Pacaya(BatchMetadata { + // TODO: keccak data based on input + infoHash: keccak( + BatchInfo { + txsHash: txs_hash, + blocks, + blobHashes: blob_hashes, + extraData: extra_data, + coinbase, + proposedIn: proposed_in, + blobByteOffset: blob_byte_offset, + blobByteSize: blob_byte_size, + gasLimit: gas_limit, + lastBlockId: last_block_id, + lastBlockTimestamp: last_block_timestamp, + anchorBlockId: anchor_block_id, + anchorBlockHash: anchor_block_hash, + anchorInput: anchor_input, + baseFeeConfig: base_fee_config, + signalSlots: signal_slots, + } + .abi_encode(), + ) + .into(), + proposer: batch_proposed.meta.proposer, + batchId: batch_input.taiko.batch_id, + proposedAt: batch_proposed.meta.proposedAt, + }) + } + _ => { + unimplemented!("batch blocks signature is not supported before pacaya fork") + } } } @@ -122,6 +235,9 @@ (Self::Ontake(a), BlockProposedFork::Ontake(b)) => { a.abi_encode() == b.meta.abi_encode() } + (Self::Pacaya(a), BlockProposedFork::Pacaya(b)) => { + a.abi_encode() == b.meta.abi_encode() + } (Self::None, BlockProposedFork::Nothing) => true, _ => false, } @@ -138,6 +254,83 @@ pub struct ProtocolInstance { pub verifier_address: Address, } +fn verify_blob( + blob_proof_type: BlobProofType, + blob_data: &[u8], + versioned_hash: B256, + commitment: &[u8; 48], + blob_proof: Option<Vec<u8>>, +) -> Result<()> { + info!("blob proof type: {:?}", &blob_proof_type); + match blob_proof_type { + crate::input::BlobProofType::ProofOfEquivalence => { + let ct = CycleTracker::start("proof_of_equivalence"); + let (x, y) = eip4844::proof_of_equivalence(blob_data, &versioned_hash)?; + ct.end(); + let verified = eip4844::verify_kzg_proof_impl( + commitment.clone().try_into().unwrap(), + x, + y, + blob_proof + .map(|p| TryInto::<[u8; 48]>::try_into(p).unwrap()) + .unwrap(), + )?; + ensure!(verified); + } + BlobProofType::KzgVersionedHash => { + let ct = CycleTracker::start("proof_of_commitment"); + ensure!(commitment == &eip4844::calc_kzg_proof_commitment(blob_data)?); + ct.end(); + } + };
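+ // Both branches fail fast on mismatch, so reaching this point means the blob data matches its on-chain commitment. +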
Ok(()) +} + +/// Verify the blob usage in batch mode, i.e., check that the commitment recomputed from the raw +/// blob data matches the input blob commitment; the blob versioned hash is then derived from that +/// commitment and eventually gets connected to the on-chain blob hash. +fn verify_batch_mode_blob_usage( + batch_input: &GuestBatchInput, + proof_type: ProofType, +) -> Result<()> { + let blob_proof_type = + get_blob_proof_type(proof_type, batch_input.taiko.blob_proof_type.clone()); + + for blob_verify_param in batch_input + .taiko + .tx_data_from_blob + .iter() + .zip( + batch_input + .taiko + .blob_commitments + .clone() + .unwrap_or_default() + .iter(), + ) + .zip( + batch_input + .taiko + .blob_proofs + .clone() + .unwrap_or_default() + .iter(), + ) + { + let blob_data = blob_verify_param.0 .0; + let commitment = blob_verify_param.0 .1; + let versioned_hash = commitment_to_version_hash(&commitment.clone().try_into().unwrap()); + verify_blob( + blob_proof_type.clone(), + blob_data, + versioned_hash, + &commitment.clone().try_into().unwrap(), + Some(blob_verify_param.1.clone()), + )?; + } + Ok(()) +} + impl ProtocolInstance { pub fn new(input: &GuestInput, header: &Header, proof_type: ProofType) -> Result<Self> { let blob_used = input.taiko.block_proposed.blob_used(); @@ -154,36 +347,13 @@ let versioned_hash = commitment_to_version_hash(&commitment.clone().try_into().unwrap()); - let blob_proof_type = - get_blob_proof_type(proof_type, input.taiko.blob_proof_type.clone()); - info!("blob proof type: {:?}", &blob_proof_type); - match blob_proof_type { - crate::input::BlobProofType::ProofOfEquivalence => { - let ct = CycleTracker::start("proof_of_equivalence"); - let (x, y) = - eip4844::proof_of_equivalence(&input.taiko.tx_data, &versioned_hash)?; - ct.end(); - let verified = eip4844::verify_kzg_proof_impl( - commitment.clone().try_into().unwrap(), - x, - y, - input - .taiko - .blob_proof - .clone() - .map(|p| TryInto::<[u8; 48]>::try_into(p).unwrap()) - .unwrap(), - )?; - ensure!(verified); - } - BlobProofType::KzgVersionedHash => { - let ct = CycleTracker::start("proof_of_commitment"); - ensure!( - commitment == &eip4844::calc_kzg_proof_commitment(&input.taiko.tx_data)? - ); - ct.end(); - } - }; + verify_blob( + get_blob_proof_type(proof_type, input.taiko.blob_proof_type.clone()), + &input.taiko.tx_data, + versioned_hash, + &commitment.clone().try_into().unwrap(), + input.taiko.blob_proof.clone(), + )?; versioned_hash } else { TxHash::from(keccak(input.taiko.tx_data.as_slice())) @@ -261,75 +431,65 @@ pub fn new_batch( batch_input: &GuestBatchInput, - header: &Header, + blocks: Vec<Block>, proof_type: ProofType, ) -> Result<Self> { - let tx_list_hashs = batch_input.inputs.iter().try_fold( - Vec::<TxHash>::new(), - |mut tx_list_hashs, guest_input| { - if guest_input.taiko.block_proposed.blob_used() { - let tx_list_hash = - Self::get_verifiable_blob_tx_list_hash(guest_input, proof_type)?; - tx_list_hashs.push(tx_list_hash); - } else { - tx_list_hashs.push(TxHash::from(keccak(guest_input.taiko.tx_data.as_slice()))); - } - Ok::<Vec<TxHash>, Error>(tx_list_hashs) - }, - )?; - - let input = &batch_input.inputs[0]; - - // If the passed in chain spec contains a known chain id, the chain spec NEEDS to match the - // one we expect, because the prover could otherwise just fill in any values. - // The chain id is used because that is the value that is put onchain, - // and so all other chain data needs to be derived from it.
- // For unknown chain ids we just skip this check so that tests using test data can still pass. - // TODO: we should probably split things up in critical and non-critical parts - // in the chain spec itself so we don't have to manually all the ones we have to care about. - if let Some(verified_chain_spec) = - SupportedChainSpecs::default().get_chain_spec_with_chain_id(input.chain_spec.chain_id) - { - ensure!( - input.chain_spec.max_spec_id == verified_chain_spec.max_spec_id, - "unexpected max_spec_id" - ); - ensure!( - input.chain_spec.hard_forks == verified_chain_spec.hard_forks, - "unexpected hard_forks" - ); - ensure!( - input.chain_spec.eip_1559_constants == verified_chain_spec.eip_1559_constants, - "unexpected eip_1559_constants" - ); - ensure!( - input.chain_spec.l1_contract == verified_chain_spec.l1_contract, - "unexpected l1_contract" - ); - ensure!( - input.chain_spec.l2_contract == verified_chain_spec.l2_contract, - "unexpected l2_contract" - ); - ensure!( - input.chain_spec.is_taiko == verified_chain_spec.is_taiko, - "unexpected eip_1559_constants" - ); + // Verify blob usage, either by commitment comparison or by proof of equivalence. + verify_batch_mode_blob_usage(batch_input, proof_type)?; + + for input in &batch_input.inputs { + // If the passed-in chain spec contains a known chain id, the chain spec NEEDS to match the + // one we expect, because the prover could otherwise just fill in any values. + // The chain id is used because that is the value that is put onchain, + // and so all other chain data needs to be derived from it. + // For unknown chain ids we just skip this check so that tests using test data can still pass. + // TODO: we should probably split things up in critical and non-critical parts + // in the chain spec itself so we don't have to manually check all the ones we care about.
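+ // Any mismatch aborts proving here, since values like the chain id end up in the protocol instance.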
+ if let Some(verified_chain_spec) = SupportedChainSpecs::default() + .get_chain_spec_with_chain_id(input.chain_spec.chain_id) + { + ensure!( + input.chain_spec.max_spec_id == verified_chain_spec.max_spec_id, + "unexpected max_spec_id" + ); + ensure!( + input.chain_spec.hard_forks == verified_chain_spec.hard_forks, + "unexpected hard_forks" + ); + ensure!( + input.chain_spec.eip_1559_constants == verified_chain_spec.eip_1559_constants, + "unexpected eip_1559_constants" + ); + ensure!( + input.chain_spec.l1_contract == verified_chain_spec.l1_contract, + "unexpected l1_contract" + ); + ensure!( + input.chain_spec.l2_contract == verified_chain_spec.l2_contract, + "unexpected l2_contract" + ); + ensure!( + input.chain_spec.is_taiko == verified_chain_spec.is_taiko, + "unexpected is_taiko" + ); + } } + // TODO: move chain_spec into the batch input + let input = &batch_input.inputs[0]; let verifier_address = input .chain_spec .get_fork_verifier_address(input.taiko.block_proposed.block_number(), proof_type) .unwrap_or_default(); - let tx_list_hash = tx_list_hashs[0]; let pi = ProtocolInstance { transition: Transition { - parentHash: header.parent_hash, - blockHash: header.hash_slow(), - stateRoot: header.state_root, + parentHash: blocks.first().unwrap().header.parent_hash, + blockHash: blocks.last().unwrap().header.hash_slow(), + stateRoot: blocks.last().unwrap().header.state_root, graffiti: input.taiko.prover_data.graffiti, }, - block_metadata: BlockMetaDataFork::from(input, header, tx_list_hash), + block_metadata: BlockMetaDataFork::from_batch_inputs(batch_input, blocks), sgx_instance: Address::default(), prover: input.taiko.prover_data.prover, chain_id: input.chain_spec.chain_id, @@ -340,9 +500,9 @@ if input.chain_spec.is_taiko() { ensure!( pi.block_metadata - .match_block_proposal(&input.taiko.block_proposed), + .match_block_proposal(&batch_input.taiko.batch_proposed), format!( - "block hash mismatch, expected: {:?}, got: {:?}", - input.taiko.block_proposed, pi.block_metadata + "batch block hash mismatch, expected: {:?}, got: {:?}", + batch_input.taiko.batch_proposed, pi.block_metadata ) ); @@ -351,46 +511,6 @@ Ok(pi) } - // if blob is used, tx_list_hash is the commitment to the blob. - // either we verify the blob openning (if in zk) here by using proof_of_equivalence, - // or we just calculate the blob commitment and eventually it will be verified on-chain.
- fn get_verifiable_blob_tx_list_hash(input: &GuestInput, proof_type: ProofType) -> Result<TxHash> { - let commitment = input - .taiko - .blob_commitment - .as_ref() - .expect("no blob commitment"); - let versioned_hash = commitment_to_version_hash(&commitment.clone().try_into().unwrap()); - - let blob_proof_type = get_blob_proof_type(proof_type, input.taiko.blob_proof_type.clone()); - info!("blob proof type: {:?}", &blob_proof_type); - match blob_proof_type { - crate::input::BlobProofType::ProofOfEquivalence => { - let ct = CycleTracker::start("proof_of_equivalence"); - let (x, y) = eip4844::proof_of_equivalence(&input.taiko.tx_data, &versioned_hash)?; - ct.end(); - let verified = eip4844::verify_kzg_proof_impl( - commitment.clone().try_into().unwrap(), - x, - y, - input - .taiko - .blob_proof - .clone() - .map(|p| TryInto::<[u8; 48]>::try_into(p).unwrap()) - .unwrap(), - )?; - ensure!(verified); - } - BlobProofType::KzgVersionedHash => { - let ct = CycleTracker::start("proof_of_commitment"); - ensure!(commitment == &eip4844::calc_kzg_proof_commitment(&input.taiko.tx_data)?); - ct.end(); - } - }; - Ok(versioned_hash) - } - pub fn sgx_instance(mut self, instance: Address) -> Self { self.sgx_instance = instance; self @@ -401,6 +521,7 @@ BlockMetaDataFork::None => keccak(vec![]).into(), BlockMetaDataFork::Hekla(ref meta) => keccak(meta.abi_encode()).into(), BlockMetaDataFork::Ontake(ref meta) => keccak(meta.abi_encode()).into(), + BlockMetaDataFork::Pacaya(ref meta) => keccak(meta.abi_encode()).into(), } } diff --git a/lib/src/prover.rs index a7681019d..e0416fc61 100644 --- a/lib/src/prover.rs +++ b/lib/src/prover.rs @@ -5,7 +5,8 @@ use std::sync::Mutex; use utoipa::ToSchema; use crate::input::{ - AggregationGuestInput, AggregationGuestOutput, GuestBatchInput, GuestInput, GuestOutput, + AggregationGuestInput, AggregationGuestOutput, GuestBatchInput, GuestBatchOutput, GuestInput, + GuestOutput, }; #[derive(thiserror::Error, Debug)] @@ -68,10 +69,10 @@ async fn batch_run( input: GuestBatchInput, - output: &GuestOutput, + output: &GuestBatchOutput, config: &ProverConfig, - store: Option<Box<&mut dyn IdWrite>>, - ) -> ProverResult<Vec<Proof>>; + store: Option<&mut dyn IdWrite>, + ) -> ProverResult<Proof>; async fn aggregate( input: AggregationGuestInput, diff --git a/lib/src/utils.rs index 3adefa77b..0850721b6 100644 --- a/lib/src/utils.rs +++ b/lib/src/utils.rs @@ -7,7 +7,7 @@ use reth_primitives::TransactionSigned; use tracing::{debug, error, warn}; use crate::consts::{ChainSpec, Network}; -use crate::input::BlockProposedFork; +use crate::input::{BlockProposedFork, TaikoGuestBatchInput}; #[cfg(not(feature = "std"))] use crate::no_std::*; @@ -96,6 +96,55 @@ pub fn generate_transactions( transactions } +// Distribute txs to the blocks according to each block's declared tx count. +fn distribute_txs<T: Clone>(data: &[T], sizes: &[usize]) -> Vec<Vec<T>> { + let mut positions = Vec::with_capacity(sizes.len() + 1); + positions.push(0); + + let mut pos = 0; + for &size in sizes { + pos += size; + positions.push(pos); + } + + positions + .windows(2) + .map(|w| data[w[0]..w[1]].to_vec()) + .collect() +} + +/// Concatenate the blobs and decode the whole tx list, then +/// give each block its portion of the tx list according to its declared tx count. +pub fn generate_batch_transactions( + chain_spec: &ChainSpec, + taiko_guest_batch_input: &TaikoGuestBatchInput, +) -> Vec<Vec<TransactionSigned>> { + assert!( + matches!( + taiko_guest_batch_input.batch_proposed, + BlockProposedFork::Pacaya(_) + ), + "only pacaya batch supported" + ); + let batch_proposal =
&taiko_guest_batch_input.batch_proposed; + let blob_data_bufs = taiko_guest_batch_input.tx_data_from_blob.clone(); + let compressed_tx_list_buf = blob_data_bufs + .iter() + .map(|blob_data_buf| decode_blob_data(blob_data_buf)) + .collect::<Vec<Vec<u8>>>() + .concat(); + let tx_list_buf = zlib_decompress_data(&compressed_tx_list_buf).unwrap_or_default(); + let txs = decode_transactions(&tx_list_buf); + let tx_num_sizes = batch_proposal + .batch_info() + .unwrap() + .blocks + .iter() + .map(|b| b.numTransactions as usize) + .collect::<Vec<_>>(); + distribute_txs(&txs, &tx_num_sizes) +} + const BLOB_FIELD_ELEMENT_NUM: usize = 4096; const BLOB_FIELD_ELEMENT_BYTES: usize = 32; const BLOB_DATA_CAPACITY: usize = BLOB_FIELD_ELEMENT_NUM * BLOB_FIELD_ELEMENT_BYTES; diff --git a/provers/sgx/guest/src/app_args.rs index 10f8ca18e..a29ca4719 100644 --- a/provers/sgx/guest/src/app_args.rs +++ b/provers/sgx/guest/src/app_args.rs @@ -17,6 +17,8 @@ pub struct App { pub enum Command { /// Prove (i.e. sign) a single block and exit. OneShot(OneShotArgs), + /// Prove (i.e. sign) a batch of blocks and exit. + OneBatchShot(OneShotArgs), /// Aggregate proofs Aggregate(OneShotArgs), /// Bootstrap the application and then exit. The bootstrapping process generates the diff --git a/provers/sgx/guest/src/main.rs index c7af5db30..09c3b412e 100644 --- a/provers/sgx/guest/src/main.rs +++ b/provers/sgx/guest/src/main.rs @@ -23,6 +23,10 @@ pub async fn main() -> Result<()> { println!("Starting one shot mode"); one_shot(args.global_opts, one_shot_args).await? } + Command::OneBatchShot(one_shot_args) => { + println!("Starting one batch shot mode"); + one_shot_batch(args.global_opts, one_shot_args).await? + } Command::Aggregate(one_shot_args) => { println!("Starting one shot mode"); aggregate(args.global_opts, one_shot_args).await?
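The tx-distribution step in `generate_batch_transactions` above is easiest to see with concrete numbers. A minimal, self-contained sketch of the same prefix-sum slicing (the `<T: Clone>` bound is reconstructed from the `.to_vec()` calls; the example data is invented):

fn distribute_txs<T: Clone>(data: &[T], sizes: &[usize]) -> Vec<Vec<T>> {
    // Prefix-sum the per-block tx counts into slice boundaries, e.g. [2, 0, 3] -> [0, 2, 2, 5].
    let mut positions = vec![0usize];
    let mut pos = 0;
    for &size in sizes {
        pos += size;
        positions.push(pos);
    }
    // Each adjacent boundary pair [start, end) becomes one block's tx list.
    positions
        .windows(2)
        .map(|w| data[w[0]..w[1]].to_vec())
        .collect()
}

fn main() {
    // Five decoded txs, split across three blocks declaring 2, 0, and 3 transactions.
    let txs = vec!["tx0", "tx1", "tx2", "tx3", "tx4"];
    let per_block = distribute_txs(&txs, &[2, 0, 3]);
    assert_eq!(per_block[0], ["tx0", "tx1"]);
    assert!(per_block[1].is_empty());
    assert_eq!(per_block[2], ["tx2", "tx3", "tx4"]);
    // If the declared counts exceeded the decoded tx list, the slice indexing would panic,
    // aborting the guest on a malformed proposal.
}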
diff --git a/provers/sgx/guest/src/one_shot.rs index e3d9c0e20..bb9068863 100644 --- a/provers/sgx/guest/src/one_shot.rs +++ b/provers/sgx/guest/src/one_shot.rs @@ -8,13 +8,13 @@ use std::{ use anyhow::{anyhow, bail, Context, Error, Result}; use base64_serde::base64_serde_type; use raiko_lib::{ - builder::calculate_block_header, - input::{GuestInput, RawAggregationGuestInput}, + builder::{calculate_batch_blocks_final_header, calculate_block_header}, + input::{GuestBatchInput, GuestInput, RawAggregationGuestInput}, primitives::{keccak, Address, B256}, proof_type::ProofType, protocol_instance::{aggregation_output_combine, ProtocolInstance}, }; -use secp256k1::{Keypair, SecretKey}; +use secp256k1::{Keypair, PublicKey, SecretKey}; use serde::Serialize; base64_serde_type!(Base64Standard, base64::engine::general_purpose::STANDARD); @@ -172,6 +172,68 @@ pub async fn one_shot(global_opts: GlobalOpts, args: OneShotArgs) -> Result<()> print_sgx_info() } +pub fn load_bootstrap_privkey(secrets_dir: &Path) -> Result<(SecretKey, PublicKey, Address)> { + // Make sure this SGX instance was bootstrapped + let prev_privkey = load_bootstrap(secrets_dir) + .or_else(|_| bail!("Application was not bootstrapped or has a deprecated bootstrap."))?; + + let new_pubkey = public_key(&prev_privkey); + let new_instance = public_key_to_address(&new_pubkey); + Ok((prev_privkey, new_pubkey, new_instance)) +} + +pub async fn one_shot_batch(global_opts: GlobalOpts, args: OneShotArgs) -> Result<()> { + println!("Global options: {global_opts:?}, OneShot options: {args:?}"); + let (prev_privkey, new_pubkey, new_instance) = + load_bootstrap_privkey(&global_opts.secrets_dir)?; + let batch_input: GuestBatchInput = + bincode::deserialize_from(std::io::stdin()).expect("unable to deserialize batch input"); + + // Build and finalize every block in the batch + let final_blocks = calculate_batch_blocks_final_header(&batch_input); + // Calculate the public input hash + let pi = ProtocolInstance::new_batch(&batch_input, final_blocks, ProofType::Sgx)? + .sgx_instance(new_instance); + let pi_hash = pi.instance_hash(); + + println!( + "Block batch {} for blocks[{}..={}].
PI data to be signed: {pi_hash}", + batch_input.taiko.batch_id, + batch_input.inputs.first().unwrap().block.number, + batch_input.inputs.last().unwrap().block.number + ); + + // Sign the public input hash which contains all required block inputs and outputs + let sig = sign_message(&prev_privkey, pi_hash)?; + + // Create the proof for the onchain SGX verifier + // 4(id) + 20(new) + 65(sig) = 89 + const SGX_PROOF_LEN: usize = 89; + let mut proof = Vec::with_capacity(SGX_PROOF_LEN); + proof.extend(args.sgx_instance_id.to_be_bytes()); + proof.extend(new_instance); + proof.extend(sig); + let proof = hex::encode(proof); + + // Store the public key address in the attestation data + save_attestation_user_report_data(new_instance)?; + + // Print out the proof and updated public info + let quote = get_sgx_quote()?; + let data = serde_json::json!({ + "proof": format!("0x{proof}"), + "quote": hex::encode(quote), + "public_key": format!("0x{new_pubkey}"), + "instance_address": new_instance.to_string(), + "input": pi_hash.to_string(), + }); + println!("{data}"); + + // Print out general SGX information + print_sgx_info() +} + pub async fn aggregate(global_opts: GlobalOpts, args: OneShotArgs) -> Result<()> { // Make sure this SGX instance was bootstrapped let prev_privkey = load_bootstrap(&global_opts.secrets_dir) diff --git a/provers/sgx/prover/src/lib.rs index 6b649857c..b2e01bebc 100644 --- a/provers/sgx/prover/src/lib.rs +++ b/provers/sgx/prover/src/lib.rs @@ -11,8 +11,8 @@ use std::{ use once_cell::sync::Lazy; use raiko_lib::{ input::{ - AggregationGuestInput, AggregationGuestOutput, GuestInput, GuestOutput, - RawAggregationGuestInput, RawProof, + AggregationGuestInput, AggregationGuestOutput, GuestBatchInput, GuestBatchOutput, + GuestInput, GuestOutput, RawAggregationGuestInput, RawProof, }, primitives::B256, prover::{IdStore, IdWrite, Proof, ProofKey, Prover, ProverConfig, ProverError, ProverResult}, @@ -237,14 +237,86 @@ impl Prover for SgxProver { async fn cancel(_proof_key: ProofKey, _read: Box<&mut dyn IdStore>) -> ProverResult<()> { Ok(()) } - + async fn batch_run( - input: raiko_lib::input::GuestBatchInput, - output: &GuestOutput, + input: GuestBatchInput, + _output: &GuestBatchOutput, config: &ProverConfig, - store: Option<Box<&mut dyn IdWrite>>, - ) -> ProverResult<Vec<Proof>> { - todo!() + _store: Option<&mut dyn IdWrite>, + ) -> ProverResult<Proof> { + let sgx_param = SgxParam::deserialize(config.get("sgx").unwrap()).unwrap(); + + // Support both SGX and the direct backend for testing + let direct_mode = match env::var("SGX_DIRECT") { + Ok(value) => value == "1", + Err(_) => false, + }; + + println!( + "WARNING: running SGX in {} mode!", + if direct_mode { + "direct (a.k.a.
simulation)" + } else { + "hardware" + } + ); + + // The working directory + let mut cur_dir = env::current_exe() + .expect("Fail to get current directory") + .parent() + .unwrap() + .to_path_buf(); + + // When running in tests we might be in a child folder + if cur_dir.ends_with("deps") { + cur_dir = cur_dir.parent().unwrap().to_path_buf(); + } + + println!("Current directory: {cur_dir:?}\n"); + // Working paths + PRIVATE_KEY + .get_or_init(|| async { cur_dir.join("secrets").join(PRIV_KEY_FILENAME) }) + .await; + GRAMINE_MANIFEST_TEMPLATE + .get_or_init(|| async { + cur_dir + .join(CONFIG) + .join("sgx-guest.local.manifest.template") + }) + .await; + + // The gramine command (gramine or gramine-direct for testing in non-SGX environment) + let gramine_cmd = || -> StdCommand { + let mut cmd = if direct_mode { + StdCommand::new("gramine-direct") + } else { + let mut cmd = StdCommand::new("sudo"); + cmd.arg("gramine-sgx"); + cmd + }; + cmd.current_dir(&cur_dir).arg(ELF_NAME); + cmd + }; + + // Setup: run this once while setting up your SGX instance + if sgx_param.setup { + setup(&cur_dir, direct_mode).await?; + } + + let mut sgx_proof = if sgx_param.bootstrap { + bootstrap(cur_dir.clone().join("secrets"), gramine_cmd()).await + } else { + // Dummy proof: it's ok when only setup/bootstrap was requested + Ok(SgxResponse::default()) + }; + + if sgx_param.prove { + // overwrite sgx_proof as the bootstrap quote stays the same in bootstrap & prove. + sgx_proof = batch_prove(gramine_cmd(), input.clone(), sgx_param.instance_id).await + } + + sgx_proof.map(|r| r.into()) } } @@ -399,6 +471,42 @@ async fn prove( .map_err(|e| ProverError::GuestError(e.to_string()))? } +async fn batch_prove( + mut gramine_cmd: StdCommand, + input: GuestBatchInput, + instance_id: u64, +) -> ProverResult { + tokio::task::spawn_blocking(move || { + let mut child = gramine_cmd + .arg("one-shot-batch") + .arg("--sgx-instance-id") + .arg(instance_id.to_string()) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .map_err(|e| format!("Could not spawn gramine cmd: {e}"))?; + let stdin = child.stdin.as_mut().expect("Failed to open stdin"); + let input_success = bincode::serialize_into(stdin, &input); + let output_success = child.wait_with_output(); + + match (input_success, output_success) { + (Ok(_), Ok(output)) => { + handle_output(&output, "SGX prove")?; + Ok(parse_sgx_result(output.stdout)?) + } + (Err(i), output_success) => Err(ProverError::GuestError(format!( + "Can not serialize input for SGX {i}, output is {output_success:?}" + ))), + (Ok(_), Err(output_err)) => Err(ProverError::GuestError( + handle_gramine_error("Could not run SGX guest prover", output_err).to_string(), + )), + } + }) + .await + .map_err(|e| ProverError::GuestError(e.to_string()))? +} + async fn aggregate( mut gramine_cmd: StdCommand, input: AggregationGuestInput,