From faec2e888f86754f955cc38ffbeb1f495bce9aa4 Mon Sep 17 00:00:00 2001 From: Balaji Arun Date: Mon, 20 Oct 2025 16:17:10 -0700 Subject: [PATCH 1/9] [encrypted-mempool] Introduce new payload type --- api/src/transactions.rs | 28 ++++++++-- types/src/transaction/encrypted_payload.rs | 59 ++++++++++++++++++++++ types/src/transaction/mod.rs | 27 ++++++++-- types/src/transaction/use_case.rs | 9 ++++ 4 files changed, 115 insertions(+), 8 deletions(-) create mode 100644 types/src/transaction/encrypted_payload.rs diff --git a/api/src/transactions.rs b/api/src/transactions.rs index 62b5a07159966..7bb049c783c6e 100644 --- a/api/src/transactions.rs +++ b/api/src/transactions.rs @@ -33,10 +33,11 @@ use aptos_crypto::{hash::CryptoHash, signing_message}; use aptos_logger::error; use aptos_types::{ account_address::AccountAddress, + executable, mempool_status::MempoolStatusCode, transaction::{ - EntryFunction, ExecutionStatus, MultisigTransactionPayload, RawTransaction, - RawTransactionWithData, Script, SignedTransaction, TransactionExecutable, + encrypted_payload, EntryFunction, ExecutionStatus, MultisigTransactionPayload, + RawTransaction, RawTransactionWithData, Script, SignedTransaction, TransactionExecutable, TransactionPayload, TransactionPayloadInner, }, vm_status::StatusCode, @@ -1254,9 +1255,6 @@ impl TransactionsApi { } } }, - - // Deprecated. To avoid panics when malicios users submit this - // payload, return an error. TransactionPayload::ModuleBundle(_) => { return Err(SubmitTransactionError::bad_request_with_code( "Module bundle payload has been removed", @@ -1294,6 +1292,15 @@ impl TransactionsApi { } }, }, + TransactionPayload::EncryptedPayload(encrypted_payload) => { + if !encrypted_payload.is_encrypted() { + return Err(SubmitTransactionError::bad_request_with_code( + "Received encrypted transaction payload in decrypted form", + AptosErrorCode::InvalidInput, + ledger_info, + )); + } + }, } // TODO: Verify script args? 
@@ -1580,6 +1587,14 @@ impl TransactionsApi { )); } + if txn.raw_transaction_ref().payload_ref().is_encrypted() { + return Err(SubmitTransactionError::bad_request_with_code( + "Encrypted transactions cannot be simulated", + AptosErrorCode::InvalidInput, + &ledger_info, + )); + } + // Simulate transaction let state_view = self.context.latest_state_view_poem(&ledger_info)?; let (vm_status, output) = @@ -1639,6 +1654,9 @@ impl TransactionsApi { }; stats_key }, + TransactionPayload::EncryptedPayload(encrypted_payload) => { + unreachable!("Encrypted transactions must not be simulated") + }, }; self.context .simulate_txn_stats() diff --git a/types/src/transaction/encrypted_payload.rs b/types/src/transaction/encrypted_payload.rs new file mode 100644 index 0000000000000..0528045c4b2a9 --- /dev/null +++ b/types/src/transaction/encrypted_payload.rs @@ -0,0 +1,59 @@ +use crate::transaction::{TransactionExecutable, TransactionExecutableRef, TransactionExtraConfig}; +use anyhow::{bail, Result}; +use aptos_crypto::HashValue; +use serde::{Deserialize, Serialize}; +use std::ops::Deref; + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] +pub enum EncryptedPayload { + V1(EncryptedPayloadV1), +} + +impl Deref for EncryptedPayload { + type Target = EncryptedPayloadV1; + + fn deref(&self) -> &Self::Target { + let Self::V1(payload) = self; + payload + } +} + +#[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] +pub enum EncryptedPayloadV1 { + Encrypted { + cipher_text: Vec, + payload_hash: HashValue, + extra_config: TransactionExtraConfig, + }, + Decrypted { + cipher_text: Vec, + payload_hash: HashValue, + extra_config: TransactionExtraConfig, + + encryption_nonce: u64, + executable: TransactionExecutable, + }, +} + +impl EncryptedPayloadV1 { + pub fn executable(&self) -> Result { + let Self::Decrypted { executable, .. 
} = self else { + bail!("Transaction is encrypted"); + }; + Ok(executable.clone()) + } + + pub fn executable_ref(&self) -> Result> { + let Self::Decrypted { executable, .. } = self else { + bail!("Transaction is encrypted"); + }; + Ok(executable.as_ref()) + } + + pub(crate) fn extra_config(&self) -> &TransactionExtraConfig { + match self { + EncryptedPayloadV1::Encrypted { extra_config, .. } => extra_config, + EncryptedPayloadV1::Decrypted { extra_config, .. } => extra_config, + } + } +} diff --git a/types/src/transaction/mod.rs b/types/src/transaction/mod.rs index c830d25501cfe..9cea038278718 100644 --- a/types/src/transaction/mod.rs +++ b/types/src/transaction/mod.rs @@ -12,9 +12,12 @@ use crate::{ keyless::{KeylessPublicKey, KeylessSignature}, ledger_info::LedgerInfo, proof::{TransactionInfoListWithProof, TransactionInfoWithProof}, - transaction::authenticator::{ - AASigningData, AccountAuthenticator, AnyPublicKey, AnySignature, SingleKeyAuthenticator, - TransactionAuthenticator, + transaction::{ + authenticator::{ + AASigningData, AccountAuthenticator, AnyPublicKey, AnySignature, + SingleKeyAuthenticator, TransactionAuthenticator, + }, + encrypted_payload::{EncryptedPayload, EncryptedPayloadV1}, }, vm_status::{DiscardedVMStatus, KeptVMStatus, StatusCode, StatusType, VMStatus}, write_set::{HotStateOp, WriteSet}, @@ -46,6 +49,7 @@ pub mod authenticator; pub mod block_epilogue; mod block_output; mod change_set; +pub mod encrypted_payload; mod module; mod multisig; mod script; @@ -679,6 +683,8 @@ pub enum TransactionPayload { /// Contains an executable (script/entry function) along with extra configuration. /// Once this new format is fully rolled out, above payload variants will be deprecated. 
Payload(TransactionPayloadInner), + /// Represents an encrypted transaction payload + EncryptedPayload(EncryptedPayload), } #[derive(Clone, Debug, Hash, Eq, PartialEq, Serialize, Deserialize)] @@ -761,6 +767,12 @@ impl TransactionPayload { TransactionPayload::Payload(TransactionPayloadInner::V1 { extra_config, .. }) => { extra_config.is_multisig() }, + TransactionPayload::EncryptedPayload(EncryptedPayload::V1( + EncryptedPayloadV1::Encrypted { extra_config, .. }, + )) + | TransactionPayload::EncryptedPayload(EncryptedPayload::V1( + EncryptedPayloadV1::Decrypted { extra_config, .. }, + )) => extra_config.is_multisig(), } } @@ -793,6 +805,9 @@ impl TransactionPayload { TransactionPayload::ModuleBundle(_) => { Err(format_err!("ModuleBundle variant is deprecated")) }, + TransactionPayload::EncryptedPayload(encrypted_payload) => { + encrypted_payload.executable() + }, } } @@ -809,6 +824,9 @@ impl TransactionPayload { TransactionPayload::ModuleBundle(_) => { Err(format_err!("ModuleBundle variant is deprecated")) }, + TransactionPayload::EncryptedPayload(encrypted_payload) => { + encrypted_payload.executable_ref() + }, } } @@ -827,6 +845,9 @@ impl TransactionPayload { TransactionPayload::Payload(TransactionPayloadInner::V1 { extra_config, .. 
}) => { extra_config.clone() }, + TransactionPayload::EncryptedPayload(encrypted_payload) => { + encrypted_payload.extra_config().clone() + }, } } diff --git a/types/src/transaction/use_case.rs b/types/src/transaction/use_case.rs index e22fb0194ecb3..2efded33174a5 100644 --- a/types/src/transaction/use_case.rs +++ b/types/src/transaction/use_case.rs @@ -41,6 +41,15 @@ fn parse_use_case(payload: &TransactionPayload) -> UseCaseKey { None } }, + EncryptedPayload(encrypted_payload) => { + if let Ok(TransactionExecutableRef::EntryFunction(entry_fun)) = + encrypted_payload.executable_ref() + { + Some(entry_fun) + } else { + None + } + }, }; match maybe_entry_func { From 4000134d181532be5813a763d971013cabd162c4 Mon Sep 17 00:00:00 2001 From: Balaji Arun Date: Thu, 6 Nov 2025 08:57:13 -0800 Subject: [PATCH 2/9] payload wip --- api/types/src/convert.rs | 4 +++- crates/aptos-transaction-filters/src/transaction_filter.rs | 4 ++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/api/types/src/convert.rs b/api/types/src/convert.rs index 44978f4422cfd..ec6b50b1cb421 100644 --- a/api/types/src/convert.rs +++ b/api/types/src/convert.rs @@ -401,8 +401,10 @@ impl<'a, S: StateView> MoveConverter<'a, S> { } }, }, - // Deprecated. 
ModuleBundle(_) => bail!("Module bundle payload has been removed"), + aptos_types::transaction::TransactionPayload::Script(script) => todo!(), + aptos_types::transaction::TransactionPayload::EntryFunction(entry_function) => todo!(), + EncryptedPayload(encrypted_payload) => todo!(), }; Ok(ret) } diff --git a/crates/aptos-transaction-filters/src/transaction_filter.rs b/crates/aptos-transaction-filters/src/transaction_filter.rs index 18bfcbb3f7029..d7536e2b655fd 100644 --- a/crates/aptos-transaction-filters/src/transaction_filter.rs +++ b/crates/aptos-transaction-filters/src/transaction_filter.rs @@ -343,6 +343,7 @@ fn matches_entry_function( }, } }, + TransactionPayload::EncryptedPayload(encrypted_payload) => todo!(), } } @@ -374,6 +375,7 @@ fn matches_entry_function_module_address( }, } }, + TransactionPayload::EncryptedPayload(encrypted_payload) => todo!(), } } @@ -397,6 +399,7 @@ fn matches_multisig_address( .unwrap_or(false), } }, + TransactionPayload::EncryptedPayload(encrypted_payload) => todo!(), } } @@ -421,6 +424,7 @@ fn matches_script_argument_address( }, } }, + TransactionPayload::EncryptedPayload(encrypted_payload) => todo!(), } } From 573ff77ee1dcade1628ae52bff771b6c832f1bd0 Mon Sep 17 00:00:00 2001 From: Balaji Arun Date: Thu, 6 Nov 2025 08:57:13 -0800 Subject: [PATCH 3/9] payload wip From 9c38022d1239ebb96239c395bfa75fdb9375a019 Mon Sep 17 00:00:00 2001 From: Balaji Arun Date: Thu, 6 Nov 2025 09:02:17 -0800 Subject: [PATCH 4/9] SignedBatchInfo and ProofOfStore with Generic BatchInfo --- consensus/consensus-types/src/common.rs | 10 +- .../consensus-types/src/opt_proposal_msg.rs | 7 +- consensus/consensus-types/src/payload.rs | 4 +- .../consensus-types/src/proof_of_store.rs | 123 +++++++++++++----- consensus/consensus-types/src/proposal_msg.rs | 9 +- .../network/observer_message.rs | 24 ++-- consensus/src/epoch_manager.rs | 4 +- consensus/src/network.rs | 19 ++- consensus/src/network_interface.rs | 12 +- .../src/quorum_store/batch_proof_queue.rs | 8 +- 
consensus/src/quorum_store/batch_store.rs | 8 +- .../src/quorum_store/proof_coordinator.rs | 16 +-- consensus/src/quorum_store/proof_manager.rs | 4 +- .../src/quorum_store/quorum_store_builder.rs | 8 +- .../tests/batch_generator_test.rs | 4 +- .../tests/batch_proof_queue_test.rs | 8 +- .../tests/batch_requester_test.rs | 14 +- .../quorum_store/tests/proof_manager_test.rs | 8 +- consensus/src/round_manager.rs | 12 +- .../test_utils/mock_quorum_store_sender.rs | 16 ++- 20 files changed, 203 insertions(+), 115 deletions(-) diff --git a/consensus/consensus-types/src/common.rs b/consensus/consensus-types/src/common.rs index 36dea5826897a..fdd7dbbd86a00 100644 --- a/consensus/consensus-types/src/common.rs +++ b/consensus/consensus-types/src/common.rs @@ -127,11 +127,11 @@ pub struct RejectedTransactionSummary { #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] pub struct ProofWithData { - pub proofs: Vec, + pub proofs: Vec>, } impl ProofWithData { - pub fn new(proofs: Vec) -> Self { + pub fn new(proofs: Vec>) -> Self { Self { proofs } } @@ -516,9 +516,9 @@ impl Payload { } fn verify_with_cache( - proofs: &[ProofOfStore], + proofs: &[ProofOfStore], validator: &ValidatorVerifier, - proof_cache: &ProofCache, + proof_cache: &ProofCache, ) -> anyhow::Result<()> { let unverified: Vec<_> = proofs .iter() @@ -571,7 +571,7 @@ impl Payload { pub fn verify( &self, verifier: &ValidatorVerifier, - proof_cache: &ProofCache, + proof_cache: &ProofCache, quorum_store_enabled: bool, ) -> anyhow::Result<()> { match (quorum_store_enabled, self) { diff --git a/consensus/consensus-types/src/opt_proposal_msg.rs b/consensus/consensus-types/src/opt_proposal_msg.rs index e26e78032cc80..cf6c21a0142b1 100644 --- a/consensus/consensus-types/src/opt_proposal_msg.rs +++ b/consensus/consensus-types/src/opt_proposal_msg.rs @@ -3,7 +3,10 @@ // SPDX-License-Identifier: Apache-2.0 use crate::{ - common::Author, opt_block_data::OptBlockData, proof_of_store::ProofCache, sync_info::SyncInfo, + 
common::Author, + opt_block_data::OptBlockData, + proof_of_store::{BatchInfo, ProofCache}, + sync_info::SyncInfo, }; use anyhow::{ensure, Context, Result}; use aptos_types::validator_verifier::ValidatorVerifier; @@ -98,7 +101,7 @@ impl OptProposalMsg { &self, sender: Author, validator: &ValidatorVerifier, - proof_cache: &ProofCache, + proof_cache: &ProofCache, quorum_store_enabled: bool, ) -> Result<()> { ensure!( diff --git a/consensus/consensus-types/src/payload.rs b/consensus/consensus-types/src/payload.rs index 8e7a82cbb1bb0..0eaeabd2fb512 100644 --- a/consensus/consensus-types/src/payload.rs +++ b/consensus/consensus-types/src/payload.rs @@ -13,7 +13,7 @@ use std::{ pub type OptBatches = BatchPointer; -pub type ProofBatches = BatchPointer; +pub type ProofBatches = BatchPointer>; pub trait TDataInfo { fn num_txns(&self) -> u64; @@ -382,7 +382,7 @@ impl OptQuorumStorePayload { &self.inline_batches } - pub fn proof_with_data(&self) -> &BatchPointer { + pub fn proof_with_data(&self) -> &BatchPointer> { &self.proofs } diff --git a/consensus/consensus-types/src/proof_of_store.rs b/consensus/consensus-types/src/proof_of_store.rs index 9a912046a5226..0b53b065c37b0 100644 --- a/consensus/consensus-types/src/proof_of_store.rs +++ b/consensus/consensus-types/src/proof_of_store.rs @@ -3,7 +3,7 @@ use crate::{payload::TDataInfo, utils::PayloadTxnsSize}; use anyhow::{bail, ensure, Context}; -use aptos_crypto::{bls12381, CryptoMaterialError, HashValue}; +use aptos_crypto::{bls12381, hash::CryptoHash, CryptoMaterialError, HashValue}; use aptos_crypto_derive::{BCSCryptoHash, CryptoHasher}; use aptos_types::{ aggregate_signature::AggregateSignature, ledger_info::SignatureWithStatus, @@ -14,11 +14,23 @@ use mini_moka::sync::Cache; use rand::{seq::SliceRandom, thread_rng}; use serde::{Deserialize, Serialize}; use std::{ - fmt::{Display, Formatter}, + fmt::{Debug, Display, Formatter}, hash::Hash, ops::Deref, }; +pub trait TBatchInfo: Serialize + CryptoHash + Debug + Clone + Hash + 
Eq { + fn epoch(&self) -> u64; + + fn expiration(&self) -> u64; + + fn num_txns(&self) -> u64; + + fn num_bytes(&self) -> u64; + + fn as_batch_info(&self) -> &BatchInfo; +} + #[derive( Clone, Debug, Deserialize, Serialize, CryptoHasher, BCSCryptoHash, PartialEq, Eq, Hash, )] @@ -93,6 +105,28 @@ impl BatchInfo { } } +impl TBatchInfo for BatchInfo { + fn epoch(&self) -> u64 { + self.epoch + } + + fn expiration(&self) -> u64 { + self.expiration + } + + fn num_txns(&self) -> u64 { + self.num_txns + } + + fn num_bytes(&self) -> u64 { + self.num_bytes + } + + fn as_batch_info(&self) -> &BatchInfo { + &self + } +} + impl Display for BatchInfo { fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { write!(f, "({}:{}:{})", self.author, self.batch_id, self.digest) @@ -118,12 +152,15 @@ impl TDataInfo for BatchInfo { } #[derive(Clone, Debug, Deserialize, Serialize)] -pub struct SignedBatchInfoMsg { - signed_infos: Vec, +pub struct SignedBatchInfoMsg { + signed_infos: Vec>, } -impl SignedBatchInfoMsg { - pub fn new(signed_infos: Vec) -> Self { +impl SignedBatchInfoMsg +where + T: TBatchInfo, +{ + pub fn new(signed_infos: Vec>) -> Self { Self { signed_infos } } @@ -161,21 +198,24 @@ impl SignedBatchInfoMsg { Ok(epoch) } - pub fn take(self) -> Vec { + pub fn take(self) -> Vec> { self.signed_infos } } #[derive(Clone, Debug, Deserialize, Serialize)] -pub struct SignedBatchInfo { - info: BatchInfo, +pub struct SignedBatchInfo { + info: T, signer: PeerId, signature: SignatureWithStatus, } -impl SignedBatchInfo { +impl SignedBatchInfo +where + T: TBatchInfo, +{ pub fn new( - batch_info: BatchInfo, + batch_info: T, validator_signer: &ValidatorSigner, ) -> Result { let signature = validator_signer.sign(&batch_info)?; @@ -188,7 +228,7 @@ impl SignedBatchInfo { } pub fn new_with_signature( - batch_info: BatchInfo, + batch_info: T, signer: PeerId, signature: bls12381::Signature, ) -> Self { @@ -200,7 +240,7 @@ impl SignedBatchInfo { } #[cfg(any(test, feature = "fuzzing"))] - pub fn 
dummy(batch_info: BatchInfo, signer: PeerId) -> Self { + pub fn dummy(batch_info: T, signer: PeerId) -> Self { Self::new_with_signature(batch_info, signer, bls12381::Signature::dummy_signature()) } @@ -241,13 +281,13 @@ impl SignedBatchInfo { &self.signature } - pub fn batch_info(&self) -> &BatchInfo { + pub fn batch_info(&self) -> &T { &self.info } } -impl Deref for SignedBatchInfo { - type Target = BatchInfo; +impl Deref for SignedBatchInfo { + type Target = T; fn deref(&self) -> &Self::Target { &self.info @@ -267,12 +307,15 @@ pub enum SignedBatchInfoError { } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] -pub struct ProofOfStoreMsg { - proofs: Vec, +pub struct ProofOfStoreMsg { + proofs: Vec>, } -impl ProofOfStoreMsg { - pub fn new(proofs: Vec) -> Self { +impl ProofOfStoreMsg +where + T: TBatchInfo + Send + Sync + 'static, +{ + pub fn new(proofs: Vec>) -> Self { Self { proofs } } @@ -280,7 +323,7 @@ impl ProofOfStoreMsg { &self, max_num_proofs: usize, validator: &ValidatorVerifier, - cache: &ProofCache, + cache: &ProofCache, ) -> anyhow::Result<()> { ensure!(!self.proofs.is_empty(), "Empty message"); ensure!( @@ -309,28 +352,35 @@ impl ProofOfStoreMsg { Ok(epoch) } - pub fn take(self) -> Vec { + pub fn take(self) -> Vec> { self.proofs } } -pub type ProofCache = Cache; +pub type ProofCache = Cache; #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] -pub struct ProofOfStore { - info: BatchInfo, +pub struct ProofOfStore { + info: T, multi_signature: AggregateSignature, } -impl ProofOfStore { - pub fn new(info: BatchInfo, multi_signature: AggregateSignature) -> Self { +impl ProofOfStore +where + T: TBatchInfo + Send + Sync + 'static, +{ + pub fn new(info: T, multi_signature: AggregateSignature) -> Self { Self { info, multi_signature, } } - pub fn verify(&self, validator: &ValidatorVerifier, cache: &ProofCache) -> anyhow::Result<()> { + pub fn verify( + &self, + validator: &ValidatorVerifier, + cache: &ProofCache, + ) -> 
anyhow::Result<()> { if let Some(signature) = cache.get(&self.info) { if signature == self.multi_signature { return Ok(()); @@ -354,7 +404,7 @@ impl ProofOfStore { ret } - pub fn info(&self) -> &BatchInfo { + pub fn info(&self) -> &T { &self.info } @@ -363,25 +413,28 @@ impl ProofOfStore { } } -impl Deref for ProofOfStore { - type Target = BatchInfo; +impl Deref for ProofOfStore { + type Target = T; fn deref(&self) -> &Self::Target { &self.info } } -impl TDataInfo for ProofOfStore { +impl TDataInfo for ProofOfStore +where + T: TBatchInfo + Send + Sync + 'static, +{ fn num_txns(&self) -> u64 { - self.num_txns + self.info.num_txns() } fn num_bytes(&self) -> u64 { - self.num_bytes + self.info.num_bytes() } fn info(&self) -> &BatchInfo { - self.info() + self.info.as_batch_info() } fn signers(&self, ordered_authors: &[PeerId]) -> Vec { diff --git a/consensus/consensus-types/src/proposal_msg.rs b/consensus/consensus-types/src/proposal_msg.rs index 3784651ac88c9..11c14ce723b3f 100644 --- a/consensus/consensus-types/src/proposal_msg.rs +++ b/consensus/consensus-types/src/proposal_msg.rs @@ -2,7 +2,12 @@ // Parts of the project are originally copyright © Meta Platforms, Inc. 
// SPDX-License-Identifier: Apache-2.0 -use crate::{block::Block, common::Author, proof_of_store::ProofCache, sync_info::SyncInfo}; +use crate::{ + block::Block, + common::Author, + proof_of_store::{BatchInfo, ProofCache}, + sync_info::SyncInfo, +}; use anyhow::{anyhow, ensure, format_err, Context, Result}; use aptos_short_hex_str::AsShortHexStr; use aptos_types::validator_verifier::ValidatorVerifier; @@ -84,7 +89,7 @@ impl ProposalMsg { &self, sender: Author, validator: &ValidatorVerifier, - proof_cache: &ProofCache, + proof_cache: &ProofCache, quorum_store_enabled: bool, ) -> Result<()> { if let Some(proposal_author) = self.proposal.author() { diff --git a/consensus/src/consensus_observer/network/observer_message.rs b/consensus/src/consensus_observer/network/observer_message.rs index bf7faed8a84cf..fd47d820a31bd 100644 --- a/consensus/src/consensus_observer/network/observer_message.rs +++ b/consensus/src/consensus_observer/network/observer_message.rs @@ -379,11 +379,11 @@ impl CommitDecision { #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct PayloadWithProof { transactions: Vec, - proofs: Vec, + proofs: Vec>, } impl PayloadWithProof { - pub fn new(transactions: Vec, proofs: Vec) -> Self { + pub fn new(transactions: Vec, proofs: Vec>) -> Self { Self { transactions, proofs, @@ -439,7 +439,7 @@ impl TransactionsWithProof { } } - pub fn proofs(&self) -> Vec { + pub fn proofs(&self) -> Vec> { match self { TransactionsWithProof::TransactionsWithProofAndLimits(payload) => { payload.payload_with_proof.proofs.clone() @@ -512,7 +512,7 @@ impl BlockTransactionPayload { /// Creates a returns a new InQuorumStore transaction payload pub fn new_in_quorum_store( transactions: Vec, - proofs: Vec, + proofs: Vec>, ) -> Self { let payload_with_proof = PayloadWithProof::new(transactions, proofs); Self::DeprecatedInQuorumStore(payload_with_proof) @@ -521,7 +521,7 @@ impl BlockTransactionPayload { /// Creates a returns a new InQuorumStoreWithLimit transaction 
payload pub fn new_in_quorum_store_with_limit( transactions: Vec, - proofs: Vec, + proofs: Vec>, limit: Option, ) -> Self { let payload_with_proof = PayloadWithProof::new(transactions, proofs); @@ -532,7 +532,7 @@ impl BlockTransactionPayload { /// Creates a returns a new QuorumStoreInlineHybrid transaction payload pub fn new_quorum_store_inline_hybrid( transactions: Vec, - proofs: Vec, + proofs: Vec>, transaction_limit: Option, gas_limit: Option, inline_batches: Vec, @@ -557,7 +557,7 @@ impl BlockTransactionPayload { pub fn new_opt_quorum_store( transactions: Vec, - proofs: Vec, + proofs: Vec>, limit: Option, batch_infos: Vec, ) -> Self { @@ -612,7 +612,7 @@ impl BlockTransactionPayload { } /// Returns the proofs of the transaction payload - pub fn payload_proofs(&self) -> Vec { + pub fn payload_proofs(&self) -> Vec> { match self { BlockTransactionPayload::DeprecatedInQuorumStore(payload) => payload.proofs.clone(), BlockTransactionPayload::DeprecatedInQuorumStoreWithLimit(payload) => { @@ -714,7 +714,7 @@ impl BlockTransactionPayload { } /// Verifies the payload batches against the expected batches - fn verify_batches(&self, expected_proofs: &[ProofOfStore]) -> Result<(), Error> { + fn verify_batches(&self, expected_proofs: &[ProofOfStore]) -> Result<(), Error> { // Get the batches in the block transaction payload let payload_proofs = self.payload_proofs(); let payload_batches: Vec<&BatchInfo> = @@ -1851,7 +1851,7 @@ mod test { fn create_block_payload( block_info: Option, signed_transactions: &[SignedTransaction], - proofs: &[ProofOfStore], + proofs: &[ProofOfStore], inline_batches: &[BatchInfo], ) -> BlockPayload { // Create the transaction payload @@ -1875,7 +1875,7 @@ mod test { fn create_block_optqs_payload( block_info: Option, signed_transactions: &[SignedTransaction], - proofs: &[ProofOfStore], + proofs: &[ProofOfStore], opt_and_inline_batches: &[BatchInfo], ) -> BlockPayload { // Create the transaction payload @@ -1906,7 +1906,7 @@ mod test { fn 
create_mixed_expiration_proofs( block_timestamp: u64, signed_transactions: &[SignedTransaction], - ) -> (Vec, Vec) { + ) -> (Vec>, Vec) { let mut proofs = vec![]; let mut non_expired_transactions = vec![]; diff --git a/consensus/src/epoch_manager.rs b/consensus/src/epoch_manager.rs index 31181850b32ca..a7863113ffb37 100644 --- a/consensus/src/epoch_manager.rs +++ b/consensus/src/epoch_manager.rs @@ -66,7 +66,7 @@ use aptos_consensus_types::{ block_retrieval::BlockRetrievalRequest, common::{Author, Round}, epoch_retrieval::EpochRetrievalRequest, - proof_of_store::ProofCache, + proof_of_store::{BatchInfo, ProofCache}, utils::PayloadTxnsSize, }; use aptos_crypto::bls12381::PrivateKey; @@ -172,7 +172,7 @@ pub struct EpochManager { dag_config: DagConsensusConfig, payload_manager: Arc, rand_storage: Arc>, - proof_cache: ProofCache, + proof_cache: ProofCache, consensus_publisher: Option>, pending_blocks: Arc>, key_storage: PersistentSafetyStorage, diff --git a/consensus/src/network.rs b/consensus/src/network.rs index 33d84b8d6db80..214fc50a14ee2 100644 --- a/consensus/src/network.rs +++ b/consensus/src/network.rs @@ -28,7 +28,9 @@ use aptos_consensus_types::{ opt_proposal_msg::OptProposalMsg, order_vote_msg::OrderVoteMsg, pipeline::{commit_decision::CommitDecision, commit_vote::CommitVote}, - proof_of_store::{ProofOfStore, ProofOfStoreMsg, SignedBatchInfo, SignedBatchInfoMsg}, + proof_of_store::{ + BatchInfo, ProofOfStore, ProofOfStoreMsg, SignedBatchInfo, SignedBatchInfoMsg, + }, proposal_msg::ProposalMsg, round_timeout::RoundTimeoutMsg, sync_info::SyncInfo, @@ -202,15 +204,18 @@ pub trait QuorumStoreSender: Send + Clone { async fn send_signed_batch_info_msg( &self, - signed_batch_infos: Vec, + signed_batch_infos: Vec>, recipients: Vec, ); async fn broadcast_batch_msg(&mut self, batches: Vec); - async fn broadcast_proof_of_store_msg(&mut self, proof_of_stores: Vec); + async fn broadcast_proof_of_store_msg(&mut self, proof_of_stores: Vec>); - async fn 
send_proof_of_store_msg_to_self(&mut self, proof_of_stores: Vec); + async fn send_proof_of_store_msg_to_self( + &mut self, + proof_of_stores: Vec>, + ); } /// Implements the actual networking support for all consensus messaging. @@ -556,7 +561,7 @@ impl QuorumStoreSender for NetworkSender { async fn send_signed_batch_info_msg( &self, - signed_batch_infos: Vec, + signed_batch_infos: Vec>, recipients: Vec, ) { fail_point!("consensus::send::signed_batch_info", |_| ()); @@ -571,13 +576,13 @@ impl QuorumStoreSender for NetworkSender { self.broadcast(msg).await } - async fn broadcast_proof_of_store_msg(&mut self, proofs: Vec) { + async fn broadcast_proof_of_store_msg(&mut self, proofs: Vec>) { fail_point!("consensus::send::proof_of_store", |_| ()); let msg = ConsensusMsg::ProofOfStoreMsg(Box::new(ProofOfStoreMsg::new(proofs))); self.broadcast(msg).await } - async fn send_proof_of_store_msg_to_self(&mut self, proofs: Vec) { + async fn send_proof_of_store_msg_to_self(&mut self, proofs: Vec>) { fail_point!("consensus::send::proof_of_store", |_| ()); let msg = ConsensusMsg::ProofOfStoreMsg(Box::new(ProofOfStoreMsg::new(proofs))); self.send(msg, vec![self.author]).await diff --git a/consensus/src/network_interface.rs b/consensus/src/network_interface.rs index aef081a8273a3..9a159f3cbdd93 100644 --- a/consensus/src/network_interface.rs +++ b/consensus/src/network_interface.rs @@ -17,7 +17,7 @@ use aptos_consensus_types::{ opt_proposal_msg::OptProposalMsg, order_vote_msg::OrderVoteMsg, pipeline::{commit_decision::CommitDecision, commit_vote::CommitVote}, - proof_of_store::{ProofOfStoreMsg, SignedBatchInfoMsg}, + proof_of_store::{BatchInfo, BatchInfoExt, ProofOfStoreMsg, SignedBatchInfoMsg}, proposal_msg::ProposalMsg, round_timeout::RoundTimeoutMsg, sync_info::SyncInfo, @@ -71,9 +71,9 @@ pub enum ConsensusMsg { BatchResponse(Box), /// Quorum Store: Send a signed batch digest. 
This is a vote for the batch and a promise that /// the batch of transactions was received and will be persisted until batch expiration. - SignedBatchInfo(Box), + SignedBatchInfo(Box>), /// Quorum Store: Broadcast a certified proof of store (a digest that received 2f+1 votes). - ProofOfStoreMsg(Box), + ProofOfStoreMsg(Box>), /// DAG protocol message DAGMessage(DAGNetworkMessage), /// Commit message @@ -91,6 +91,10 @@ pub enum ConsensusMsg { BlockRetrievalRequest(Box), /// OptProposalMsg contains the optimistic proposal and sync info. OptProposalMsg(Box), + + SignedBatchInfoV2(Box>), + + ProofOfStoreMsgV2(Box>), } /// Network type for consensus @@ -121,6 +125,8 @@ impl ConsensusMsg { ConsensusMsg::BatchResponseV2(_) => "BatchResponseV2", ConsensusMsg::RoundTimeoutMsg(_) => "RoundTimeoutV2", ConsensusMsg::BlockRetrievalRequest(_) => "BlockRetrievalRequest", + ConsensusMsg::SignedBatchInfoV2(_) => "SignedBatchInfoV2", + ConsensusMsg::ProofOfStoreMsgV2(_) => "ProofOfStoreMsgV2", } } } diff --git a/consensus/src/quorum_store/batch_proof_queue.rs b/consensus/src/quorum_store/batch_proof_queue.rs index 01a16cef86cf7..7f67b5d97caf2 100644 --- a/consensus/src/quorum_store/batch_proof_queue.rs +++ b/consensus/src/quorum_store/batch_proof_queue.rs @@ -37,7 +37,7 @@ struct QueueItem { /// Contains the proof associated with the batch. /// It is optional as the proof can be updated after the summary. - proof: Option, + proof: Option>, /// The time when the proof is inserted into this item. proof_insertion_time: Option, } @@ -173,7 +173,7 @@ impl BatchProofQueue { } /// Add the ProofOfStore to proof queue. 
- pub(crate) fn insert_proof(&mut self, proof: ProofOfStore) { + pub(crate) fn insert_proof(&mut self, proof: ProofOfStore) { if proof.expiration() <= self.latest_block_timestamp { counters::inc_rejected_pos_count(counters::POS_EXPIRED_LABEL); return; @@ -342,7 +342,7 @@ impl BatchProofQueue { fn log_remaining_data_after_pull( &self, excluded_batches: &HashSet, - pulled_proofs: &[ProofOfStore], + pulled_proofs: &[ProofOfStore], ) { let mut num_proofs_remaining_after_pull = 0; let mut num_txns_remaining_after_pull = 0; @@ -406,7 +406,7 @@ impl BatchProofQueue { soft_max_txns_after_filtering: u64, return_non_full: bool, block_timestamp: Duration, - ) -> (Vec, PayloadTxnsSize, u64, bool) { + ) -> (Vec>, PayloadTxnsSize, u64, bool) { let (result, all_txns, unique_txns, is_full) = self.pull_internal( false, excluded_batches, diff --git a/consensus/src/quorum_store/batch_store.rs b/consensus/src/quorum_store/batch_store.rs index d5a1c65ed2a59..02364580a8b65 100644 --- a/consensus/src/quorum_store/batch_store.rs +++ b/consensus/src/quorum_store/batch_store.rs @@ -381,7 +381,7 @@ impl BatchStore { fn generate_signed_batch_info( &self, batch_info: BatchInfo, - ) -> Result { + ) -> Result, CryptoMaterialError> { fail_point!("quorum_store::create_invalid_signed_batch_info", |_| { Ok(SignedBatchInfo::new_with_signature( batch_info.clone(), @@ -392,7 +392,7 @@ impl BatchStore { SignedBatchInfo::new(batch_info, &self.validator_signer) } - fn persist_inner(&self, persist_request: PersistedValue) -> Option { + fn persist_inner(&self, persist_request: PersistedValue) -> Option> { match self.save(&persist_request) { Ok(needs_db) => { let batch_info = persist_request.batch_info().clone(); @@ -483,7 +483,7 @@ impl BatchStore { } impl BatchWriter for BatchStore { - fn persist(&self, persist_requests: Vec) -> Vec { + fn persist(&self, persist_requests: Vec) -> Vec> { let mut signed_infos = vec![]; for persist_request in persist_requests.into_iter() { if let Some(signed_info) = 
self.persist_inner(persist_request.clone()) { @@ -610,5 +610,5 @@ impl BatchReader for Batch } pub trait BatchWriter: Send + Sync { - fn persist(&self, persist_requests: Vec) -> Vec; + fn persist(&self, persist_requests: Vec) -> Vec>; } diff --git a/consensus/src/quorum_store/proof_coordinator.rs b/consensus/src/quorum_store/proof_coordinator.rs index ff5bba176a050..506250464c706 100644 --- a/consensus/src/quorum_store/proof_coordinator.rs +++ b/consensus/src/quorum_store/proof_coordinator.rs @@ -37,7 +37,7 @@ use tokio::{ #[derive(Debug)] pub(crate) enum ProofCoordinatorCommand { - AppendSignature(PeerId, SignedBatchInfoMsg), + AppendSignature(PeerId, SignedBatchInfoMsg), CommitNotification(Vec), Shutdown(TokioOneshot::Sender<()>), } @@ -76,7 +76,7 @@ impl IncrementalProofState { fn add_signature( &mut self, - signed_batch_info: &SignedBatchInfo, + signed_batch_info: &SignedBatchInfo, validator_verifier: &ValidatorVerifier, ) -> Result<(), SignedBatchInfoError> { if signed_batch_info.batch_info() != self.signature_aggregator.data() { @@ -138,7 +138,7 @@ impl IncrementalProofState { pub fn aggregate_and_verify( &mut self, validator_verifier: &ValidatorVerifier, - ) -> Result { + ) -> Result, SignedBatchInfoError> { if self.completed { panic!("Cannot call take twice, unexpected issue occurred"); } @@ -168,7 +168,7 @@ pub(crate) struct ProofCoordinator { timeouts: Timeouts, batch_reader: Arc, batch_generator_cmd_tx: tokio::sync::mpsc::Sender, - proof_cache: ProofCache, + proof_cache: ProofCache, broadcast_proofs: bool, batch_expiry_gap_when_init_usecs: u64, } @@ -180,7 +180,7 @@ impl ProofCoordinator { peer_id: PeerId, batch_reader: Arc, batch_generator_cmd_tx: tokio::sync::mpsc::Sender, - proof_cache: ProofCache, + proof_cache: ProofCache, broadcast_proofs: bool, batch_expiry_gap_when_init_usecs: u64, ) -> Self { @@ -200,7 +200,7 @@ impl ProofCoordinator { fn init_proof( &mut self, - signed_batch_info: &SignedBatchInfo, + signed_batch_info: &SignedBatchInfo, ) -> 
Result<(), SignedBatchInfoError> { // Check if the signed digest corresponding to our batch if signed_batch_info.author() != self.peer_id { @@ -235,9 +235,9 @@ impl ProofCoordinator { fn add_signature( &mut self, - signed_batch_info: SignedBatchInfo, + signed_batch_info: SignedBatchInfo, validator_verifier: &ValidatorVerifier, - ) -> Result, SignedBatchInfoError> { + ) -> Result>, SignedBatchInfoError> { if !self .batch_info_to_proof .contains_key(signed_batch_info.batch_info()) diff --git a/consensus/src/quorum_store/proof_manager.rs b/consensus/src/quorum_store/proof_manager.rs index de6913ed8aeec..3201574adaba9 100644 --- a/consensus/src/quorum_store/proof_manager.rs +++ b/consensus/src/quorum_store/proof_manager.rs @@ -21,7 +21,7 @@ use std::{cmp::min, collections::HashSet, sync::Arc, time::Duration}; #[derive(Debug)] pub enum ProofManagerCommand { - ReceiveProofs(ProofOfStoreMsg), + ReceiveProofs(ProofOfStoreMsg), ReceiveBatches(Vec<(BatchInfo, Vec)>), CommitNotification(u64, Vec), Shutdown(tokio::sync::oneshot::Sender<()>), @@ -62,7 +62,7 @@ impl ProofManager { } } - pub(crate) fn receive_proofs(&mut self, proofs: Vec) { + pub(crate) fn receive_proofs(&mut self, proofs: Vec>) { for proof in proofs.into_iter() { self.batch_proof_queue.insert_proof(proof); } diff --git a/consensus/src/quorum_store/quorum_store_builder.rs b/consensus/src/quorum_store/quorum_store_builder.rs index 77e78de8b002d..949b77fa40732 100644 --- a/consensus/src/quorum_store/quorum_store_builder.rs +++ b/consensus/src/quorum_store/quorum_store_builder.rs @@ -30,7 +30,9 @@ use crate::{ use aptos_channels::{aptos_channel, message_queues::QueueStyle}; use aptos_config::config::{BatchTransactionFilterConfig, QuorumStoreConfig}; use aptos_consensus_types::{ - common::Author, proof_of_store::ProofCache, request_response::GetPayloadCommand, + common::Author, + proof_of_store::{BatchInfo, ProofCache}, + request_response::GetPayloadCommand, }; use aptos_crypto::bls12381::PrivateKey; use 
aptos_logger::prelude::*; @@ -132,7 +134,7 @@ pub struct InnerBuilder { aptos_db: Arc, network_sender: NetworkSender, verifier: Arc, - proof_cache: ProofCache, + proof_cache: ProofCache, coordinator_tx: Sender, coordinator_rx: Option>, batch_generator_cmd_tx: tokio::sync::mpsc::Sender, @@ -168,7 +170,7 @@ impl InnerBuilder { aptos_db: Arc, network_sender: NetworkSender, verifier: Arc, - proof_cache: ProofCache, + proof_cache: ProofCache, quorum_store_storage: Arc, broadcast_proofs: bool, consensus_key: Arc, diff --git a/consensus/src/quorum_store/tests/batch_generator_test.rs b/consensus/src/quorum_store/tests/batch_generator_test.rs index a29d48226b246..a5169d0867926 100644 --- a/consensus/src/quorum_store/tests/batch_generator_test.rs +++ b/consensus/src/quorum_store/tests/batch_generator_test.rs @@ -14,7 +14,7 @@ use crate::{ use aptos_config::config::QuorumStoreConfig; use aptos_consensus_types::{ common::{TransactionInProgress, TransactionSummary}, - proof_of_store::SignedBatchInfo, + proof_of_store::{BatchInfo, SignedBatchInfo}, }; use aptos_mempool::{QuorumStoreRequest, QuorumStoreResponse}; use aptos_types::{quorum_store::BatchId, transaction::SignedTransaction}; @@ -35,7 +35,7 @@ impl MockBatchWriter { } impl BatchWriter for MockBatchWriter { - fn persist(&self, _persist_requests: Vec) -> Vec { + fn persist(&self, _persist_requests: Vec) -> Vec> { vec![] } } diff --git a/consensus/src/quorum_store/tests/batch_proof_queue_test.rs b/consensus/src/quorum_store/tests/batch_proof_queue_test.rs index 731aa730bedad..a75e031f1ceb9 100644 --- a/consensus/src/quorum_store/tests/batch_proof_queue_test.rs +++ b/consensus/src/quorum_store/tests/batch_proof_queue_test.rs @@ -23,7 +23,7 @@ fn proof_of_store( batch_id: BatchId, gas_bucket_start: u64, expiration: u64, -) -> ProofOfStore { +) -> ProofOfStore { ProofOfStore::new( BatchInfo::new( author, @@ -45,7 +45,7 @@ fn proof_of_store_with_size( gas_bucket_start: u64, expiration: u64, num_txns: u64, -) -> ProofOfStore { 
+) -> ProofOfStore { ProofOfStore::new( BatchInfo::new( author, @@ -100,7 +100,7 @@ async fn test_proof_queue_sorting() { ); let mut count_author_0 = 0; let mut count_author_1 = 0; - let mut prev: Option<&ProofOfStore> = None; + let mut prev: Option<&ProofOfStore> = None; for batch in &pulled { if let Some(prev) = prev { assert!(prev.gas_bucket_start() >= batch.gas_bucket_start()); @@ -129,7 +129,7 @@ async fn test_proof_queue_sorting() { ); let mut count_author_0 = 0; let mut count_author_1 = 0; - let mut prev: Option<&ProofOfStore> = None; + let mut prev: Option<&ProofOfStore> = None; for batch in &pulled { if let Some(prev) = prev { assert!(prev.gas_bucket_start() >= batch.gas_bucket_start()); diff --git a/consensus/src/quorum_store/tests/batch_requester_test.rs b/consensus/src/quorum_store/tests/batch_requester_test.rs index 1f171ce722bca..01476e90eec31 100644 --- a/consensus/src/quorum_store/tests/batch_requester_test.rs +++ b/consensus/src/quorum_store/tests/batch_requester_test.rs @@ -11,7 +11,7 @@ use crate::{ }; use aptos_consensus_types::{ common::Author, - proof_of_store::{ProofOfStore, SignedBatchInfo}, + proof_of_store::{BatchInfo, ProofOfStore, SignedBatchInfo}, }; use aptos_crypto::HashValue; use aptos_infallible::Mutex; @@ -56,7 +56,7 @@ impl QuorumStoreSender for MockBatchRequester { async fn send_signed_batch_info_msg( &self, - _signed_batch_infos: Vec, + _signed_batch_infos: Vec>, _recipients: Vec, ) { unimplemented!() @@ -66,11 +66,17 @@ impl QuorumStoreSender for MockBatchRequester { unimplemented!() } - async fn broadcast_proof_of_store_msg(&mut self, _proof_of_stores: Vec) { + async fn broadcast_proof_of_store_msg( + &mut self, + _proof_of_stores: Vec>, + ) { unimplemented!() } - async fn send_proof_of_store_msg_to_self(&mut self, _proof_of_stores: Vec) { + async fn send_proof_of_store_msg_to_self( + &mut self, + _proof_of_stores: Vec>, + ) { unimplemented!() } } diff --git a/consensus/src/quorum_store/tests/proof_manager_test.rs 
b/consensus/src/quorum_store/tests/proof_manager_test.rs index 4f3ba16eaec9b..9f188e7159821 100644 --- a/consensus/src/quorum_store/tests/proof_manager_test.rs +++ b/consensus/src/quorum_store/tests/proof_manager_test.rs @@ -20,7 +20,7 @@ fn create_proof_manager() -> ProofManager { ProofManager::new(PeerId::random(), 10, 10, batch_store, true, true, 1) } -fn create_proof(author: PeerId, expiration: u64, batch_sequence: u64) -> ProofOfStore { +fn create_proof(author: PeerId, expiration: u64, batch_sequence: u64) -> ProofOfStore { create_proof_with_gas(author, expiration, batch_sequence, 0) } @@ -29,7 +29,7 @@ fn create_proof_with_gas( expiration: u64, batch_sequence: u64, gas_bucket_start: u64, -) -> ProofOfStore { +) -> ProofOfStore { let digest = HashValue::random(); let batch_id = BatchId::new_for_test(batch_sequence); ProofOfStore::new( @@ -72,7 +72,7 @@ async fn get_proposal( fn assert_payload_response( payload: Payload, - expected: &[ProofOfStore], + expected: &[ProofOfStore], max_txns_from_block_to_execute: Option, expected_block_gas_limit: Option, ) { @@ -116,7 +116,7 @@ async fn get_proposal_and_assert( proof_manager: &mut ProofManager, max_txns: u64, filter: &[BatchInfo], - expected: &[ProofOfStore], + expected: &[ProofOfStore], ) { assert_payload_response( get_proposal(proof_manager, max_txns, filter).await, diff --git a/consensus/src/round_manager.rs b/consensus/src/round_manager.rs index b5ff3dbe11229..53146a5a451b5 100644 --- a/consensus/src/round_manager.rs +++ b/consensus/src/round_manager.rs @@ -45,7 +45,7 @@ use aptos_consensus_types::{ order_vote::OrderVote, order_vote_msg::OrderVoteMsg, pipelined_block::PipelinedBlock, - proof_of_store::{ProofCache, ProofOfStoreMsg, SignedBatchInfoMsg}, + proof_of_store::{BatchInfo, ProofCache, ProofOfStoreMsg, SignedBatchInfoMsg}, proposal_msg::ProposalMsg, quorum_cert::QuorumCert, round_timeout::{RoundTimeout, RoundTimeoutMsg, RoundTimeoutReason}, @@ -95,8 +95,8 @@ pub enum UnverifiedEvent { OrderVoteMsg(Box), 
SyncInfo(Box), BatchMsg(Box), - SignedBatchInfo(Box), - ProofOfStoreMsg(Box), + SignedBatchInfo(Box>), + ProofOfStoreMsg(Box>), OptProposalMsg(Box), } @@ -107,7 +107,7 @@ impl UnverifiedEvent { self, peer_id: PeerId, validator: &ValidatorVerifier, - proof_cache: &ProofCache, + proof_cache: &ProofCache, quorum_store_enabled: bool, self_message: bool, max_num_batches: usize, @@ -240,8 +240,8 @@ pub enum VerifiedEvent { OrderVoteMsg(Box), UnverifiedSyncInfo(Box), BatchMsg(Box), - SignedBatchInfo(Box), - ProofOfStoreMsg(Box), + SignedBatchInfo(Box>), + ProofOfStoreMsg(Box>), // local messages LocalTimeout(Round), // Shutdown the NetworkListener diff --git a/consensus/src/test_utils/mock_quorum_store_sender.rs b/consensus/src/test_utils/mock_quorum_store_sender.rs index bd962d348b51f..f7a7ddd5dff7d 100644 --- a/consensus/src/test_utils/mock_quorum_store_sender.rs +++ b/consensus/src/test_utils/mock_quorum_store_sender.rs @@ -8,7 +8,9 @@ use crate::{ }; use aptos_consensus_types::{ common::Author, - proof_of_store::{ProofOfStore, ProofOfStoreMsg, SignedBatchInfo, SignedBatchInfoMsg}, + proof_of_store::{ + BatchInfo, ProofOfStore, ProofOfStoreMsg, SignedBatchInfo, SignedBatchInfoMsg, + }, }; use std::time::Duration; use tokio::sync::mpsc::Sender; @@ -37,7 +39,7 @@ impl QuorumStoreSender for MockQuorumStoreSender { async fn send_signed_batch_info_msg( &self, - signed_batch_infos: Vec, + signed_batch_infos: Vec>, recipients: Vec, ) { self.tx @@ -55,7 +57,10 @@ impl QuorumStoreSender for MockQuorumStoreSender { unimplemented!() } - async fn broadcast_proof_of_store_msg(&mut self, proof_of_stores: Vec) { + async fn broadcast_proof_of_store_msg( + &mut self, + proof_of_stores: Vec>, + ) { self.tx .send(( ConsensusMsg::ProofOfStoreMsg(Box::new(ProofOfStoreMsg::new(proof_of_stores))), @@ -65,7 +70,10 @@ impl QuorumStoreSender for MockQuorumStoreSender { .expect("We should be able to send the proof of store message"); } - async fn send_proof_of_store_msg_to_self(&mut self, 
_proof_of_stores: Vec) { + async fn send_proof_of_store_msg_to_self( + &mut self, + _proof_of_stores: Vec>, + ) { unimplemented!() } } From e5fe2e38ebaa3841dfd5fb77c993b2fc147f09e4 Mon Sep 17 00:00:00 2001 From: Balaji Arun Date: Fri, 7 Nov 2025 08:47:31 -0800 Subject: [PATCH 5/9] [qs] introduce BatchInfoExt for Proofs and Signed infos --- consensus/consensus-types/src/common.rs | 24 +-- .../consensus-types/src/opt_proposal_msg.rs | 2 +- consensus/consensus-types/src/payload.rs | 9 +- .../consensus-types/src/proof_of_store.rs | 187 +++++++++++++++++- consensus/consensus-types/src/proposal_msg.rs | 9 +- consensus/src/epoch_manager.rs | 2 +- consensus/src/network_interface.rs | 4 +- .../quorum_store_payload_manager.rs | 14 +- .../src/quorum_store/batch_coordinator.rs | 2 +- consensus/src/quorum_store/batch_generator.rs | 4 +- .../src/quorum_store/batch_proof_queue.rs | 38 ++-- .../src/quorum_store/proof_coordinator.rs | 41 ++-- consensus/src/quorum_store/proof_manager.rs | 33 +++- .../src/quorum_store/quorum_store_builder.rs | 4 +- .../quorum_store/quorum_store_coordinator.rs | 4 +- .../tests/batch_proof_queue_test.rs | 16 +- .../tests/proof_coordinator_test.rs | 10 +- .../quorum_store/tests/proof_manager_test.rs | 56 ++++-- consensus/src/quorum_store/utils.rs | 6 +- consensus/src/round_manager.rs | 24 ++- consensus/src/round_manager_tests/mod.rs | 4 +- 21 files changed, 353 insertions(+), 140 deletions(-) diff --git a/consensus/consensus-types/src/common.rs b/consensus/consensus-types/src/common.rs index fdd7dbbd86a00..f27597edca7f6 100644 --- a/consensus/consensus-types/src/common.rs +++ b/consensus/consensus-types/src/common.rs @@ -4,7 +4,7 @@ use crate::{ payload::{OptBatches, OptQuorumStorePayload, PayloadExecutionLimit, TxnAndGasLimits}, - proof_of_store::{BatchInfo, ProofCache, ProofOfStore}, + proof_of_store::{BatchInfo, BatchInfoExt, ProofCache, ProofOfStore, TBatchInfo}, }; use anyhow::ensure; use aptos_crypto::{ @@ -518,13 +518,13 @@ impl Payload { fn 
verify_with_cache( proofs: &[ProofOfStore], validator: &ValidatorVerifier, - proof_cache: &ProofCache, + proof_cache: &ProofCache, ) -> anyhow::Result<()> { let unverified: Vec<_> = proofs .iter() .filter(|proof| { proof_cache - .get(proof.info()) + .get(&BatchInfoExt::from(proof.info().clone())) .is_none_or(|cached_proof| cached_proof != *proof.multi_signature()) }) .collect(); @@ -571,7 +571,7 @@ impl Payload { pub fn verify( &self, verifier: &ValidatorVerifier, - proof_cache: &ProofCache, + proof_cache: &ProofCache, quorum_store_enabled: bool, ) -> anyhow::Result<()> { match (quorum_store_enabled, self) { @@ -741,7 +741,7 @@ impl BatchPayload { #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] pub enum PayloadFilter { DirectMempool(Vec), - InQuorumStore(HashSet), + InQuorumStore(HashSet), Empty, } @@ -772,21 +772,21 @@ impl From<&Vec<&Payload>> for PayloadFilter { match payload { Payload::InQuorumStore(proof_with_status) => { for proof in &proof_with_status.proofs { - exclude_batches.insert(proof.info().clone()); + exclude_batches.insert(proof.info().clone().into()); } }, Payload::InQuorumStoreWithLimit(proof_with_status) => { for proof in &proof_with_status.proof_with_data.proofs { - exclude_batches.insert(proof.info().clone()); + exclude_batches.insert(proof.info().clone().into()); } }, Payload::QuorumStoreInlineHybrid(inline_batches, proof_with_data, _) | Payload::QuorumStoreInlineHybridV2(inline_batches, proof_with_data, _) => { for proof in &proof_with_data.proofs { - exclude_batches.insert(proof.info().clone()); + exclude_batches.insert(proof.info().clone().into()); } for (batch_info, _) in inline_batches { - exclude_batches.insert(batch_info.clone()); + exclude_batches.insert(batch_info.clone().into()); } }, Payload::DirectMempool(_) => { @@ -794,13 +794,13 @@ impl From<&Vec<&Payload>> for PayloadFilter { }, Payload::OptQuorumStore(opt_qs_payload) => { for batch in opt_qs_payload.inline_batches().iter() { - 
exclude_batches.insert(batch.info().clone()); + exclude_batches.insert(batch.info().clone().into()); } for batch_info in &opt_qs_payload.opt_batches().batch_summary { - exclude_batches.insert(batch_info.clone()); + exclude_batches.insert(batch_info.clone().into()); } for proof in &opt_qs_payload.proof_with_data().batch_summary { - exclude_batches.insert(proof.info().clone()); + exclude_batches.insert(proof.info().clone().into()); } }, } diff --git a/consensus/consensus-types/src/opt_proposal_msg.rs b/consensus/consensus-types/src/opt_proposal_msg.rs index cf6c21a0142b1..ffee840939130 100644 --- a/consensus/consensus-types/src/opt_proposal_msg.rs +++ b/consensus/consensus-types/src/opt_proposal_msg.rs @@ -101,7 +101,7 @@ impl OptProposalMsg { &self, sender: Author, validator: &ValidatorVerifier, - proof_cache: &ProofCache, + proof_cache: &ProofCache, quorum_store_enabled: bool, ) -> Result<()> { ensure!( diff --git a/consensus/consensus-types/src/payload.rs b/consensus/consensus-types/src/payload.rs index 0eaeabd2fb512..c9be597722df1 100644 --- a/consensus/consensus-types/src/payload.rs +++ b/consensus/consensus-types/src/payload.rs @@ -1,7 +1,7 @@ // Copyright (c) Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::proof_of_store::{BatchInfo, ProofOfStore}; +use crate::proof_of_store::{BatchInfo, BatchInfoExt, ProofOfStore}; use anyhow::ensure; use aptos_types::{transaction::SignedTransaction, PeerId}; use core::fmt; @@ -15,6 +15,10 @@ pub type OptBatches = BatchPointer; pub type ProofBatches = BatchPointer>; +pub type OptBatchesExt = BatchPointer; + +pub type ProofBatchesExt = BatchPointer; + pub trait TDataInfo { fn num_txns(&self) -> u64; @@ -290,7 +294,7 @@ pub struct OptQuorumStorePayloadV1 { } impl OptQuorumStorePayloadV1 { - pub fn get_all_batch_infos(self) -> Vec { + pub fn get_all_batch_infos(self) -> Vec { let Self { inline_batches, opt_batches, @@ -303,6 +307,7 @@ impl OptQuorumStorePayloadV1 { .map(|batch| batch.batch_info) 
.chain(opt_batches) .chain(proofs.into_iter().map(|proof| proof.info().clone())) + .map(|info| info.into()) .collect() } diff --git a/consensus/consensus-types/src/proof_of_store.rs b/consensus/consensus-types/src/proof_of_store.rs index 0b53b065c37b0..cd9d16eb62844 100644 --- a/consensus/consensus-types/src/proof_of_store.rs +++ b/consensus/consensus-types/src/proof_of_store.rs @@ -19,7 +19,9 @@ use std::{ ops::Deref, }; -pub trait TBatchInfo: Serialize + CryptoHash + Debug + Clone + Hash + Eq { +pub trait TBatchInfo: + Serialize + CryptoHash + Debug + Clone + Hash + Eq + PartialEq + Into +{ fn epoch(&self) -> u64; fn expiration(&self) -> u64; @@ -29,6 +31,16 @@ pub trait TBatchInfo: Serialize + CryptoHash + Debug + Clone + Hash + Eq { fn num_bytes(&self) -> u64; fn as_batch_info(&self) -> &BatchInfo; + + fn batch_id(&self) -> BatchId; + + fn author(&self) -> PeerId; + + fn digest(&self) -> &HashValue; + + fn gas_bucket_start(&self) -> u64; + + fn size(&self) -> PayloadTxnsSize; } #[derive( @@ -105,6 +117,12 @@ impl BatchInfo { } } +impl From for BatchInfoExt { + fn from(info: BatchInfo) -> Self { + Self::V1 { info } + } +} + impl TBatchInfo for BatchInfo { fn epoch(&self) -> u64 { self.epoch @@ -125,6 +143,26 @@ impl TBatchInfo for BatchInfo { fn as_batch_info(&self) -> &BatchInfo { &self } + + fn batch_id(&self) -> BatchId { + self.batch_id + } + + fn author(&self) -> PeerId { + self.author + } + + fn digest(&self) -> &HashValue { + &self.digest + } + + fn gas_bucket_start(&self) -> u64 { + self.gas_bucket_start + } + + fn size(&self) -> PayloadTxnsSize { + PayloadTxnsSize::new(self.num_txns, self.num_bytes) + } } impl Display for BatchInfo { @@ -151,6 +189,85 @@ impl TDataInfo for BatchInfo { } } +#[derive( + Clone, Debug, Deserialize, Serialize, CryptoHasher, BCSCryptoHash, PartialEq, Eq, Hash, +)] +pub enum BatchInfoExt { + V1 { + info: BatchInfo, + }, + V2 { + info: BatchInfo, + extra: ExtraBatchInfo, + }, +} + +impl BatchInfoExt { + pub fn info(&self) -> 
&BatchInfo { + match self { + BatchInfoExt::V1 { info } => info, + BatchInfoExt::V2 { info, .. } => info, + } + } +} + +impl TBatchInfo for BatchInfoExt { + fn epoch(&self) -> u64 { + self.info().epoch() + } + + fn expiration(&self) -> u64 { + self.info().expiration() + } + + fn num_txns(&self) -> u64 { + self.info().num_txns() + } + + fn num_bytes(&self) -> u64 { + self.info().num_bytes() + } + + fn as_batch_info(&self) -> &BatchInfo { + self.info() + } + + fn batch_id(&self) -> BatchId { + self.info().batch_id() + } + + fn author(&self) -> PeerId { + self.info().author() + } + + fn digest(&self) -> &HashValue { + self.info().digest() + } + + fn gas_bucket_start(&self) -> u64 { + self.info().gas_bucket_start() + } + + fn size(&self) -> PayloadTxnsSize { + PayloadTxnsSize::new(self.num_txns(), self.num_bytes()) + } +} + +#[derive( + Clone, Debug, Deserialize, Serialize, CryptoHasher, BCSCryptoHash, PartialEq, Eq, Hash, +)] +pub struct ExtraBatchInfo { + pub batch_kind: BatchKind, +} + +#[derive( + Clone, Debug, Deserialize, Serialize, CryptoHasher, BCSCryptoHash, PartialEq, Eq, Hash, +)] +pub enum BatchKind { + Normal, + Encrypted, +} + #[derive(Clone, Debug, Deserialize, Serialize)] pub struct SignedBatchInfoMsg { signed_infos: Vec>, @@ -203,6 +320,18 @@ where } } +impl From> for SignedBatchInfoMsg { + fn from(info: SignedBatchInfoMsg) -> Self { + Self { + signed_infos: info + .signed_infos + .into_iter() + .map(|signed_info| signed_info.into()) + .collect(), + } + } +} + #[derive(Clone, Debug, Deserialize, Serialize)] pub struct SignedBatchInfo { info: T, @@ -294,6 +423,21 @@ impl Deref for SignedBatchInfo { } } +impl From> for SignedBatchInfo { + fn from(signed_batch_info: SignedBatchInfo) -> Self { + let SignedBatchInfo { + info, + signer, + signature, + } = signed_batch_info; + Self { + info: info.into(), + signer, + signature, + } + } +} + #[derive(Debug, PartialEq)] pub enum SignedBatchInfoError { WrongAuthor, @@ -323,7 +467,7 @@ where &self, max_num_proofs: 
usize, validator: &ValidatorVerifier, - cache: &ProofCache, + cache: &ProofCache, ) -> anyhow::Result<()> { ensure!(!self.proofs.is_empty(), "Empty message"); ensure!( @@ -357,7 +501,19 @@ where } } -pub type ProofCache = Cache; +impl From> for ProofOfStoreMsg { + fn from(proof_msg: ProofOfStoreMsg) -> Self { + Self { + proofs: proof_msg + .proofs + .into_iter() + .map(|proof| proof.into()) + .collect(), + } + } +} + +pub type ProofCache = Cache; #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)] pub struct ProofOfStore { @@ -376,12 +532,9 @@ where } } - pub fn verify( - &self, - validator: &ValidatorVerifier, - cache: &ProofCache, - ) -> anyhow::Result<()> { - if let Some(signature) = cache.get(&self.info) { + pub fn verify(&self, validator: &ValidatorVerifier, cache: &ProofCache) -> anyhow::Result<()> { + let batch_info_ext: BatchInfoExt = self.info.clone().into(); + if let Some(signature) = cache.get(&batch_info_ext) { if signature == self.multi_signature { return Ok(()); } @@ -393,7 +546,7 @@ where self.info )); if result.is_ok() { - cache.insert(self.info.clone(), self.multi_signature.clone()); + cache.insert(batch_info_ext, self.multi_signature.clone()); } result } @@ -411,6 +564,10 @@ where pub fn multi_signature(&self) -> &AggregateSignature { &self.multi_signature } + + pub fn unpack(self) -> (T, AggregateSignature) { + (self.info, self.multi_signature) + } } impl Deref for ProofOfStore { @@ -441,3 +598,13 @@ where self.shuffled_signers(ordered_authors) } } + +impl From> for ProofOfStore { + fn from(proof: ProofOfStore) -> Self { + let (info, sig) = proof.unpack(); + Self { + info: info.into(), + multi_signature: sig, + } + } +} diff --git a/consensus/consensus-types/src/proposal_msg.rs b/consensus/consensus-types/src/proposal_msg.rs index 11c14ce723b3f..3784651ac88c9 100644 --- a/consensus/consensus-types/src/proposal_msg.rs +++ b/consensus/consensus-types/src/proposal_msg.rs @@ -2,12 +2,7 @@ // Parts of the project are originally copyright © 
Meta Platforms, Inc. // SPDX-License-Identifier: Apache-2.0 -use crate::{ - block::Block, - common::Author, - proof_of_store::{BatchInfo, ProofCache}, - sync_info::SyncInfo, -}; +use crate::{block::Block, common::Author, proof_of_store::ProofCache, sync_info::SyncInfo}; use anyhow::{anyhow, ensure, format_err, Context, Result}; use aptos_short_hex_str::AsShortHexStr; use aptos_types::validator_verifier::ValidatorVerifier; @@ -89,7 +84,7 @@ impl ProposalMsg { &self, sender: Author, validator: &ValidatorVerifier, - proof_cache: &ProofCache, + proof_cache: &ProofCache, quorum_store_enabled: bool, ) -> Result<()> { if let Some(proposal_author) = self.proposal.author() { diff --git a/consensus/src/epoch_manager.rs b/consensus/src/epoch_manager.rs index a7863113ffb37..97b05f3d4b2d7 100644 --- a/consensus/src/epoch_manager.rs +++ b/consensus/src/epoch_manager.rs @@ -172,7 +172,7 @@ pub struct EpochManager { dag_config: DagConsensusConfig, payload_manager: Arc, rand_storage: Arc>, - proof_cache: ProofCache, + proof_cache: ProofCache, consensus_publisher: Option>, pending_blocks: Arc>, key_storage: PersistentSafetyStorage, diff --git a/consensus/src/network_interface.rs b/consensus/src/network_interface.rs index 9a159f3cbdd93..3591016aa6fe4 100644 --- a/consensus/src/network_interface.rs +++ b/consensus/src/network_interface.rs @@ -92,7 +92,7 @@ pub enum ConsensusMsg { /// OptProposalMsg contains the optimistic proposal and sync info. 
OptProposalMsg(Box), - SignedBatchInfoV2(Box>), + SignedBatchInfoMsgV2(Box>), ProofOfStoreMsgV2(Box>), } @@ -125,7 +125,7 @@ impl ConsensusMsg { ConsensusMsg::BatchResponseV2(_) => "BatchResponseV2", ConsensusMsg::RoundTimeoutMsg(_) => "RoundTimeoutV2", ConsensusMsg::BlockRetrievalRequest(_) => "BlockRetrievalRequest", - ConsensusMsg::SignedBatchInfoV2(_) => "SignedBatchInfoV2", + ConsensusMsg::SignedBatchInfoMsgV2(_) => "SignedBatchInfoV2", ConsensusMsg::ProofOfStoreMsgV2(_) => "ProofOfStoreMsgV2", } } diff --git a/consensus/src/payload_manager/quorum_store_payload_manager.rs b/consensus/src/payload_manager/quorum_store_payload_manager.rs index eb02e66b3a5eb..e9d005dc817ca 100644 --- a/consensus/src/payload_manager/quorum_store_payload_manager.rs +++ b/consensus/src/payload_manager/quorum_store_payload_manager.rs @@ -16,7 +16,7 @@ use aptos_consensus_types::{ block::Block, common::{Author, Payload, ProofWithData}, payload::{BatchPointer, TDataInfo}, - proof_of_store::BatchInfo, + proof_of_store::{BatchInfo, BatchInfoExt}, }; use aptos_crypto::HashValue; use aptos_executor_types::*; @@ -28,7 +28,7 @@ use itertools::Itertools; use std::{collections::HashMap, future::Future, ops::Deref, pin::Pin, sync::Arc}; pub trait TQuorumStoreCommitNotifier: Send + Sync { - fn notify(&self, block_timestamp: u64, batches: Vec); + fn notify(&self, block_timestamp: u64, batches: Vec); } pub struct QuorumStoreCommitNotifier { @@ -42,7 +42,7 @@ impl QuorumStoreCommitNotifier { } impl TQuorumStoreCommitNotifier for QuorumStoreCommitNotifier { - fn notify(&self, block_timestamp: u64, batches: Vec) { + fn notify(&self, block_timestamp: u64, batches: Vec) { let mut tx = self.coordinator_tx.clone(); if let Err(e) = tx.try_send(CoordinatorCommand::CommitNotification( @@ -178,24 +178,24 @@ impl TPayloadManager for QuorumStorePayloadManager { Payload::InQuorumStore(proof_with_status) => proof_with_status .proofs .iter() - .map(|proof| proof.info().clone()) + .map(|proof| 
proof.info().clone().into()) .collect::>(), Payload::InQuorumStoreWithLimit(proof_with_status) => proof_with_status .proof_with_data .proofs .iter() - .map(|proof| proof.info().clone()) + .map(|proof| proof.info().clone().into()) .collect::>(), Payload::QuorumStoreInlineHybrid(inline_batches, proof_with_data, _) | Payload::QuorumStoreInlineHybridV2(inline_batches, proof_with_data, _) => { inline_batches .iter() - .map(|(batch_info, _)| batch_info.clone()) + .map(|(batch_info, _)| batch_info.clone().into()) .chain( proof_with_data .proofs .iter() - .map(|proof| proof.info().clone()), + .map(|proof| proof.info().clone().into()), ) .collect::>() }, diff --git a/consensus/src/quorum_store/batch_coordinator.rs b/consensus/src/quorum_store/batch_coordinator.rs index 7f794db5e0e03..0486d6f0be479 100644 --- a/consensus/src/quorum_store/batch_coordinator.rs +++ b/consensus/src/quorum_store/batch_coordinator.rs @@ -93,7 +93,7 @@ impl BatchCoordinator { .iter() .map(|persisted_value| { ( - persisted_value.batch_info().clone(), + persisted_value.batch_info().clone().into(), persisted_value.summary(), ) }) diff --git a/consensus/src/quorum_store/batch_generator.rs b/consensus/src/quorum_store/batch_generator.rs index cba8e670d97e3..4d3765f755049 100644 --- a/consensus/src/quorum_store/batch_generator.rs +++ b/consensus/src/quorum_store/batch_generator.rs @@ -14,7 +14,7 @@ use crate::{ use aptos_config::config::QuorumStoreConfig; use aptos_consensus_types::{ common::{TransactionInProgress, TransactionSummary}, - proof_of_store::BatchInfo, + proof_of_store::{BatchInfoExt, TBatchInfo}, }; use aptos_experimental_runtimes::thread_manager::optimal_min_len; use aptos_logger::prelude::*; @@ -31,7 +31,7 @@ use tokio::time::Interval; #[derive(Debug)] pub enum BatchGeneratorCommand { - CommitNotification(u64, Vec), + CommitNotification(u64, Vec), ProofExpiration(Vec), RemoteBatch(Batch), Shutdown(tokio::sync::oneshot::Sender<()>), diff --git 
a/consensus/src/quorum_store/batch_proof_queue.rs b/consensus/src/quorum_store/batch_proof_queue.rs index 7f67b5d97caf2..0563212ba69b9 100644 --- a/consensus/src/quorum_store/batch_proof_queue.rs +++ b/consensus/src/quorum_store/batch_proof_queue.rs @@ -9,7 +9,7 @@ use crate::quorum_store::counters; use aptos_consensus_types::{ common::{Author, TxnSummaryWithExpiration}, payload::TDataInfo, - proof_of_store::{BatchInfo, ProofOfStore}, + proof_of_store::{BatchInfoExt, ProofOfStore, TBatchInfo}, utils::PayloadTxnsSize, }; use aptos_logger::{info, sample, sample::SampleRate, warn}; @@ -30,14 +30,14 @@ use std::{ /// batch. struct QueueItem { /// The info of the Batch this item stores - info: BatchInfo, + info: BatchInfoExt, /// Contains the summary of transactions in the batch. /// It is optional as the summary can be updated after the proof. txn_summaries: Option>, /// Contains the proof associated with the batch. /// It is optional as the proof can be updated after the summary. - proof: Option>, + proof: Option>, /// The time when the proof is inserted into this item. proof_insertion_time: Option, } @@ -57,7 +57,7 @@ impl QueueItem { pub struct BatchProofQueue { my_peer_id: PeerId, // Queue per peer to ensure fairness between peers and priority within peer - author_to_batches: HashMap>, + author_to_batches: HashMap>, // Map of Batch key to QueueItem containing Batch data and proofs items: HashMap, // Number of unexpired and uncommitted proofs in which the txn_summary = (sender, replay protector, hash, expiration) @@ -173,7 +173,7 @@ impl BatchProofQueue { } /// Add the ProofOfStore to proof queue. 
- pub(crate) fn insert_proof(&mut self, proof: ProofOfStore) { + pub(crate) fn insert_proof(&mut self, proof: ProofOfStore) { if proof.expiration() <= self.latest_block_timestamp { counters::inc_rejected_pos_count(counters::POS_EXPIRED_LABEL); return; @@ -258,7 +258,7 @@ impl BatchProofQueue { pub fn insert_batches( &mut self, - batches_with_txn_summaries: Vec<(BatchInfo, Vec)>, + batches_with_txn_summaries: Vec<(BatchInfoExt, Vec)>, ) { let start = Instant::now(); @@ -341,8 +341,8 @@ impl BatchProofQueue { fn log_remaining_data_after_pull( &self, - excluded_batches: &HashSet, - pulled_proofs: &[ProofOfStore], + excluded_batches: &HashSet, + pulled_proofs: &[ProofOfStore], ) { let mut num_proofs_remaining_after_pull = 0; let mut num_txns_remaining_after_pull = 0; @@ -400,13 +400,13 @@ impl BatchProofQueue { // whether the proof queue is fully utilized. pub(crate) fn pull_proofs( &mut self, - excluded_batches: &HashSet, + excluded_batches: &HashSet, max_txns: PayloadTxnsSize, max_txns_after_filtering: u64, soft_max_txns_after_filtering: u64, return_non_full: bool, block_timestamp: Duration, - ) -> (Vec>, PayloadTxnsSize, u64, bool) { + ) -> (Vec>, PayloadTxnsSize, u64, bool) { let (result, all_txns, unique_txns, is_full) = self.pull_internal( false, excluded_batches, @@ -456,7 +456,7 @@ impl BatchProofQueue { pub fn pull_batches( &mut self, - excluded_batches: &HashSet, + excluded_batches: &HashSet, exclude_authors: &HashSet, max_txns: PayloadTxnsSize, max_txns_after_filtering: u64, @@ -464,7 +464,7 @@ impl BatchProofQueue { return_non_full: bool, block_timestamp: Duration, minimum_batch_age_usecs: Option, - ) -> (Vec, PayloadTxnsSize, u64) { + ) -> (Vec, PayloadTxnsSize, u64) { let (result, pulled_txns, unique_txns, is_full) = self.pull_batches_internal( excluded_batches, exclude_authors, @@ -489,7 +489,7 @@ impl BatchProofQueue { pub fn pull_batches_internal( &mut self, - excluded_batches: &HashSet, + excluded_batches: &HashSet, exclude_authors: &HashSet, 
max_txns: PayloadTxnsSize, max_txns_after_filtering: u64, @@ -497,7 +497,7 @@ impl BatchProofQueue { return_non_full: bool, block_timestamp: Duration, minimum_batch_age_usecs: Option, - ) -> (Vec, PayloadTxnsSize, u64, bool) { + ) -> (Vec, PayloadTxnsSize, u64, bool) { let (result, all_txns, unique_txns, is_full) = self.pull_internal( true, excluded_batches, @@ -515,14 +515,14 @@ impl BatchProofQueue { pub fn pull_batches_with_transactions( &mut self, - excluded_batches: &HashSet, + excluded_batches: &HashSet, max_txns: PayloadTxnsSize, max_txns_after_filtering: u64, soft_max_txns_after_filtering: u64, return_non_full: bool, block_timestamp: Duration, ) -> ( - Vec<(BatchInfo, Vec)>, + Vec<(BatchInfoExt, Vec)>, PayloadTxnsSize, u64, ) { @@ -562,7 +562,7 @@ impl BatchProofQueue { fn pull_internal( &mut self, batches_without_proofs: bool, - excluded_batches: &HashSet, + excluded_batches: &HashSet, exclude_authors: &HashSet, max_txns: PayloadTxnsSize, max_txns_after_filtering: u64, @@ -844,7 +844,7 @@ impl BatchProofQueue { } // Mark in the hashmap committed PoS, but keep them until they expire - pub(crate) fn mark_committed(&mut self, batches: Vec) { + pub(crate) fn mark_committed(&mut self, batches: Vec) { let start = Instant::now(); for batch in batches.into_iter() { let batch_key = BatchKey::from_info(&batch); @@ -889,7 +889,7 @@ impl BatchProofQueue { // When the batch is expired, then it will be removed from items. 
item.mark_committed(); } else { - let batch_sort_key = BatchSortKey::from_info(batch.info()); + let batch_sort_key = BatchSortKey::from_info(&batch); self.expirations .add_item(batch_sort_key.clone(), batch.expiration()); self.author_to_batches diff --git a/consensus/src/quorum_store/proof_coordinator.rs b/consensus/src/quorum_store/proof_coordinator.rs index 506250464c706..973b1fdb87f7e 100644 --- a/consensus/src/quorum_store/proof_coordinator.rs +++ b/consensus/src/quorum_store/proof_coordinator.rs @@ -16,8 +16,8 @@ use crate::{ use aptos_consensus_types::{ payload::TDataInfo, proof_of_store::{ - BatchInfo, ProofCache, ProofOfStore, SignedBatchInfo, SignedBatchInfoError, - SignedBatchInfoMsg, + BatchInfo, BatchInfoExt, ProofCache, ProofOfStore, SignedBatchInfo, SignedBatchInfoError, + SignedBatchInfoMsg, TBatchInfo, }, }; use aptos_logger::prelude::*; @@ -37,13 +37,13 @@ use tokio::{ #[derive(Debug)] pub(crate) enum ProofCoordinatorCommand { - AppendSignature(PeerId, SignedBatchInfoMsg), - CommitNotification(Vec), + AppendSignature(PeerId, SignedBatchInfoMsg), + CommitNotification(Vec), Shutdown(TokioOneshot::Sender<()>), } struct IncrementalProofState { - signature_aggregator: SignatureAggregator, + signature_aggregator: SignatureAggregator, aggregated_voting_power: u128, self_voted: bool, completed: bool, @@ -52,7 +52,7 @@ struct IncrementalProofState { } impl IncrementalProofState { - fn new(info: BatchInfo) -> Self { + fn new(info: BatchInfoExt) -> Self { Self { signature_aggregator: SignatureAggregator::new(info), aggregated_voting_power: 0, @@ -76,12 +76,12 @@ impl IncrementalProofState { fn add_signature( &mut self, - signed_batch_info: &SignedBatchInfo, + signed_batch_info: &SignedBatchInfo, validator_verifier: &ValidatorVerifier, ) -> Result<(), SignedBatchInfoError> { if signed_batch_info.batch_info() != self.signature_aggregator.data() { return Err(SignedBatchInfoError::WrongInfo(( - signed_batch_info.batch_id().id, + 
signed_batch_info.batch_info().batch_id().id, self.signature_aggregator.data().batch_id().id, ))); } @@ -138,7 +138,7 @@ impl IncrementalProofState { pub fn aggregate_and_verify( &mut self, validator_verifier: &ValidatorVerifier, - ) -> Result, SignedBatchInfoError> { + ) -> Result, SignedBatchInfoError> { if self.completed { panic!("Cannot call take twice, unexpected issue occurred"); } @@ -154,7 +154,7 @@ impl IncrementalProofState { } } - pub fn batch_info(&self) -> &BatchInfo { + pub fn batch_info(&self) -> &BatchInfoExt { self.signature_aggregator.data() } } @@ -162,13 +162,13 @@ impl IncrementalProofState { pub(crate) struct ProofCoordinator { peer_id: PeerId, proof_timeout_ms: usize, - batch_info_to_proof: HashMap, + batch_info_to_proof: HashMap, // to record the batch creation time - batch_info_to_time: HashMap, - timeouts: Timeouts, + batch_info_to_time: HashMap, + timeouts: Timeouts, batch_reader: Arc, batch_generator_cmd_tx: tokio::sync::mpsc::Sender, - proof_cache: ProofCache, + proof_cache: ProofCache, broadcast_proofs: bool, batch_expiry_gap_when_init_usecs: u64, } @@ -180,7 +180,7 @@ impl ProofCoordinator { peer_id: PeerId, batch_reader: Arc, batch_generator_cmd_tx: tokio::sync::mpsc::Sender, - proof_cache: ProofCache, + proof_cache: ProofCache, broadcast_proofs: bool, batch_expiry_gap_when_init_usecs: u64, ) -> Self { @@ -200,7 +200,7 @@ impl ProofCoordinator { fn init_proof( &mut self, - signed_batch_info: &SignedBatchInfo, + signed_batch_info: &SignedBatchInfo, ) -> Result<(), SignedBatchInfoError> { // Check if the signed digest corresponding to our batch if signed_batch_info.author() != self.peer_id { @@ -235,9 +235,9 @@ impl ProofCoordinator { fn add_signature( &mut self, - signed_batch_info: SignedBatchInfo, + signed_batch_info: SignedBatchInfo, validator_verifier: &ValidatorVerifier, - ) -> Result>, SignedBatchInfoError> { + ) -> Result>, SignedBatchInfoError> { if !self .batch_info_to_proof .contains_key(signed_batch_info.batch_info()) @@ 
-370,7 +370,7 @@ impl ProofCoordinator { error!("Empty signed batch info received from {}", signer.short_str().as_str()); return; }; - let info = signed_batch_info.info().clone(); + let info = signed_batch_info.batch_info().clone(); let approx_created_ts_usecs = signed_batch_info .expiration() .saturating_sub(self.batch_expiry_gap_when_init_usecs); @@ -388,7 +388,8 @@ impl ProofCoordinator { digest = digest, batch_id = batch_id.id, ); - proofs.push(proof); + let (info, sig) = proof.unpack(); + proofs.push(ProofOfStore::new(info.info().clone(), sig)); } }, Err(e) => { diff --git a/consensus/src/quorum_store/proof_manager.rs b/consensus/src/quorum_store/proof_manager.rs index 3201574adaba9..5d0cd17ecfc94 100644 --- a/consensus/src/quorum_store/proof_manager.rs +++ b/consensus/src/quorum_store/proof_manager.rs @@ -8,8 +8,8 @@ use crate::{ }; use aptos_consensus_types::{ common::{Payload, PayloadFilter, ProofWithData, TxnSummaryWithExpiration}, - payload::{OptQuorumStorePayload, PayloadExecutionLimit}, - proof_of_store::{BatchInfo, ProofOfStore, ProofOfStoreMsg}, + payload::{OptBatches, OptQuorumStorePayload, PayloadExecutionLimit}, + proof_of_store::{BatchInfo, BatchInfoExt, ProofOfStore, ProofOfStoreMsg, TBatchInfo}, request_response::{GetPayloadCommand, GetPayloadResponse}, utils::PayloadTxnsSize, }; @@ -21,9 +21,9 @@ use std::{cmp::min, collections::HashSet, sync::Arc, time::Duration}; #[derive(Debug)] pub enum ProofManagerCommand { - ReceiveProofs(ProofOfStoreMsg), - ReceiveBatches(Vec<(BatchInfo, Vec)>), - CommitNotification(u64, Vec), + ReceiveProofs(ProofOfStoreMsg), + ReceiveBatches(Vec<(BatchInfoExt, Vec)>), + CommitNotification(u64, Vec), Shutdown(tokio::sync::oneshot::Sender<()>), } @@ -62,7 +62,7 @@ impl ProofManager { } } - pub(crate) fn receive_proofs(&mut self, proofs: Vec>) { + pub(crate) fn receive_proofs(&mut self, proofs: Vec>) { for proof in proofs.into_iter() { self.batch_proof_queue.insert_proof(proof); } @@ -79,7 +79,7 @@ impl ProofManager { 
pub(crate) fn receive_batches( &mut self, - batch_summaries: Vec<(BatchInfo, Vec)>, + batch_summaries: Vec<(BatchInfoExt, Vec)>, ) { self.batch_proof_queue.insert_batches(batch_summaries); self.update_remaining_txns_and_proofs(); @@ -88,7 +88,7 @@ impl ProofManager { pub(crate) fn handle_commit_notification( &mut self, block_timestamp: u64, - batches: Vec, + batches: Vec, ) { trace!( "QS: got clean request from execution at block timestamp {}", @@ -185,6 +185,23 @@ impl ProofManager { counters::NUM_INLINE_BATCHES.observe(inline_block.len() as f64); counters::NUM_INLINE_TXNS.observe(inline_block_size.count() as f64); + // TODO(ibalajiarun): Avoid clones + let inline_block: Vec<_> = inline_block + .into_iter() + .map(|(info, txns)| (info.info().clone(), txns)) + .collect(); + let opt_batches: Vec<_> = opt_batches + .into_iter() + .map(|info| info.info().clone()) + .collect(); + let proof_block: Vec<_> = proof_block + .into_iter() + .map(|proof| { + let (info, sig) = proof.unpack(); + ProofOfStore::new(info.info().clone(), sig) + }) + .collect(); + let response = if request.maybe_optqs_payload_pull_params.is_some() { let inline_batches = inline_block.into(); Payload::OptQuorumStore(OptQuorumStorePayload::new( diff --git a/consensus/src/quorum_store/quorum_store_builder.rs b/consensus/src/quorum_store/quorum_store_builder.rs index 949b77fa40732..4b6a040e6f4f5 100644 --- a/consensus/src/quorum_store/quorum_store_builder.rs +++ b/consensus/src/quorum_store/quorum_store_builder.rs @@ -134,7 +134,7 @@ pub struct InnerBuilder { aptos_db: Arc, network_sender: NetworkSender, verifier: Arc, - proof_cache: ProofCache, + proof_cache: ProofCache, coordinator_tx: Sender, coordinator_rx: Option>, batch_generator_cmd_tx: tokio::sync::mpsc::Sender, @@ -170,7 +170,7 @@ impl InnerBuilder { aptos_db: Arc, network_sender: NetworkSender, verifier: Arc, - proof_cache: ProofCache, + proof_cache: ProofCache, quorum_store_storage: Arc, broadcast_proofs: bool, consensus_key: Arc, diff --git 
a/consensus/src/quorum_store/quorum_store_coordinator.rs b/consensus/src/quorum_store/quorum_store_coordinator.rs index baadc5e51d4a9..c69f2c2d4c2b2 100644 --- a/consensus/src/quorum_store/quorum_store_coordinator.rs +++ b/consensus/src/quorum_store/quorum_store_coordinator.rs @@ -10,14 +10,14 @@ use crate::{ round_manager::VerifiedEvent, }; use aptos_channels::aptos_channel; -use aptos_consensus_types::{common::Author, proof_of_store::BatchInfo}; +use aptos_consensus_types::{common::Author, proof_of_store::BatchInfoExt}; use aptos_logger::prelude::*; use aptos_types::{account_address::AccountAddress, PeerId}; use futures::StreamExt; use tokio::sync::{mpsc, oneshot}; pub enum CoordinatorCommand { - CommitNotification(u64, Vec), + CommitNotification(u64, Vec), Shutdown(futures_channel::oneshot::Sender<()>), } diff --git a/consensus/src/quorum_store/tests/batch_proof_queue_test.rs b/consensus/src/quorum_store/tests/batch_proof_queue_test.rs index a75e031f1ceb9..ee28e14b2813f 100644 --- a/consensus/src/quorum_store/tests/batch_proof_queue_test.rs +++ b/consensus/src/quorum_store/tests/batch_proof_queue_test.rs @@ -6,7 +6,7 @@ use crate::quorum_store::{ }; use aptos_consensus_types::{ common::TxnSummaryWithExpiration, - proof_of_store::{BatchInfo, ProofOfStore}, + proof_of_store::{BatchInfo, BatchInfoExt, ProofOfStore, TBatchInfo}, utils::PayloadTxnsSize, }; use aptos_crypto::HashValue; @@ -23,7 +23,7 @@ fn proof_of_store( batch_id: BatchId, gas_bucket_start: u64, expiration: u64, -) -> ProofOfStore { +) -> ProofOfStore { ProofOfStore::new( BatchInfo::new( author, @@ -34,7 +34,8 @@ fn proof_of_store( 1, 1, gas_bucket_start, - ), + ) + .into(), AggregateSignature::empty(), ) } @@ -45,7 +46,7 @@ fn proof_of_store_with_size( gas_bucket_start: u64, expiration: u64, num_txns: u64, -) -> ProofOfStore { +) -> ProofOfStore { ProofOfStore::new( BatchInfo::new( author, @@ -56,7 +57,8 @@ fn proof_of_store_with_size( num_txns, num_txns, gas_bucket_start, - ), + ) + .into(), 
AggregateSignature::empty(), ) } @@ -100,7 +102,7 @@ async fn test_proof_queue_sorting() { ); let mut count_author_0 = 0; let mut count_author_1 = 0; - let mut prev: Option<&ProofOfStore> = None; + let mut prev: Option<&ProofOfStore> = None; for batch in &pulled { if let Some(prev) = prev { assert!(prev.gas_bucket_start() >= batch.gas_bucket_start()); @@ -129,7 +131,7 @@ async fn test_proof_queue_sorting() { ); let mut count_author_0 = 0; let mut count_author_1 = 0; - let mut prev: Option<&ProofOfStore> = None; + let mut prev: Option<&ProofOfStore> = None; for batch in &pulled { if let Some(prev) = prev { assert!(prev.gas_bucket_start() >= batch.gas_bucket_start()); diff --git a/consensus/src/quorum_store/tests/proof_coordinator_test.rs b/consensus/src/quorum_store/tests/proof_coordinator_test.rs index efb2e40136dac..509875a4dd56b 100644 --- a/consensus/src/quorum_store/tests/proof_coordinator_test.rs +++ b/consensus/src/quorum_store/tests/proof_coordinator_test.rs @@ -74,7 +74,8 @@ async fn test_proof_coordinator_basic() { let digest = batch.digest(); for signer in &signers { - let signed_batch_info = SignedBatchInfo::new(batch.batch_info().clone(), signer).unwrap(); + let signed_batch_info = + SignedBatchInfo::new(batch.batch_info().clone().into(), signer).unwrap(); assert!(proof_coordinator_tx .send(ProofCoordinatorCommand::AppendSignature( signer.author(), @@ -127,8 +128,9 @@ async fn test_proof_coordinator_with_unverified_signatures() { for (signer_index, signer) in signers.iter().enumerate() { if signer_index > 2 { - let signed_batch_info = SignedBatchInfo::new(batch.batch_info().clone(), signer) - .expect("Failed to create SignedBatchInfo"); + let signed_batch_info = + SignedBatchInfo::new(batch.batch_info().clone().into(), signer) + .expect("Failed to create SignedBatchInfo"); assert!(proof_coordinator_tx .send(ProofCoordinatorCommand::AppendSignature( signer.author(), @@ -138,7 +140,7 @@ async fn test_proof_coordinator_with_unverified_signatures() { 
.is_ok()) } else { let signed_batch_info = - SignedBatchInfo::dummy(batch.batch_info().clone(), signer.author()); + SignedBatchInfo::dummy(batch.batch_info().clone().into(), signer.author()); assert!(proof_coordinator_tx .send(ProofCoordinatorCommand::AppendSignature( signer.author(), diff --git a/consensus/src/quorum_store/tests/proof_manager_test.rs b/consensus/src/quorum_store/tests/proof_manager_test.rs index 9f188e7159821..c9291a913f8eb 100644 --- a/consensus/src/quorum_store/tests/proof_manager_test.rs +++ b/consensus/src/quorum_store/tests/proof_manager_test.rs @@ -6,7 +6,7 @@ use crate::quorum_store::{ }; use aptos_consensus_types::{ common::{Payload, PayloadFilter}, - proof_of_store::{BatchInfo, ProofOfStore}, + proof_of_store::{BatchInfo, BatchInfoExt, ProofOfStore}, request_response::{GetPayloadCommand, GetPayloadRequest, GetPayloadResponse}, utils::PayloadTxnsSize, }; @@ -20,7 +20,11 @@ fn create_proof_manager() -> ProofManager { ProofManager::new(PeerId::random(), 10, 10, batch_store, true, true, 1) } -fn create_proof(author: PeerId, expiration: u64, batch_sequence: u64) -> ProofOfStore { +fn create_proof( + author: PeerId, + expiration: u64, + batch_sequence: u64, +) -> ProofOfStore { create_proof_with_gas(author, expiration, batch_sequence, 0) } @@ -29,7 +33,7 @@ fn create_proof_with_gas( expiration: u64, batch_sequence: u64, gas_bucket_start: u64, -) -> ProofOfStore { +) -> ProofOfStore { let digest = HashValue::random(); let batch_id = BatchId::new_for_test(batch_sequence); ProofOfStore::new( @@ -42,7 +46,8 @@ fn create_proof_with_gas( 1, 1, gas_bucket_start, - ), + ) + .into(), AggregateSignature::empty(), ) } @@ -50,7 +55,7 @@ fn create_proof_with_gas( async fn get_proposal( proof_manager: &mut ProofManager, max_txns: u64, - filter: &[BatchInfo], + filter: &[BatchInfoExt], ) -> Payload { let (callback_tx, callback_rx) = oneshot::channel(); let filter_set = HashSet::from_iter(filter.iter().cloned()); @@ -72,7 +77,7 @@ async fn get_proposal( fn 
assert_payload_response( payload: Payload, - expected: &[ProofOfStore], + expected: &[ProofOfStore], max_txns_from_block_to_execute: Option, expected_block_gas_limit: Option, ) { @@ -80,27 +85,27 @@ fn assert_payload_response( Payload::InQuorumStore(proofs) => { assert_eq!(proofs.proofs.len(), expected.len()); for proof in proofs.proofs { - assert!(expected.contains(&proof)); + assert!(expected.contains(&proof.into())); } }, Payload::InQuorumStoreWithLimit(proofs) => { assert_eq!(proofs.proof_with_data.proofs.len(), expected.len()); for proof in proofs.proof_with_data.proofs { - assert!(expected.contains(&proof)); + assert!(expected.contains(&proof.into())); } assert_eq!(proofs.max_txns_to_execute, max_txns_from_block_to_execute); }, Payload::QuorumStoreInlineHybrid(_inline_batches, proofs, max_txns_to_execute) => { assert_eq!(proofs.proofs.len(), expected.len()); for proof in proofs.proofs { - assert!(expected.contains(&proof)); + assert!(expected.contains(&proof.into())); } assert_eq!(max_txns_to_execute, max_txns_from_block_to_execute); }, Payload::QuorumStoreInlineHybridV2(_inline_batches, proofs, execution_limits) => { assert_eq!(proofs.proofs.len(), expected.len()); for proof in proofs.proofs { - assert!(expected.contains(&proof)); + assert!(expected.contains(&proof.into())); } assert_eq!( execution_limits.max_txns_to_execute(), @@ -115,8 +120,8 @@ fn assert_payload_response( async fn get_proposal_and_assert( proof_manager: &mut ProofManager, max_txns: u64, - filter: &[BatchInfo], - expected: &[ProofOfStore], + filter: &[BatchInfoExt], + expected: &[ProofOfStore], ) { assert_payload_response( get_proposal(proof_manager, max_txns, filter).await, @@ -263,9 +268,18 @@ async fn test_duplicate_batches_on_commit() { let digest = HashValue::random(); let batch_id = BatchId::new_for_test(1); let batch = BatchInfo::new(author, batch_id, 0, 10, digest, 1, 1, 0); - let proof0 = ProofOfStore::new(batch.clone(), AggregateSignature::empty()); - let proof1 = 
ProofOfStore::new(batch.clone(), AggregateSignature::empty()); - let proof2 = ProofOfStore::new(batch.clone(), AggregateSignature::empty()); + let proof0 = ProofOfStore::new( + BatchInfoExt::from(batch.clone()), + AggregateSignature::empty(), + ); + let proof1 = ProofOfStore::new( + BatchInfoExt::from(batch.clone()), + AggregateSignature::empty(), + ); + let proof2 = ProofOfStore::new( + BatchInfoExt::from(batch.clone()), + AggregateSignature::empty(), + ); proof_manager.receive_proofs(vec![proof0.clone()]); proof_manager.receive_proofs(vec![proof1.clone()]); @@ -274,7 +288,7 @@ async fn test_duplicate_batches_on_commit() { get_proposal_and_assert(&mut proof_manager, 10, &[], &vec![proof0.clone()]).await; // Nothing goes wrong on commits - proof_manager.handle_commit_notification(4, vec![batch.clone()]); + proof_manager.handle_commit_notification(4, vec![batch.clone().into()]); get_proposal_and_assert(&mut proof_manager, 10, &[], &[]).await; // Before expiration, still marked as committed @@ -296,8 +310,14 @@ async fn test_duplicate_batches_on_expiration() { let digest = HashValue::random(); let batch_id = BatchId::new_for_test(1); let batch = BatchInfo::new(author, batch_id, 0, 10, digest, 1, 1, 0); - let proof0 = ProofOfStore::new(batch.clone(), AggregateSignature::empty()); - let proof1 = ProofOfStore::new(batch.clone(), AggregateSignature::empty()); + let proof0 = ProofOfStore::new( + BatchInfoExt::from(batch.clone()), + AggregateSignature::empty(), + ); + let proof1 = ProofOfStore::new( + BatchInfoExt::from(batch.clone()), + AggregateSignature::empty(), + ); proof_manager.receive_proofs(vec![proof0.clone()]); proof_manager.receive_proofs(vec![proof1.clone()]); diff --git a/consensus/src/quorum_store/utils.rs b/consensus/src/quorum_store/utils.rs index 91e8d61d9e338..deba46b73bb01 100644 --- a/consensus/src/quorum_store/utils.rs +++ b/consensus/src/quorum_store/utils.rs @@ -4,7 +4,7 @@ use crate::monitor; use aptos_consensus_types::{ 
common::{TransactionInProgress, TransactionSummary}, - proof_of_store::BatchInfo, + proof_of_store::{BatchInfo, BatchInfoExt, TBatchInfo}, }; use aptos_logger::prelude::*; use aptos_mempool::{QuorumStoreRequest, QuorumStoreResponse}; @@ -154,7 +154,7 @@ pub struct BatchKey { } impl BatchKey { - pub fn from_info(info: &BatchInfo) -> Self { + pub fn from_info(info: &BatchInfoExt) -> Self { Self { author: info.author(), batch_id: info.batch_id(), @@ -169,7 +169,7 @@ pub struct BatchSortKey { } impl BatchSortKey { - pub fn from_info(info: &BatchInfo) -> Self { + pub fn from_info(info: &BatchInfoExt) -> Self { Self { batch_key: BatchKey::from_info(info), gas_bucket_start: info.gas_bucket_start(), diff --git a/consensus/src/round_manager.rs b/consensus/src/round_manager.rs index 53146a5a451b5..4e3647142db96 100644 --- a/consensus/src/round_manager.rs +++ b/consensus/src/round_manager.rs @@ -45,7 +45,7 @@ use aptos_consensus_types::{ order_vote::OrderVote, order_vote_msg::OrderVoteMsg, pipelined_block::PipelinedBlock, - proof_of_store::{BatchInfo, ProofCache, ProofOfStoreMsg, SignedBatchInfoMsg}, + proof_of_store::{BatchInfoExt, ProofCache, ProofOfStoreMsg, SignedBatchInfoMsg}, proposal_msg::ProposalMsg, quorum_cert::QuorumCert, round_timeout::{RoundTimeout, RoundTimeoutMsg, RoundTimeoutReason}, @@ -95,8 +95,8 @@ pub enum UnverifiedEvent { OrderVoteMsg(Box), SyncInfo(Box), BatchMsg(Box), - SignedBatchInfo(Box>), - ProofOfStoreMsg(Box>), + SignedBatchInfo(Box>), + ProofOfStoreMsg(Box>), OptProposalMsg(Box), } @@ -107,7 +107,7 @@ impl UnverifiedEvent { self, peer_id: PeerId, validator: &ValidatorVerifier, - proof_cache: &ProofCache, + proof_cache: &ProofCache, quorum_store_enabled: bool, self_message: bool, max_num_batches: usize, @@ -115,7 +115,6 @@ impl UnverifiedEvent { ) -> Result { let start_time = Instant::now(); Ok(match self { - //TODO: no need to sign and verify the proposal UnverifiedEvent::ProposalMsg(p) => { if !self_message { p.verify(peer_id, validator, 
proof_cache, quorum_store_enabled)?; @@ -161,7 +160,6 @@ impl UnverifiedEvent { } VerifiedEvent::OrderVoteMsg(v) }, - // sync info verification is on-demand (verified when it's used) UnverifiedEvent::SyncInfo(s) => VerifiedEvent::UnverifiedSyncInfo(s), UnverifiedEvent::BatchMsg(b) => { if !self_message { @@ -222,9 +220,15 @@ impl From for UnverifiedEvent { ConsensusMsg::OrderVoteMsg(m) => UnverifiedEvent::OrderVoteMsg(m), ConsensusMsg::SyncInfo(m) => UnverifiedEvent::SyncInfo(m), ConsensusMsg::BatchMsg(m) => UnverifiedEvent::BatchMsg(m), - ConsensusMsg::SignedBatchInfo(m) => UnverifiedEvent::SignedBatchInfo(m), - ConsensusMsg::ProofOfStoreMsg(m) => UnverifiedEvent::ProofOfStoreMsg(m), + ConsensusMsg::SignedBatchInfo(m) => { + UnverifiedEvent::SignedBatchInfo(Box::new((*m).into())) + }, + ConsensusMsg::ProofOfStoreMsg(m) => { + UnverifiedEvent::ProofOfStoreMsg(Box::new((*m).into())) + }, ConsensusMsg::RoundTimeoutMsg(m) => UnverifiedEvent::RoundTimeoutMsg(m), + ConsensusMsg::SignedBatchInfoMsgV2(m) => UnverifiedEvent::SignedBatchInfo(m), + ConsensusMsg::ProofOfStoreMsgV2(m) => UnverifiedEvent::ProofOfStoreMsg(m), _ => unreachable!("Unexpected conversion"), } } @@ -240,8 +244,8 @@ pub enum VerifiedEvent { OrderVoteMsg(Box), UnverifiedSyncInfo(Box), BatchMsg(Box), - SignedBatchInfo(Box>), - ProofOfStoreMsg(Box>), + SignedBatchInfo(Box>), + ProofOfStoreMsg(Box>), // local messages LocalTimeout(Round), // Shutdown the NetworkListener diff --git a/consensus/src/round_manager_tests/mod.rs b/consensus/src/round_manager_tests/mod.rs index 15b3d64229cc5..75ce34d760e22 100644 --- a/consensus/src/round_manager_tests/mod.rs +++ b/consensus/src/round_manager_tests/mod.rs @@ -43,7 +43,7 @@ use aptos_consensus_types::{ opt_proposal_msg::OptProposalMsg, order_vote_msg::OrderVoteMsg, pipeline::commit_decision::CommitDecision, - proof_of_store::BatchInfo, + proof_of_store::{BatchInfo, BatchInfoExt}, proposal_msg::ProposalMsg, round_timeout::RoundTimeoutMsg, utils::PayloadTxnsSize, @@ 
-734,7 +734,7 @@ impl NodeSetup { struct MockQuorumStoreCommitNotifier; impl TQuorumStoreCommitNotifier for MockQuorumStoreCommitNotifier { - fn notify(&self, _block_timestamp: u64, _batches: Vec) { + fn notify(&self, _block_timestamp: u64, _batches: Vec) { unimplemented!() } } From f966c6252e588d1cb98303b66368f6e19d32c864 Mon Sep 17 00:00:00 2001 From: Balaji Arun Date: Fri, 7 Nov 2025 10:30:45 -0800 Subject: [PATCH 6/9] [qs] send proof v2 support with flag --- config/src/config/quorum_store_config.rs | 3 ++ consensus/src/network.rs | 19 +++++-- consensus/src/quorum_store/batch_store.rs | 2 +- .../src/quorum_store/proof_coordinator.rs | 54 ++++++++++++------- .../src/quorum_store/quorum_store_builder.rs | 1 + .../tests/batch_requester_test.rs | 11 +++- .../tests/proof_coordinator_test.rs | 2 + .../test_utils/mock_quorum_store_sender.rs | 17 +++++- 8 files changed, 80 insertions(+), 29 deletions(-) diff --git a/config/src/config/quorum_store_config.rs b/config/src/config/quorum_store_config.rs index 55ce7071acd2b..bae2f3a8fc4db 100644 --- a/config/src/config/quorum_store_config.rs +++ b/config/src/config/quorum_store_config.rs @@ -99,6 +99,8 @@ pub struct QuorumStoreConfig { pub enable_opt_quorum_store: bool, pub opt_qs_minimum_batch_age_usecs: u64, pub enable_payload_v2: bool, + /// Boolean flag that controls the usage of `BatchInfoExt::V1` + pub enable_proof_v2: bool, } impl Default for QuorumStoreConfig { @@ -140,6 +142,7 @@ impl Default for QuorumStoreConfig { enable_opt_quorum_store: true, opt_qs_minimum_batch_age_usecs: Duration::from_millis(50).as_micros() as u64, enable_payload_v2: false, + enable_proof_v2: false, } } } diff --git a/consensus/src/network.rs b/consensus/src/network.rs index 214fc50a14ee2..12f691cc4a817 100644 --- a/consensus/src/network.rs +++ b/consensus/src/network.rs @@ -29,7 +29,7 @@ use aptos_consensus_types::{ order_vote_msg::OrderVoteMsg, pipeline::{commit_decision::CommitDecision, commit_vote::CommitVote}, proof_of_store::{ - 
BatchInfo, ProofOfStore, ProofOfStoreMsg, SignedBatchInfo, SignedBatchInfoMsg, + BatchInfo, BatchInfoExt, ProofOfStore, ProofOfStoreMsg, SignedBatchInfo, SignedBatchInfoMsg, }, proposal_msg::ProposalMsg, round_timeout::RoundTimeoutMsg, @@ -212,9 +212,14 @@ pub trait QuorumStoreSender: Send + Clone { async fn broadcast_proof_of_store_msg(&mut self, proof_of_stores: Vec>); + async fn broadcast_proof_of_store_msg_v2( + &mut self, + proof_of_stores: Vec>, + ); + async fn send_proof_of_store_msg_to_self( &mut self, - proof_of_stores: Vec>, + proof_of_stores: Vec>, ); } @@ -582,9 +587,15 @@ impl QuorumStoreSender for NetworkSender { self.broadcast(msg).await } - async fn send_proof_of_store_msg_to_self(&mut self, proofs: Vec>) { + async fn broadcast_proof_of_store_msg_v2(&mut self, proofs: Vec>) { fail_point!("consensus::send::proof_of_store", |_| ()); - let msg = ConsensusMsg::ProofOfStoreMsg(Box::new(ProofOfStoreMsg::new(proofs))); + let msg = ConsensusMsg::ProofOfStoreMsgV2(Box::new(ProofOfStoreMsg::new(proofs))); + self.broadcast(msg).await + } + + async fn send_proof_of_store_msg_to_self(&mut self, proofs: Vec>) { + fail_point!("consensus::send::proof_of_store", |_| ()); + let msg = ConsensusMsg::ProofOfStoreMsgV2(Box::new(ProofOfStoreMsg::new(proofs))); self.send(msg, vec![self.author]).await } } diff --git a/consensus/src/quorum_store/batch_store.rs b/consensus/src/quorum_store/batch_store.rs index 02364580a8b65..066d161c691db 100644 --- a/consensus/src/quorum_store/batch_store.rs +++ b/consensus/src/quorum_store/batch_store.rs @@ -12,7 +12,7 @@ use crate::{ }, }; use anyhow::bail; -use aptos_consensus_types::proof_of_store::{BatchInfo, SignedBatchInfo}; +use aptos_consensus_types::proof_of_store::{BatchInfo, BatchInfoExt, SignedBatchInfo}; use aptos_crypto::{CryptoMaterialError, HashValue}; use aptos_executor_types::{ExecutorError, ExecutorResult}; use aptos_infallible::Mutex; diff --git a/consensus/src/quorum_store/proof_coordinator.rs 
b/consensus/src/quorum_store/proof_coordinator.rs index 973b1fdb87f7e..cfac6e1ce725e 100644 --- a/consensus/src/quorum_store/proof_coordinator.rs +++ b/consensus/src/quorum_store/proof_coordinator.rs @@ -171,6 +171,7 @@ pub(crate) struct ProofCoordinator { proof_cache: ProofCache, broadcast_proofs: bool, batch_expiry_gap_when_init_usecs: u64, + enable_proof_v2_msg: bool, } //PoQS builder object - gather signed digest to form PoQS @@ -183,6 +184,7 @@ impl ProofCoordinator { proof_cache: ProofCache, broadcast_proofs: bool, batch_expiry_gap_when_init_usecs: u64, + enable_proof_v2_msg: bool, ) -> Self { Self { peer_id, @@ -195,6 +197,7 @@ impl ProofCoordinator { proof_cache, broadcast_proofs, batch_expiry_gap_when_init_usecs, + enable_proof_v2_msg, } } @@ -374,24 +377,24 @@ impl ProofCoordinator { let approx_created_ts_usecs = signed_batch_info .expiration() .saturating_sub(self.batch_expiry_gap_when_init_usecs); + let self_peer_id = self.peer_id; + let enable_broadcast_proofs = self.broadcast_proofs; + let enable_proof_v2_msg = self.enable_proof_v2_msg; - let mut proofs = vec![]; - for signed_batch_info in signed_batch_infos.into_iter() { + let mut proofs_iter = signed_batch_infos.into_iter().filter_map(|signed_batch_info| { let peer_id = signed_batch_info.signer(); let digest = *signed_batch_info.digest(); let batch_id = signed_batch_info.batch_id(); match self.add_signature(signed_batch_info, &validator_verifier) { - Ok(result) => { - if let Some(proof) = result { - debug!( - LogSchema::new(LogEvent::ProofOfStoreReady), - digest = digest, - batch_id = batch_id.id, - ); - let (info, sig) = proof.unpack(); - proofs.push(ProofOfStore::new(info.info().clone(), sig)); - } + Ok(Some(proof)) => { + debug!( + LogSchema::new(LogEvent::ProofOfStoreReady), + digest = digest, + batch_id = batch_id.id, + ); + Some(proof) }, + Ok(None) => None, Err(e) => { // Can happen if we already garbage collected, the commit notification is late, or the peer is misbehaving. 
if peer_id == self.peer_id { @@ -399,20 +402,31 @@ impl ProofCoordinator { } else { debug!("QS: could not add signature from peer {}, digest = {}, batch_id = {}, err = {:?}", peer_id, digest, batch_id, e); } + None }, } - } - if let Some(value) = self.batch_info_to_proof.get_mut(&info) { - value.observe_voting_pct(approx_created_ts_usecs, &validator_verifier); - } - if !proofs.is_empty() { - observe_batch(approx_created_ts_usecs, self.peer_id, BatchStage::POS_FORMED); - if self.broadcast_proofs { - network_sender.broadcast_proof_of_store_msg(proofs).await; + }).peekable(); + if proofs_iter.peek().is_some() { + observe_batch(approx_created_ts_usecs, self_peer_id, BatchStage::POS_FORMED); + if enable_broadcast_proofs { + if enable_proof_v2_msg { + let proofs: Vec<_> = proofs_iter.collect(); + network_sender.broadcast_proof_of_store_msg_v2(proofs).await; + } else { + let proofs: Vec<_> = proofs_iter.map(|proof| { + let (info, sig) = proof.unpack(); + ProofOfStore::new(info.info().clone(), sig) + }).collect(); + network_sender.broadcast_proof_of_store_msg(proofs).await; + } } else { + let proofs: Vec<_> = proofs_iter.collect(); network_sender.send_proof_of_store_msg_to_self(proofs).await; } } + if let Some(value) = self.batch_info_to_proof.get_mut(&info) { + value.observe_voting_pct(approx_created_ts_usecs, &validator_verifier); + } }, } }), diff --git a/consensus/src/quorum_store/quorum_store_builder.rs b/consensus/src/quorum_store/quorum_store_builder.rs index 4b6a040e6f4f5..88f6264fc2228 100644 --- a/consensus/src/quorum_store/quorum_store_builder.rs +++ b/consensus/src/quorum_store/quorum_store_builder.rs @@ -351,6 +351,7 @@ impl InnerBuilder { self.proof_cache, self.broadcast_proofs, self.config.batch_expiry_gap_when_init_usecs, + self.config.enable_proof_v2, ); spawn_named!( "proof_coordinator", diff --git a/consensus/src/quorum_store/tests/batch_requester_test.rs b/consensus/src/quorum_store/tests/batch_requester_test.rs index 01476e90eec31..0092dc7c8ff99 100644 
--- a/consensus/src/quorum_store/tests/batch_requester_test.rs +++ b/consensus/src/quorum_store/tests/batch_requester_test.rs @@ -11,7 +11,7 @@ use crate::{ }; use aptos_consensus_types::{ common::Author, - proof_of_store::{BatchInfo, ProofOfStore, SignedBatchInfo}, + proof_of_store::{BatchInfo, BatchInfoExt, ProofOfStore, SignedBatchInfo}, }; use aptos_crypto::HashValue; use aptos_infallible::Mutex; @@ -73,9 +73,16 @@ impl QuorumStoreSender for MockBatchRequester { unimplemented!() } + async fn broadcast_proof_of_store_msg_v2( + &mut self, + _proof_of_stores: Vec>, + ) { + unimplemented!() + } + async fn send_proof_of_store_msg_to_self( &mut self, - _proof_of_stores: Vec>, + _proof_of_stores: Vec>, ) { unimplemented!() } diff --git a/consensus/src/quorum_store/tests/proof_coordinator_test.rs b/consensus/src/quorum_store/tests/proof_coordinator_test.rs index 509875a4dd56b..cb698664789ec 100644 --- a/consensus/src/quorum_store/tests/proof_coordinator_test.rs +++ b/consensus/src/quorum_store/tests/proof_coordinator_test.rs @@ -60,6 +60,7 @@ async fn test_proof_coordinator_basic() { proof_cache.clone(), true, 10, + false, ); let (proof_coordinator_tx, proof_coordinator_rx) = channel(100); let (tx, mut rx) = channel(100); @@ -113,6 +114,7 @@ async fn test_proof_coordinator_with_unverified_signatures() { proof_cache.clone(), true, 10, + false, ); let (proof_coordinator_tx, proof_coordinator_rx) = channel(100); let (tx, mut rx) = channel(100); diff --git a/consensus/src/test_utils/mock_quorum_store_sender.rs b/consensus/src/test_utils/mock_quorum_store_sender.rs index f7a7ddd5dff7d..295eb77963f9f 100644 --- a/consensus/src/test_utils/mock_quorum_store_sender.rs +++ b/consensus/src/test_utils/mock_quorum_store_sender.rs @@ -9,7 +9,7 @@ use crate::{ use aptos_consensus_types::{ common::Author, proof_of_store::{ - BatchInfo, ProofOfStore, ProofOfStoreMsg, SignedBatchInfo, SignedBatchInfoMsg, + BatchInfo, BatchInfoExt, ProofOfStore, ProofOfStoreMsg, SignedBatchInfo, 
SignedBatchInfoMsg, }, }; use std::time::Duration; @@ -72,8 +72,21 @@ impl QuorumStoreSender for MockQuorumStoreSender { async fn send_proof_of_store_msg_to_self( &mut self, - _proof_of_stores: Vec>, + _proof_of_stores: Vec>, ) { unimplemented!() } + + async fn broadcast_proof_of_store_msg_v2( + &mut self, + proof_of_stores: Vec>, + ) { + self.tx + .send(( + ConsensusMsg::ProofOfStoreMsgV2(Box::new(ProofOfStoreMsg::new(proof_of_stores))), + vec![], + )) + .await + .expect("We should be able to send the proof of store message"); + } } From 75caee4095c326878f71ded29595b104709fe7cc Mon Sep 17 00:00:00 2001 From: Balaji Arun Date: Fri, 7 Nov 2025 12:33:45 -0800 Subject: [PATCH 7/9] [qs] support signing BatchInfoExt behind flag --- consensus/src/network.rs | 18 ++++++ .../src/quorum_store/batch_coordinator.rs | 31 +++++++--- consensus/src/quorum_store/batch_store.rs | 60 ++++++++++++++++--- .../src/quorum_store/quorum_store_builder.rs | 1 + .../tests/batch_coordinator_test.rs | 1 + .../tests/batch_generator_test.rs | 7 +++ .../tests/batch_requester_test.rs | 8 +++ .../test_utils/mock_quorum_store_sender.rs | 16 +++++ types/src/transaction/mod.rs | 8 +++ 9 files changed, 135 insertions(+), 15 deletions(-) diff --git a/consensus/src/network.rs b/consensus/src/network.rs index 12f691cc4a817..123cd6d550fca 100644 --- a/consensus/src/network.rs +++ b/consensus/src/network.rs @@ -208,6 +208,12 @@ pub trait QuorumStoreSender: Send + Clone { recipients: Vec, ); + async fn send_signed_batch_info_msg_v2( + &self, + signed_batch_infos: Vec>, + recipients: Vec, + ); + async fn broadcast_batch_msg(&mut self, batches: Vec); async fn broadcast_proof_of_store_msg(&mut self, proof_of_stores: Vec>); @@ -575,6 +581,18 @@ impl QuorumStoreSender for NetworkSender { self.send(msg, recipients).await } + async fn send_signed_batch_info_msg_v2( + &self, + signed_batch_infos: Vec>, + recipients: Vec, + ) { + fail_point!("consensus::send::signed_batch_info", |_| ()); + let msg = 
ConsensusMsg::SignedBatchInfoMsgV2(Box::new(SignedBatchInfoMsg::new( + signed_batch_infos, + ))); + self.send(msg, recipients).await + } + async fn broadcast_batch_msg(&mut self, batches: Vec) { fail_point!("consensus::send::broadcast_batch", |_| ()); let msg = ConsensusMsg::BatchMsg(Box::new(BatchMsg::new(batches))); diff --git a/consensus/src/quorum_store/batch_coordinator.rs b/consensus/src/quorum_store/batch_coordinator.rs index 0486d6f0be479..50cc928ffcb3a 100644 --- a/consensus/src/quorum_store/batch_coordinator.rs +++ b/consensus/src/quorum_store/batch_coordinator.rs @@ -44,6 +44,7 @@ pub struct BatchCoordinator { max_total_bytes: u64, batch_expiry_gap_when_init_usecs: u64, transaction_filter_config: BatchTransactionFilterConfig, + enable_proof_v2: bool, } impl BatchCoordinator { @@ -59,6 +60,7 @@ impl BatchCoordinator { max_total_bytes: u64, batch_expiry_gap_when_init_usecs: u64, transaction_filter_config: BatchTransactionFilterConfig, + enable_proof_v2: bool, ) -> Self { Self { my_peer_id, @@ -72,6 +74,7 @@ impl BatchCoordinator { max_total_bytes, batch_expiry_gap_when_init_usecs, transaction_filter_config, + enable_proof_v2, } } @@ -87,6 +90,7 @@ impl BatchCoordinator { let batch_store = self.batch_store.clone(); let network_sender = self.network_sender.clone(); let sender_to_proof_manager = self.sender_to_proof_manager.clone(); + let enable_proof_v2 = self.enable_proof_v2; tokio::spawn(async move { let peer_id = persist_requests[0].author(); let batches = persist_requests @@ -98,14 +102,27 @@ impl BatchCoordinator { ) }) .collect(); - let signed_batch_infos = batch_store.persist(persist_requests); - if !signed_batch_infos.is_empty() { - if approx_created_ts_usecs > 0 { - observe_batch(approx_created_ts_usecs, peer_id, BatchStage::SIGNED); + + if enable_proof_v2 { + let signed_batch_infos = batch_store.persist_v2(persist_requests); + if !signed_batch_infos.is_empty() { + if approx_created_ts_usecs > 0 { + observe_batch(approx_created_ts_usecs, peer_id, 
BatchStage::SIGNED); + } + network_sender + .send_signed_batch_info_msg_v2(signed_batch_infos, vec![peer_id]) + .await; + } + } else { + let signed_batch_infos = batch_store.persist(persist_requests); + if !signed_batch_infos.is_empty() { + if approx_created_ts_usecs > 0 { + observe_batch(approx_created_ts_usecs, peer_id, BatchStage::SIGNED); + } + network_sender + .send_signed_batch_info_msg(signed_batch_infos, vec![peer_id]) + .await; } - network_sender - .send_signed_batch_info_msg(signed_batch_infos, vec![peer_id]) - .await; } let _ = sender_to_proof_manager .send(ProofManagerCommand::ReceiveBatches(batches)) diff --git a/consensus/src/quorum_store/batch_store.rs b/consensus/src/quorum_store/batch_store.rs index 066d161c691db..b90a6c03a6f69 100644 --- a/consensus/src/quorum_store/batch_store.rs +++ b/consensus/src/quorum_store/batch_store.rs @@ -12,7 +12,9 @@ use crate::{ }, }; use anyhow::bail; -use aptos_consensus_types::proof_of_store::{BatchInfo, BatchInfoExt, SignedBatchInfo}; +use aptos_consensus_types::proof_of_store::{ + BatchInfo, BatchInfoExt, BatchKind, ExtraBatchInfo, SignedBatchInfo, TBatchInfo, +}; use aptos_crypto::{CryptoMaterialError, HashValue}; use aptos_executor_types::{ExecutorError, ExecutorResult}; use aptos_infallible::Mutex; @@ -26,6 +28,7 @@ use fail::fail_point; use futures::{future::Shared, FutureExt}; use once_cell::sync::OnceCell; use std::{ + any::TypeId, collections::{BTreeSet, HashMap}, future::Future, pin::Pin, @@ -378,10 +381,10 @@ impl BatchStore { ret } - fn generate_signed_batch_info( + fn generate_signed_batch_info( &self, - batch_info: BatchInfo, - ) -> Result, CryptoMaterialError> { + batch_info: T, + ) -> Result, CryptoMaterialError> { fail_point!("quorum_store::create_invalid_signed_batch_info", |_| { Ok(SignedBatchInfo::new_with_signature( batch_info.clone(), @@ -392,10 +395,17 @@ impl BatchStore { SignedBatchInfo::new(batch_info, &self.validator_signer) } - fn persist_inner(&self, persist_request: PersistedValue) -> 
Option> { + fn persist_inner( + &self, + batch_info: T, + persist_request: PersistedValue, + ) -> Option> { + assert!( + batch_info.as_batch_info() == persist_request.batch_info(), + "Provided batch info doesn't match persist request batch info" + ); match self.save(&persist_request) { Ok(needs_db) => { - let batch_info = persist_request.batch_info().clone(); trace!("QS: sign digest {}", persist_request.digest()); if needs_db { #[allow(clippy::unwrap_in_result)] @@ -405,7 +415,6 @@ impl BatchStore { } self.generate_signed_batch_info(batch_info).ok() }, - Err(e) => { debug!("QS: failed to store to cache {:?}", e); None @@ -486,7 +495,37 @@ impl BatchWriter for BatchStore { fn persist(&self, persist_requests: Vec) -> Vec> { let mut signed_infos = vec![]; for persist_request in persist_requests.into_iter() { - if let Some(signed_info) = self.persist_inner(persist_request.clone()) { + let batch_info = persist_request.batch_info().clone(); + if let Some(signed_info) = self.persist_inner(batch_info, persist_request.clone()) { + self.notify_subscribers(persist_request); + signed_infos.push(signed_info); + } + } + signed_infos + } + + fn persist_v2( + &self, + persist_requests: Vec, + ) -> Vec> { + let mut signed_infos = vec![]; + for persist_request in persist_requests.into_iter() { + let is_encrypted_batch = persist_request + .payload() + .as_ref() + .expect("Payload must be available for persistence") + .iter() + .any(|txn| txn.is_encrypted()); + let batch_kind = if is_encrypted_batch { + BatchKind::Encrypted + } else { + BatchKind::Normal + }; + let batch_info = BatchInfoExt::V2 { + info: persist_request.batch_info().clone(), + extra: ExtraBatchInfo { batch_kind }, + }; + if let Some(signed_info) = self.persist_inner(batch_info, persist_request.clone()) { self.notify_subscribers(persist_request); signed_infos.push(signed_info); } @@ -611,4 +650,9 @@ impl BatchReader for Batch pub trait BatchWriter: Send + Sync { fn persist(&self, persist_requests: Vec) -> Vec>; + + fn 
persist_v2( + &self, + persist_requests: Vec, + ) -> Vec>; } diff --git a/consensus/src/quorum_store/quorum_store_builder.rs b/consensus/src/quorum_store/quorum_store_builder.rs index 88f6264fc2228..dcb654fd4620f 100644 --- a/consensus/src/quorum_store/quorum_store_builder.rs +++ b/consensus/src/quorum_store/quorum_store_builder.rs @@ -333,6 +333,7 @@ impl InnerBuilder { self.config.receiver_max_total_bytes as u64, self.config.batch_expiry_gap_when_init_usecs, self.transaction_filter_config.clone(), + self.config.enable_proof_v2, ); #[allow(unused_variables)] let name = format!("batch_coordinator-{}", i); diff --git a/consensus/src/quorum_store/tests/batch_coordinator_test.rs b/consensus/src/quorum_store/tests/batch_coordinator_test.rs index 999e689c8cf39..90b97872f9937 100644 --- a/consensus/src/quorum_store/tests/batch_coordinator_test.rs +++ b/consensus/src/quorum_store/tests/batch_coordinator_test.rs @@ -142,6 +142,7 @@ fn create_batch_coordinator( 10_000, 10_000, transaction_filter_config, + false, ) } diff --git a/consensus/src/quorum_store/tests/batch_generator_test.rs b/consensus/src/quorum_store/tests/batch_generator_test.rs index a5169d0867926..758eba20368b3 100644 --- a/consensus/src/quorum_store/tests/batch_generator_test.rs +++ b/consensus/src/quorum_store/tests/batch_generator_test.rs @@ -38,6 +38,13 @@ impl BatchWriter for MockBatchWriter { fn persist(&self, _persist_requests: Vec) -> Vec> { vec![] } + + fn persist_v2( + &self, + persist_requests: Vec, + ) -> Vec> { + vec![] + } } #[allow(clippy::needless_collect)] diff --git a/consensus/src/quorum_store/tests/batch_requester_test.rs b/consensus/src/quorum_store/tests/batch_requester_test.rs index 0092dc7c8ff99..4441e5eeadb1e 100644 --- a/consensus/src/quorum_store/tests/batch_requester_test.rs +++ b/consensus/src/quorum_store/tests/batch_requester_test.rs @@ -62,6 +62,14 @@ impl QuorumStoreSender for MockBatchRequester { unimplemented!() } + async fn send_signed_batch_info_msg_v2( + &self, + 
_signed_batch_infos: Vec>, + _recipients: Vec, + ) { + unimplemented!() + } + async fn broadcast_batch_msg(&mut self, _batches: Vec) { unimplemented!() } diff --git a/consensus/src/test_utils/mock_quorum_store_sender.rs b/consensus/src/test_utils/mock_quorum_store_sender.rs index 295eb77963f9f..f33ef98ed688a 100644 --- a/consensus/src/test_utils/mock_quorum_store_sender.rs +++ b/consensus/src/test_utils/mock_quorum_store_sender.rs @@ -53,6 +53,22 @@ impl QuorumStoreSender for MockQuorumStoreSender { .expect("could not send"); } + async fn send_signed_batch_info_msg_v2( + &self, + signed_batch_infos: Vec>, + recipients: Vec, + ) { + self.tx + .send(( + ConsensusMsg::SignedBatchInfoMsgV2(Box::new(SignedBatchInfoMsg::new( + signed_batch_infos, + ))), + recipients, + )) + .await + .expect("could not send"); + } + async fn broadcast_batch_msg(&mut self, _batches: Vec) { unimplemented!() } diff --git a/types/src/transaction/mod.rs b/types/src/transaction/mod.rs index 9cea038278718..5ff3e71309689 100644 --- a/types/src/transaction/mod.rs +++ b/types/src/transaction/mod.rs @@ -940,6 +940,10 @@ impl TransactionPayload { extra_config, }) } + + pub fn is_encrypted(&self) -> bool { + matches!(self, Self::EncryptedPayload(_)) + } } impl TransactionExtraConfig { @@ -1296,6 +1300,10 @@ impl SignedTransaction { pub fn replay_protector(&self) -> ReplayProtector { self.raw_txn.replay_protector() } + + pub fn is_encrypted(&self) -> bool { + self.payload().is_encrypted() + } } #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] From ddb70b2b3f318d228f90dc2af6d078165e1428ac Mon Sep 17 00:00:00 2001 From: Balaji Arun Date: Fri, 7 Nov 2025 15:20:52 -0800 Subject: [PATCH 8/9] [qs] Generic Batch struct and new DB schema for Batch with Ext --- consensus/src/network.rs | 8 +-- consensus/src/network_interface.rs | 6 +- .../src/quorum_store/batch_coordinator.rs | 14 ++-- consensus/src/quorum_store/batch_generator.rs | 12 ++-- consensus/src/quorum_store/batch_requester.rs | 3 +- 
consensus/src/quorum_store/batch_store.rs | 37 ++++++---- .../src/quorum_store/quorum_store_builder.rs | 2 +- consensus/src/quorum_store/quorum_store_db.rs | 70 ++++++++++++++++--- consensus/src/quorum_store/schema.rs | 36 +++++++++- .../tests/batch_generator_test.rs | 7 +- .../tests/batch_requester_test.rs | 8 +-- .../quorum_store/tests/batch_store_test.rs | 4 +- .../tests/quorum_store_db_test.rs | 7 +- consensus/src/quorum_store/types.rs | 58 ++++++++------- consensus/src/round_manager.rs | 6 +- .../test_utils/mock_quorum_store_sender.rs | 4 +- consensus/src/util/db_tool.rs | 6 +- 17 files changed, 196 insertions(+), 92 deletions(-) diff --git a/consensus/src/network.rs b/consensus/src/network.rs index 123cd6d550fca..278ba5b4f927b 100644 --- a/consensus/src/network.rs +++ b/consensus/src/network.rs @@ -200,7 +200,7 @@ pub trait QuorumStoreSender: Send + Clone { request: BatchRequest, recipient: Author, timeout: Duration, - ) -> anyhow::Result; + ) -> anyhow::Result>; async fn send_signed_batch_info_msg( &self, @@ -214,7 +214,7 @@ pub trait QuorumStoreSender: Send + Clone { recipients: Vec, ); - async fn broadcast_batch_msg(&mut self, batches: Vec); + async fn broadcast_batch_msg(&mut self, batches: Vec>); async fn broadcast_proof_of_store_msg(&mut self, proof_of_stores: Vec>); @@ -548,7 +548,7 @@ impl QuorumStoreSender for NetworkSender { request: BatchRequest, recipient: Author, timeout: Duration, - ) -> anyhow::Result { + ) -> anyhow::Result> { fail_point!("consensus::send::request_batch", |_| Err(anyhow!("failed"))); let request_digest = request.digest(); let msg = ConsensusMsg::BatchRequestMsg(Box::new(request)); @@ -593,7 +593,7 @@ impl QuorumStoreSender for NetworkSender { self.send(msg, recipients).await } - async fn broadcast_batch_msg(&mut self, batches: Vec) { + async fn broadcast_batch_msg(&mut self, batches: Vec>) { fail_point!("consensus::send::broadcast_batch", |_| ()); let msg = ConsensusMsg::BatchMsg(Box::new(BatchMsg::new(batches))); 
self.broadcast(msg).await diff --git a/consensus/src/network_interface.rs b/consensus/src/network_interface.rs index 3591016aa6fe4..7ff44c824e432 100644 --- a/consensus/src/network_interface.rs +++ b/consensus/src/network_interface.rs @@ -64,11 +64,11 @@ pub enum ConsensusMsg { /// it can save slow machines to quickly confirm the execution result. CommitDecisionMsg(Box), /// Quorum Store: Send a Batch of transactions. - BatchMsg(Box), + BatchMsg(Box>), /// Quorum Store: Request the payloads of a completed batch. BatchRequestMsg(Box), /// Quorum Store: Response to the batch request. - BatchResponse(Box), + BatchResponse(Box>), /// Quorum Store: Send a signed batch digest. This is a vote for the batch and a promise that /// the batch of transactions was received and will be persisted until batch expiration. SignedBatchInfo(Box>), @@ -81,7 +81,7 @@ pub enum ConsensusMsg { /// Randomness generation message RandGenMessage(RandGenMessage), /// Quorum Store: Response to the batch request. - BatchResponseV2(Box), + BatchResponseV2(Box>), /// OrderVoteMsg is the struct that is broadcasted by a validator on receiving quorum certificate /// on a block. 
OrderVoteMsg(Box), diff --git a/consensus/src/quorum_store/batch_coordinator.rs b/consensus/src/quorum_store/batch_coordinator.rs index 50cc928ffcb3a..52eb2bdba65b2 100644 --- a/consensus/src/quorum_store/batch_coordinator.rs +++ b/consensus/src/quorum_store/batch_coordinator.rs @@ -15,7 +15,7 @@ use crate::{ }; use anyhow::ensure; use aptos_config::config::BatchTransactionFilterConfig; -use aptos_consensus_types::payload::TDataInfo; +use aptos_consensus_types::{payload::TDataInfo, proof_of_store::BatchInfo}; use aptos_logger::prelude::*; use aptos_short_hex_str::AsShortHexStr; use aptos_types::PeerId; @@ -28,7 +28,7 @@ use tokio::sync::{ #[derive(Debug)] pub enum BatchCoordinatorCommand { Shutdown(oneshot::Sender<()>), - NewBatches(PeerId, Vec), + NewBatches(PeerId, Vec>), } /// The `BatchCoordinator` is responsible for coordinating the receipt and persistence of batches. @@ -80,7 +80,7 @@ impl BatchCoordinator { fn persist_and_send_digests( &self, - persist_requests: Vec, + persist_requests: Vec>, approx_created_ts_usecs: u64, ) { if persist_requests.is_empty() { @@ -130,7 +130,7 @@ impl BatchCoordinator { }); } - fn ensure_max_limits(&self, batches: &[Batch]) -> anyhow::Result<()> { + fn ensure_max_limits(&self, batches: &[Batch]) -> anyhow::Result<()> { let mut total_txns = 0; let mut total_bytes = 0; for batch in batches.iter() { @@ -166,7 +166,11 @@ impl BatchCoordinator { Ok(()) } - pub(crate) async fn handle_batches_msg(&mut self, author: PeerId, batches: Vec) { + pub(crate) async fn handle_batches_msg( + &mut self, + author: PeerId, + batches: Vec>, + ) { if let Err(e) = self.ensure_max_limits(&batches) { error!("Batch from {}: {}", author, e); counters::RECEIVED_BATCH_MAX_LIMIT_FAILED.inc(); diff --git a/consensus/src/quorum_store/batch_generator.rs b/consensus/src/quorum_store/batch_generator.rs index 4d3765f755049..2eae2a6b15cb7 100644 --- a/consensus/src/quorum_store/batch_generator.rs +++ b/consensus/src/quorum_store/batch_generator.rs @@ -14,7 +14,7 
@@ use crate::{ use aptos_config::config::QuorumStoreConfig; use aptos_consensus_types::{ common::{TransactionInProgress, TransactionSummary}, - proof_of_store::{BatchInfoExt, TBatchInfo}, + proof_of_store::{BatchInfo, BatchInfoExt, TBatchInfo}, }; use aptos_experimental_runtimes::thread_manager::optimal_min_len; use aptos_logger::prelude::*; @@ -33,7 +33,7 @@ use tokio::time::Interval; pub enum BatchGeneratorCommand { CommitNotification(u64, Vec), ProofExpiration(Vec), - RemoteBatch(Batch), + RemoteBatch(Batch), Shutdown(tokio::sync::oneshot::Sender<()>), } @@ -175,7 +175,7 @@ impl BatchGenerator { txns: Vec, expiry_time: u64, bucket_start: u64, - ) -> Batch { + ) -> Batch { let batch_id = self.batch_id; self.batch_id.increment(); self.db @@ -201,7 +201,7 @@ impl BatchGenerator { /// batches are pushed. fn push_bucket_to_batches( &mut self, - batches: &mut Vec, + batches: &mut Vec>, txns: &mut Vec, num_txns_in_bucket: usize, expiry_time: u64, @@ -242,7 +242,7 @@ impl BatchGenerator { &mut self, pulled_txns: &mut Vec, expiry_time: u64, - ) -> Vec { + ) -> Vec> { // Sort by gas, in descending order. This is a stable sort on existing mempool ordering, // so will not reorder accounts or their sequence numbers as long as they have the same gas. 
pulled_txns.sort_by_key(|txn| u64::MAX - txn.gas_unit_price()); @@ -325,7 +325,7 @@ impl BatchGenerator { self.txns_in_progress_sorted.len() } - pub(crate) async fn handle_scheduled_pull(&mut self, max_count: u64) -> Vec { + pub(crate) async fn handle_scheduled_pull(&mut self, max_count: u64) -> Vec> { counters::BATCH_PULL_EXCLUDED_TXNS.observe(self.txns_in_progress_sorted.len() as f64); trace!( "QS: excluding txs len: {:?}", diff --git a/consensus/src/quorum_store/batch_requester.rs b/consensus/src/quorum_store/batch_requester.rs index f3391cb541a68..c943fd292b89c 100644 --- a/consensus/src/quorum_store/batch_requester.rs +++ b/consensus/src/quorum_store/batch_requester.rs @@ -9,6 +9,7 @@ use crate::{ types::{BatchRequest, BatchResponse, PersistedValue}, }, }; +use aptos_consensus_types::proof_of_store::BatchInfo; use aptos_crypto::HashValue; use aptos_executor_types::*; use aptos_infallible::Mutex; @@ -102,7 +103,7 @@ impl BatchRequester { digest: HashValue, expiration: u64, responders: Arc>>, - mut subscriber_rx: oneshot::Receiver, + mut subscriber_rx: oneshot::Receiver>, ) -> ExecutorResult> { let validator_verifier = self.validator_verifier.clone(); let mut request_state = BatchRequesterState::new(responders, self.retry_limit); diff --git a/consensus/src/quorum_store/batch_store.rs b/consensus/src/quorum_store/batch_store.rs index b90a6c03a6f69..2c978a2b192e6 100644 --- a/consensus/src/quorum_store/batch_store.rs +++ b/consensus/src/quorum_store/batch_store.rs @@ -116,7 +116,7 @@ impl QuotaManager { pub struct BatchStore { epoch: OnceCell, last_certified_time: AtomicU64, - db_cache: DashMap, + db_cache: DashMap>, peer_quota: DashMap, expirations: Mutex>, db: Arc, @@ -124,7 +124,7 @@ pub struct BatchStore { db_quota: usize, batch_quota: usize, validator_signer: ValidatorSigner, - persist_subscribers: DashMap>>, + persist_subscribers: DashMap>>>, expiration_buffer_usecs: u64, } @@ -252,7 +252,7 @@ impl BatchStore { *self.epoch.get().expect("Epoch should always 
be set") } - fn free_quota(&self, value: PersistedValue) { + fn free_quota(&self, value: PersistedValue) { let mut quota_manager = self .peer_quota .get_mut(&value.author()) @@ -268,7 +268,10 @@ impl BatchStore { // Note: holds db_cache entry lock (due to DashMap), while accessing peer_quota // DashMap. Hence, peer_quota reference should never be held while accessing the // db_cache to avoid the deadlock (if needed, order is db_cache, then peer_quota). - pub(crate) fn insert_to_cache(&self, value: &PersistedValue) -> anyhow::Result { + pub(crate) fn insert_to_cache( + &self, + value: &PersistedValue, + ) -> anyhow::Result { let digest = *value.digest(); let author = value.author(); let expiration_time = value.expiration(); @@ -326,7 +329,7 @@ impl BatchStore { Ok(true) } - pub(crate) fn save(&self, value: &PersistedValue) -> anyhow::Result { + pub(crate) fn save(&self, value: &PersistedValue) -> anyhow::Result { let last_certified_time = self.last_certified_time(); if value.expiration() > last_certified_time { fail_point!("quorum_store::save", |_| { @@ -398,7 +401,7 @@ impl BatchStore { fn persist_inner( &self, batch_info: T, - persist_request: PersistedValue, + persist_request: PersistedValue, ) -> Option> { assert!( batch_info.as_batch_info() == persist_request.batch_info(), @@ -437,7 +440,7 @@ impl BatchStore { self.last_certified_time.load(Ordering::Relaxed) } - fn get_batch_from_db(&self, digest: &HashValue) -> ExecutorResult { + fn get_batch_from_db(&self, digest: &HashValue) -> ExecutorResult> { counters::GET_BATCH_FROM_DB_COUNT.inc(); match self.db.get_batch(digest) { @@ -452,7 +455,7 @@ impl BatchStore { pub(crate) fn get_batch_from_local( &self, digest: &HashValue, - ) -> ExecutorResult { + ) -> ExecutorResult> { if let Some(value) = self.db_cache.get(digest) { if value.payload_storage_mode() == StorageMode::PersistedOnly { self.get_batch_from_db(digest) @@ -469,7 +472,7 @@ impl BatchStore { /// This can be useful in cases where there are multiple flows 
to add a batch (like /// direct from author batch / batch requester fetch) to the batch store and either /// flow needs to subscribe to the other. - fn subscribe(&self, digest: HashValue) -> oneshot::Receiver { + fn subscribe(&self, digest: HashValue) -> oneshot::Receiver> { let (tx, rx) = oneshot::channel(); self.persist_subscribers.entry(digest).or_default().push(tx); @@ -482,7 +485,7 @@ impl BatchStore { rx } - fn notify_subscribers(&self, value: PersistedValue) { + fn notify_subscribers(&self, value: PersistedValue) { if let Some((_, subscribers)) = self.persist_subscribers.remove(value.digest()) { for subscriber in subscribers { subscriber.send(value.clone()).ok(); @@ -492,7 +495,10 @@ impl BatchStore { } impl BatchWriter for BatchStore { - fn persist(&self, persist_requests: Vec) -> Vec> { + fn persist( + &self, + persist_requests: Vec>, + ) -> Vec> { let mut signed_infos = vec![]; for persist_request in persist_requests.into_iter() { let batch_info = persist_request.batch_info().clone(); @@ -506,7 +512,7 @@ impl BatchWriter for BatchStore { fn persist_v2( &self, - persist_requests: Vec, + persist_requests: Vec>, ) -> Vec> { let mut signed_infos = vec![]; for persist_request in persist_requests.into_iter() { @@ -649,10 +655,13 @@ impl BatchReader for Batch } pub trait BatchWriter: Send + Sync { - fn persist(&self, persist_requests: Vec) -> Vec>; + fn persist( + &self, + persist_requests: Vec>, + ) -> Vec>; fn persist_v2( &self, - persist_requests: Vec, + persist_requests: Vec>, ) -> Vec>; } diff --git a/consensus/src/quorum_store/quorum_store_builder.rs b/consensus/src/quorum_store/quorum_store_builder.rs index dcb654fd4620f..b0b895388a133 100644 --- a/consensus/src/quorum_store/quorum_store_builder.rs +++ b/consensus/src/quorum_store/quorum_store_builder.rs @@ -410,7 +410,7 @@ impl InnerBuilder { let response = if let Ok(value) = batch_store.get_batch_from_local(&rpc_request.req.digest()) { - let batch: Batch = value.try_into().unwrap(); + let batch: Batch = 
value.try_into().unwrap(); BatchResponse::Batch(batch) } else { match aptos_db_clone.get_latest_ledger_info() { diff --git a/consensus/src/quorum_store/quorum_store_db.rs b/consensus/src/quorum_store/quorum_store_db.rs index 38c81a5c3dee5..8175a9edaf6e1 100644 --- a/consensus/src/quorum_store/quorum_store_db.rs +++ b/consensus/src/quorum_store/quorum_store_db.rs @@ -4,11 +4,12 @@ use crate::{ error::DbError, quorum_store::{ - schema::{BatchIdSchema, BatchSchema, BATCH_CF_NAME, BATCH_ID_CF_NAME}, + schema::{BatchIdSchema, BatchSchema, BatchV2Schema, BATCH_CF_NAME, BATCH_ID_CF_NAME}, types::PersistedValue, }, }; use anyhow::Result; +use aptos_consensus_types::proof_of_store::{BatchInfo, BatchInfoExt, TBatchInfo}; use aptos_crypto::HashValue; use aptos_logger::prelude::*; use aptos_schemadb::{ @@ -22,11 +23,20 @@ use std::{collections::HashMap, path::Path, time::Instant}; pub trait QuorumStoreStorage: Sync + Send { fn delete_batches(&self, digests: Vec) -> Result<(), DbError>; - fn get_all_batches(&self) -> Result>; + fn get_all_batches(&self) -> Result>>; - fn save_batch(&self, batch: PersistedValue) -> Result<(), DbError>; + fn save_batch(&self, batch: PersistedValue) -> Result<(), DbError>; - fn get_batch(&self, digest: &HashValue) -> Result, DbError>; + fn get_batch(&self, digest: &HashValue) -> Result>, DbError>; + + fn get_all_batches_v2(&self) -> Result>>; + + fn save_batch_v2(&self, batch: PersistedValue) -> Result<(), DbError>; + + fn get_batch_v2( + &self, + digest: &HashValue, + ) -> Result>, DbError>; fn delete_batch_id(&self, epoch: u64) -> Result<(), DbError>; @@ -85,14 +95,14 @@ impl QuorumStoreStorage for QuorumStoreDB { Ok(()) } - fn get_all_batches(&self) -> Result> { + fn get_all_batches(&self) -> Result>> { let mut iter = self.db.iter::()?; iter.seek_to_first(); iter.map(|res| res.map_err(Into::into)) - .collect::>>() + .collect::>>>() } - fn save_batch(&self, batch: PersistedValue) -> Result<(), DbError> { + fn save_batch(&self, batch: 
PersistedValue) -> Result<(), DbError> { trace!( "QS: db persists digest {} expiration {:?}", batch.digest(), @@ -101,10 +111,33 @@ impl QuorumStoreStorage for QuorumStoreDB { self.put::(batch.digest(), &batch) } - fn get_batch(&self, digest: &HashValue) -> Result, DbError> { + fn get_batch(&self, digest: &HashValue) -> Result>, DbError> { Ok(self.db.get::(digest)?) } + fn get_all_batches_v2(&self) -> Result>> { + let mut iter = self.db.iter::()?; + iter.seek_to_first(); + iter.map(|res| res.map_err(Into::into)) + .collect::>>>() + } + + fn save_batch_v2(&self, batch: PersistedValue) -> Result<(), DbError> { + trace!( + "QS: db persists digest {} expiration {:?}", + batch.digest(), + batch.expiration() + ); + self.put::(batch.digest(), &batch) + } + + fn get_batch_v2( + &self, + digest: &HashValue, + ) -> Result>, DbError> { + Ok(self.db.get::(digest)?) + } + fn delete_batch_id(&self, epoch: u64) -> Result<(), DbError> { let mut batch = SchemaBatch::new(); batch.delete::(&epoch)?; @@ -160,15 +193,15 @@ pub mod mock { Ok(()) } - fn get_all_batches(&self) -> Result> { + fn get_all_batches(&self) -> Result>> { Ok(HashMap::new()) } - fn save_batch(&self, _: PersistedValue) -> Result<(), DbError> { + fn save_batch(&self, _: PersistedValue) -> Result<(), DbError> { Ok(()) } - fn get_batch(&self, _: &HashValue) -> Result, DbError> { + fn get_batch(&self, _: &HashValue) -> Result>, DbError> { Ok(None) } @@ -183,5 +216,20 @@ pub mod mock { fn save_batch_id(&self, _: u64, _: BatchId) -> Result<(), DbError> { Ok(()) } + + fn get_all_batches_v2(&self) -> Result>> { + Ok(HashMap::new()) + } + + fn save_batch_v2(&self, batch: PersistedValue) -> Result<(), DbError> { + Ok(()) + } + + fn get_batch_v2( + &self, + digest: &HashValue, + ) -> Result>, DbError> { + Ok(None) + } } } diff --git a/consensus/src/quorum_store/schema.rs b/consensus/src/quorum_store/schema.rs index 4de503c9cc3c5..583400c8c445c 100644 --- a/consensus/src/quorum_store/schema.rs +++ 
b/consensus/src/quorum_store/schema.rs @@ -3,6 +3,7 @@ use crate::quorum_store::types::PersistedValue; use anyhow::Result; +use aptos_consensus_types::proof_of_store::{BatchInfo, BatchInfoExt}; use aptos_crypto::HashValue; use aptos_schemadb::{ schema::{KeyCodec, Schema, ValueCodec}, @@ -12,13 +13,14 @@ use aptos_types::quorum_store::BatchId; pub(crate) const BATCH_CF_NAME: ColumnFamilyName = "batch"; pub(crate) const BATCH_ID_CF_NAME: ColumnFamilyName = "batch_ID"; +pub(crate) const BATCH_V2_CF_NAME: ColumnFamilyName = "batch_v2"; #[derive(Debug)] pub(crate) struct BatchSchema; impl Schema for BatchSchema { type Key = HashValue; - type Value = PersistedValue; + type Value = PersistedValue; const COLUMN_FAMILY_NAME: aptos_schemadb::ColumnFamilyName = BATCH_CF_NAME; } @@ -33,7 +35,37 @@ impl KeyCodec for HashValue { } } -impl ValueCodec for PersistedValue { +impl ValueCodec for PersistedValue { + fn encode_value(&self) -> Result> { + Ok(bcs::to_bytes(&self)?) + } + + fn decode_value(data: &[u8]) -> Result { + Ok(bcs::from_bytes(data)?) + } +} + +#[derive(Debug)] +pub(crate) struct BatchV2Schema; + +impl Schema for BatchV2Schema { + type Key = HashValue; + type Value = PersistedValue; + + const COLUMN_FAMILY_NAME: aptos_schemadb::ColumnFamilyName = BATCH_V2_CF_NAME; +} + +impl KeyCodec for HashValue { + fn encode_key(&self) -> Result> { + Ok(self.to_vec()) + } + + fn decode_key(data: &[u8]) -> Result { + Ok(HashValue::from_slice(data)?) + } +} + +impl ValueCodec for PersistedValue { fn encode_value(&self) -> Result> { Ok(bcs::to_bytes(&self)?) 
} diff --git a/consensus/src/quorum_store/tests/batch_generator_test.rs b/consensus/src/quorum_store/tests/batch_generator_test.rs index 758eba20368b3..7232c2f624f60 100644 --- a/consensus/src/quorum_store/tests/batch_generator_test.rs +++ b/consensus/src/quorum_store/tests/batch_generator_test.rs @@ -35,13 +35,16 @@ impl MockBatchWriter { } impl BatchWriter for MockBatchWriter { - fn persist(&self, _persist_requests: Vec) -> Vec> { + fn persist( + &self, + _persist_requests: Vec>, + ) -> Vec> { vec![] } fn persist_v2( &self, - persist_requests: Vec, + persist_requests: Vec>, ) -> Vec> { vec![] } diff --git a/consensus/src/quorum_store/tests/batch_requester_test.rs b/consensus/src/quorum_store/tests/batch_requester_test.rs index 4441e5eeadb1e..54055539f6700 100644 --- a/consensus/src/quorum_store/tests/batch_requester_test.rs +++ b/consensus/src/quorum_store/tests/batch_requester_test.rs @@ -34,11 +34,11 @@ use tokio::sync::oneshot; #[derive(Clone)] struct MockBatchRequester { - return_value: BatchResponse, + return_value: BatchResponse, } impl MockBatchRequester { - fn new(return_value: BatchResponse) -> Self { + fn new(return_value: BatchResponse) -> Self { Self { return_value } } } @@ -50,7 +50,7 @@ impl QuorumStoreSender for MockBatchRequester { _request: BatchRequest, _recipient: Author, _timeout: Duration, - ) -> anyhow::Result { + ) -> anyhow::Result> { Ok(self.return_value.clone()) } @@ -70,7 +70,7 @@ impl QuorumStoreSender for MockBatchRequester { unimplemented!() } - async fn broadcast_batch_msg(&mut self, _batches: Vec) { + async fn broadcast_batch_msg(&mut self, _batches: Vec>) { unimplemented!() } diff --git a/consensus/src/quorum_store/tests/batch_store_test.rs b/consensus/src/quorum_store/tests/batch_store_test.rs index a2704c2bf80c1..fb213841927f9 100644 --- a/consensus/src/quorum_store/tests/batch_store_test.rs +++ b/consensus/src/quorum_store/tests/batch_store_test.rs @@ -46,7 +46,7 @@ fn request_for_test( round: u64, num_bytes: u64, 
maybe_payload: Option>, -) -> PersistedValue { +) -> PersistedValue { PersistedValue::new( BatchInfo::new( *TEST_REQUEST_ACCOUNT, // make sure all request come from the same account @@ -96,7 +96,7 @@ async fn test_extend_expiration_vs_save() { let batch_store_clone2 = batch_store.clone(); let digests: Vec = (0..num_experiments).map(|_| HashValue::random()).collect(); - let later_exp_values: Vec = (0..num_experiments) + let later_exp_values: Vec> = (0..num_experiments) .map(|i| { // Pre-insert some of them. if i % 2 == 0 { diff --git a/consensus/src/quorum_store/tests/quorum_store_db_test.rs b/consensus/src/quorum_store/tests/quorum_store_db_test.rs index 9d526499e976b..d23e301ef91bb 100644 --- a/consensus/src/quorum_store/tests/quorum_store_db_test.rs +++ b/consensus/src/quorum_store/tests/quorum_store_db_test.rs @@ -8,6 +8,7 @@ use crate::{ }, test_utils::create_vec_signed_transactions, }; +use aptos_consensus_types::proof_of_store::BatchInfo; use aptos_temppath::TempPath; use aptos_types::{account_address::AccountAddress, quorum_store::BatchId}; use claims::assert_ok; @@ -19,7 +20,7 @@ fn test_db_for_data() { let source = AccountAddress::random(); let signed_txns = create_vec_signed_transactions(100); - let persist_request_1: PersistedValue = + let persist_request_1: PersistedValue = Batch::new(BatchId::new_for_test(1), signed_txns, 1, 20, source, 0).into(); let clone_1 = persist_request_1.clone(); assert!(db.save_batch(clone_1).is_ok()); @@ -32,13 +33,13 @@ fn test_db_for_data() { ); let signed_txns = create_vec_signed_transactions(200); - let persist_request_2: PersistedValue = + let persist_request_2: PersistedValue = Batch::new(BatchId::new_for_test(1), signed_txns, 1, 20, source, 0).into(); let clone_2 = persist_request_2.clone(); assert_ok!(db.save_batch(clone_2)); let signed_txns = create_vec_signed_transactions(300); - let persist_request_3: PersistedValue = + let persist_request_3: PersistedValue = Batch::new(BatchId::new_for_test(1), signed_txns, 1, 20, 
source, 0).into(); let clone_3 = persist_request_3.clone(); assert_ok!(db.save_batch(clone_3)); diff --git a/consensus/src/quorum_store/types.rs b/consensus/src/quorum_store/types.rs index 490690114cd41..e13a4cfd166da 100644 --- a/consensus/src/quorum_store/types.rs +++ b/consensus/src/quorum_store/types.rs @@ -4,7 +4,7 @@ use anyhow::ensure; use aptos_consensus_types::{ common::{BatchPayload, TxnSummaryWithExpiration}, - proof_of_store::BatchInfo, + proof_of_store::{BatchInfo, TBatchInfo}, }; use aptos_crypto::{hash::CryptoHash, HashValue}; use aptos_types::{ @@ -18,8 +18,8 @@ use std::{ }; #[derive(Clone, Eq, Deserialize, Serialize, PartialEq, Debug)] -pub struct PersistedValue { - info: BatchInfo, +pub struct PersistedValue { + info: T, maybe_payload: Option>, } @@ -29,8 +29,8 @@ pub(crate) enum StorageMode { MemoryAndPersisted, } -impl PersistedValue { - pub(crate) fn new(info: BatchInfo, maybe_payload: Option>) -> Self { +impl PersistedValue { + pub(crate) fn new(info: T, maybe_payload: Option>) -> Self { Self { info, maybe_payload, @@ -53,7 +53,7 @@ impl PersistedValue { self.maybe_payload = None; } - pub fn batch_info(&self) -> &BatchInfo { + pub fn batch_info(&self) -> &T { &self.info } @@ -78,23 +78,23 @@ impl PersistedValue { vec![] } - pub fn unpack(self) -> (BatchInfo, Option>) { + pub fn unpack(self) -> (T, Option>) { (self.info, self.maybe_payload) } } -impl Deref for PersistedValue { - type Target = BatchInfo; +impl Deref for PersistedValue { + type Target = T; fn deref(&self) -> &Self::Target { &self.info } } -impl TryFrom for Batch { +impl TryFrom> for Batch { type Error = anyhow::Error; - fn try_from(value: PersistedValue) -> Result { + fn try_from(value: PersistedValue) -> Result { let author = value.author(); Ok(Batch { batch_info: value.info, @@ -125,12 +125,12 @@ mod tests { } #[derive(Clone, Debug, Deserialize, Serialize)] -pub struct Batch { - batch_info: BatchInfo, +pub struct Batch { + batch_info: T, payload: BatchPayload, } -impl Batch { 
+impl Batch { pub fn new( batch_id: BatchId, payload: Vec, @@ -150,6 +150,12 @@ impl Batch { payload.num_bytes() as u64, gas_bucket_start, ); + Self::new_generic(batch_info, payload) + } +} + +impl Batch { + pub fn new_generic(batch_info: T, payload: BatchPayload) -> Self { Self { batch_info, payload, @@ -200,13 +206,13 @@ impl Batch { self.payload.txns() } - pub fn batch_info(&self) -> &BatchInfo { + pub fn batch_info(&self) -> &T { &self.batch_info } } -impl Deref for Batch { - type Target = BatchInfo; +impl Deref for Batch { + type Target = T; fn deref(&self) -> &Self::Target { &self.batch_info @@ -264,8 +270,8 @@ impl BatchRequest { } } -impl From for PersistedValue { - fn from(value: Batch) -> Self { +impl From> for PersistedValue { + fn from(value: Batch) -> Self { let Batch { batch_info, payload, @@ -275,18 +281,18 @@ impl From for PersistedValue { } #[derive(Clone, Debug, Deserialize, Serialize)] -pub enum BatchResponse { - Batch(Batch), +pub enum BatchResponse { + Batch(Batch), NotFound(LedgerInfoWithSignatures), } #[derive(Clone, Debug, Deserialize, Serialize)] -pub struct BatchMsg { - batches: Vec, +pub struct BatchMsg { + batches: Vec>, } -impl BatchMsg { - pub fn new(batches: Vec) -> Self { +impl BatchMsg { + pub fn new(batches: Vec>) -> Self { Self { batches } } @@ -338,7 +344,7 @@ impl BatchMsg { self.batches.first().map(|batch| batch.author()) } - pub fn take(self) -> Vec { + pub fn take(self) -> Vec> { self.batches } } diff --git a/consensus/src/round_manager.rs b/consensus/src/round_manager.rs index 4e3647142db96..4181324f7114a 100644 --- a/consensus/src/round_manager.rs +++ b/consensus/src/round_manager.rs @@ -45,7 +45,7 @@ use aptos_consensus_types::{ order_vote::OrderVote, order_vote_msg::OrderVoteMsg, pipelined_block::PipelinedBlock, - proof_of_store::{BatchInfoExt, ProofCache, ProofOfStoreMsg, SignedBatchInfoMsg}, + proof_of_store::{BatchInfo, BatchInfoExt, ProofCache, ProofOfStoreMsg, SignedBatchInfoMsg}, proposal_msg::ProposalMsg, 
quorum_cert::QuorumCert, round_timeout::{RoundTimeout, RoundTimeoutMsg, RoundTimeoutReason}, @@ -94,7 +94,7 @@ pub enum UnverifiedEvent { RoundTimeoutMsg(Box), OrderVoteMsg(Box), SyncInfo(Box), - BatchMsg(Box), + BatchMsg(Box>), SignedBatchInfo(Box>), ProofOfStoreMsg(Box>), OptProposalMsg(Box), @@ -243,7 +243,7 @@ pub enum VerifiedEvent { RoundTimeoutMsg(Box), OrderVoteMsg(Box), UnverifiedSyncInfo(Box), - BatchMsg(Box), + BatchMsg(Box>), SignedBatchInfo(Box>), ProofOfStoreMsg(Box>), // local messages diff --git a/consensus/src/test_utils/mock_quorum_store_sender.rs b/consensus/src/test_utils/mock_quorum_store_sender.rs index f33ef98ed688a..99c92134e824f 100644 --- a/consensus/src/test_utils/mock_quorum_store_sender.rs +++ b/consensus/src/test_utils/mock_quorum_store_sender.rs @@ -33,7 +33,7 @@ impl QuorumStoreSender for MockQuorumStoreSender { _request: BatchRequest, _recipient: Author, _timeout: Duration, - ) -> anyhow::Result { + ) -> anyhow::Result> { unimplemented!(); } @@ -69,7 +69,7 @@ impl QuorumStoreSender for MockQuorumStoreSender { .expect("could not send"); } - async fn broadcast_batch_msg(&mut self, _batches: Vec) { + async fn broadcast_batch_msg(&mut self, _batches: Vec>) { unimplemented!() } diff --git a/consensus/src/util/db_tool.rs b/consensus/src/util/db_tool.rs index 36c834e209194..66b7a50bf24e0 100644 --- a/consensus/src/util/db_tool.rs +++ b/consensus/src/util/db_tool.rs @@ -11,7 +11,7 @@ use crate::{ }, }; use anyhow::{bail, Result}; -use aptos_consensus_types::{block::Block, common::Payload}; +use aptos_consensus_types::{block::Block, common::Payload, proof_of_store::BatchInfo}; use aptos_crypto::HashValue; use aptos_types::transaction::{SignedTransaction, Transaction}; use clap::Parser; @@ -63,7 +63,7 @@ impl Command { fn extract_txns_from_quorum_store( digests: impl Iterator, - all_batches: &HashMap, + all_batches: &HashMap>, ) -> anyhow::Result> { let mut block_txns = Vec::new(); for digest in digests { @@ -82,7 +82,7 @@ fn 
extract_txns_from_quorum_store( pub fn extract_txns_from_block<'a>( block: &'a Block, - all_batches: &'a HashMap, + all_batches: &'a HashMap>, ) -> anyhow::Result> { match block.payload().as_ref() { Some(payload) => match payload { From 90e0f433a568735a414d4dde5686fcb81e7b9d05 Mon Sep 17 00:00:00 2001 From: Balaji Arun Date: Sat, 8 Nov 2025 21:21:12 -0800 Subject: [PATCH 9/9] [consensus] Introduce OptQS::V2 Payload --- consensus/consensus-types/src/block.rs | 63 +++-- consensus/consensus-types/src/common.rs | 59 +++-- consensus/consensus-types/src/payload.rs | 215 +++++++++++------- .../consensus-types/src/proof_of_store.rs | 20 +- .../network/observer_message.rs | 39 ++-- .../quorum_store_payload_manager.rs | 58 ++++- .../src/quorum_store/batch_proof_queue.rs | 1 - consensus/src/util/db_tool.rs | 34 ++- 8 files changed, 330 insertions(+), 159 deletions(-) diff --git a/consensus/consensus-types/src/block.rs b/consensus/consensus-types/src/block.rs index c830d4e0a9478..d45bfd6b9a15c 100644 --- a/consensus/consensus-types/src/block.rs +++ b/consensus/consensus-types/src/block.rs @@ -6,6 +6,7 @@ use crate::{ block_data::{BlockData, BlockType}, common::{Author, Payload, Round}, opt_block_data::OptBlockData, + payload::{OptQuorumStorePayload, TDataInfo}, quorum_cert::QuorumCert, }; use anyhow::{bail, ensure, format_err, Result}; @@ -143,11 +144,19 @@ impl Block { proof_with_data.num_txns(), proof_with_data.num_bytes(), ), - Payload::OptQuorumStore(opt_quorum_store_payload) => ( - opt_quorum_store_payload.proof_with_data().num_proofs(), - opt_quorum_store_payload.proof_with_data().num_txns(), - opt_quorum_store_payload.proof_with_data().num_bytes(), - ), + Payload::OptQuorumStore(opt_quorum_store_payload) => match opt_quorum_store_payload + { + OptQuorumStorePayload::V1(p) => ( + p.proof_with_data().num_proofs(), + p.proof_with_data().num_txns(), + p.proof_with_data().num_bytes(), + ), + OptQuorumStorePayload::V2(p) => ( + p.proof_with_data().num_proofs(), + 
p.proof_with_data().num_txns(), + p.proof_with_data().num_bytes(), + ), + }, }, } } @@ -169,11 +178,19 @@ impl Block { .map(|(b, _)| b.num_bytes() as usize) .sum(), ), - Payload::OptQuorumStore(opt_quorum_store_payload) => ( - opt_quorum_store_payload.inline_batches().num_batches(), - opt_quorum_store_payload.inline_batches().num_txns(), - opt_quorum_store_payload.inline_batches().num_bytes(), - ), + Payload::OptQuorumStore(opt_quorum_store_payload) => match opt_quorum_store_payload + { + OptQuorumStorePayload::V1(p) => ( + p.inline_batches().num_batches(), + p.inline_batches().num_txns(), + p.inline_batches().num_bytes(), + ), + OptQuorumStorePayload::V2(p) => ( + p.inline_batches().num_batches(), + p.inline_batches().num_txns(), + p.inline_batches().num_bytes(), + ), + }, _ => (0, 0, 0), }, } @@ -184,19 +201,19 @@ impl Block { match self.block_data.payload() { None => (0, 0, 0), Some(payload) => match payload { - Payload::OptQuorumStore(opt_quorum_store_payload) => ( - opt_quorum_store_payload.opt_batches().len(), - opt_quorum_store_payload - .opt_batches() - .iter() - .map(|b| b.num_txns() as usize) - .sum(), - opt_quorum_store_payload - .opt_batches() - .iter() - .map(|b| b.num_bytes() as usize) - .sum(), - ), + Payload::OptQuorumStore(opt_quorum_store_payload) => match opt_quorum_store_payload + { + OptQuorumStorePayload::V1(p) => ( + p.opt_batches().len(), + p.opt_batches().iter().map(|b| b.num_txns() as usize).sum(), + p.opt_batches().iter().map(|b| b.num_bytes() as usize).sum(), + ), + OptQuorumStorePayload::V2(p) => ( + p.opt_batches().len(), + p.opt_batches().iter().map(|b| b.num_txns() as usize).sum(), + p.opt_batches().iter().map(|b| b.num_bytes() as usize).sum(), + ), + }, _ => (0, 0, 0), }, } diff --git a/consensus/consensus-types/src/common.rs b/consensus/consensus-types/src/common.rs index f27597edca7f6..4fb442b0ac706 100644 --- a/consensus/consensus-types/src/common.rs +++ b/consensus/consensus-types/src/common.rs @@ -515,11 +515,15 @@ impl Payload 
{ } } - fn verify_with_cache( - proofs: &[ProofOfStore], + fn verify_with_cache( + proofs: &[ProofOfStore], validator: &ValidatorVerifier, proof_cache: &ProofCache, - ) -> anyhow::Result<()> { + ) -> anyhow::Result<()> + where + T: TBatchInfo + Send + Sync + 'static, + BatchInfoExt: From, + { let unverified: Vec<_> = proofs .iter() .filter(|proof| { @@ -535,15 +539,15 @@ impl Payload { Ok(()) } - pub fn verify_inline_batches<'a>( - inline_batches: impl Iterator)>, + pub fn verify_inline_batches<'a, T: TBatchInfo + 'a>( + inline_batches: impl Iterator)>, ) -> anyhow::Result<()> { for (batch, payload) in inline_batches { // TODO: Can cloning be avoided here? let computed_digest = BatchPayload::new(batch.author(), payload.clone()).hash(); ensure!( computed_digest == *batch.digest(), - "Hash of the received inline batch doesn't match the digest value for batch {}: {} != {}", + "Hash of the received inline batch doesn't match the digest value for batch {:?}: {} != {}", batch, computed_digest, batch.digest() @@ -552,9 +556,9 @@ impl Payload { Ok(()) } - pub fn verify_opt_batches( + pub fn verify_opt_batches( verifier: &ValidatorVerifier, - opt_batches: &OptBatches, + opt_batches: &OptBatches, ) -> anyhow::Result<()> { let authors = verifier.address_to_validator_index(); for batch in &opt_batches.batch_summary { @@ -592,16 +596,26 @@ impl Payload { )?; Ok(()) }, - (true, Payload::OptQuorumStore(opt_quorum_store)) => { - let proof_with_data = opt_quorum_store.proof_with_data(); + (true, Payload::OptQuorumStore(OptQuorumStorePayload::V1(p))) => { + let proof_with_data = p.proof_with_data(); + Self::verify_with_cache(&proof_with_data.batch_summary, verifier, proof_cache)?; + Self::verify_inline_batches( + p.inline_batches() + .iter() + .map(|batch| (batch.info(), batch.transactions())), + )?; + Self::verify_opt_batches(verifier, p.opt_batches())?; + Ok(()) + }, + (true, Payload::OptQuorumStore(OptQuorumStorePayload::V2(p))) => { + let proof_with_data = p.proof_with_data(); 
Self::verify_with_cache(&proof_with_data.batch_summary, verifier, proof_cache)?; Self::verify_inline_batches( - opt_quorum_store - .inline_batches() + p.inline_batches() .iter() .map(|batch| (batch.info(), batch.transactions())), )?; - Self::verify_opt_batches(verifier, opt_quorum_store.opt_batches())?; + Self::verify_opt_batches(verifier, p.opt_batches())?; Ok(()) }, (_, _) => Err(anyhow::anyhow!( @@ -792,17 +806,28 @@ impl From<&Vec<&Payload>> for PayloadFilter { Payload::DirectMempool(_) => { error!("DirectMempool payload in InQuorumStore filter"); }, - Payload::OptQuorumStore(opt_qs_payload) => { - for batch in opt_qs_payload.inline_batches().iter() { + Payload::OptQuorumStore(OptQuorumStorePayload::V1(p)) => { + for batch in p.inline_batches().iter() { exclude_batches.insert(batch.info().clone().into()); } - for batch_info in &opt_qs_payload.opt_batches().batch_summary { + for batch_info in &p.opt_batches().batch_summary { exclude_batches.insert(batch_info.clone().into()); } - for proof in &opt_qs_payload.proof_with_data().batch_summary { + for proof in &p.proof_with_data().batch_summary { exclude_batches.insert(proof.info().clone().into()); } }, + Payload::OptQuorumStore(OptQuorumStorePayload::V2(p)) => { + for batch in p.inline_batches().iter() { + exclude_batches.insert(batch.info().clone()); + } + for batch_info in &p.opt_batches().batch_summary { + exclude_batches.insert(batch_info.clone()); + } + for proof in &p.proof_with_data().batch_summary { + exclude_batches.insert(proof.info().clone()); + } + }, } } PayloadFilter::InQuorumStore(exclude_batches) diff --git a/consensus/consensus-types/src/payload.rs b/consensus/consensus-types/src/payload.rs index c9be597722df1..338a4ae6945e9 100644 --- a/consensus/consensus-types/src/payload.rs +++ b/consensus/consensus-types/src/payload.rs @@ -1,7 +1,7 @@ // Copyright (c) Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::proof_of_store::{BatchInfo, BatchInfoExt, ProofOfStore}; +use 
crate::proof_of_store::{BatchInfo, BatchInfoExt, ProofOfStore, TBatchInfo}; use anyhow::ensure; use aptos_types::{transaction::SignedTransaction, PeerId}; use core::fmt; @@ -11,13 +11,9 @@ use std::{ ops::{Deref, DerefMut}, }; -pub type OptBatches = BatchPointer; +pub type OptBatches = BatchPointer; -pub type ProofBatches = BatchPointer>; - -pub type OptBatchesExt = BatchPointer; - -pub type ProofBatchesExt = BatchPointer; +pub type ProofBatches = BatchPointer>; pub trait TDataInfo { fn num_txns(&self) -> u64; @@ -98,6 +94,12 @@ impl Deref for BatchPointer { } } +impl DerefMut for BatchPointer { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.batch_summary + } +} + impl IntoIterator for BatchPointer { type IntoIter = std::vec::IntoIter; type Item = T; @@ -192,20 +194,20 @@ impl PayloadExecutionLimit { } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] -pub struct InlineBatch { - batch_info: BatchInfo, +pub struct InlineBatch { + batch_info: T, transactions: Vec, } -impl InlineBatch { - pub fn new(batch_info: BatchInfo, transactions: Vec) -> Self { +impl InlineBatch { + pub fn new(batch_info: T, transactions: Vec) -> Self { Self { batch_info, transactions, } } - pub fn info(&self) -> &BatchInfo { + pub fn info(&self) -> &T { &self.batch_info } @@ -215,9 +217,9 @@ impl InlineBatch { } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] -pub struct InlineBatches(Vec); +pub struct InlineBatches(Vec>); -impl InlineBatches { +impl InlineBatches { pub fn num_batches(&self) -> usize { self.0.len() } @@ -247,7 +249,7 @@ impl InlineBatches { .collect() } - pub fn batch_infos(&self) -> Vec { + pub fn batch_infos(&self) -> Vec { self.0 .iter() .map(|inline_batch| inline_batch.batch_info.clone()) @@ -255,14 +257,14 @@ impl InlineBatches { } } -impl From> for InlineBatches { - fn from(value: Vec) -> Self { +impl From>> for InlineBatches { + fn from(value: Vec>) -> Self { Self(value) } } -impl From)>> for InlineBatches { - fn from(value: 
Vec<(BatchInfo, Vec)>) -> Self { +impl From)>> for InlineBatches { + fn from(value: Vec<(T, Vec)>) -> Self { value .into_iter() .map(|(batch_info, transactions)| InlineBatch::new(batch_info, transactions)) @@ -271,29 +273,32 @@ impl From)>> for InlineBatches { } } -impl Deref for InlineBatches { - type Target = Vec; +impl Deref for InlineBatches { + type Target = Vec>; fn deref(&self) -> &Self::Target { &self.0 } } -impl DerefMut for InlineBatches { +impl DerefMut for InlineBatches { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] -pub struct OptQuorumStorePayloadV1 { - inline_batches: InlineBatches, - opt_batches: OptBatches, - proofs: ProofBatches, +pub struct OptQuorumStorePayloadV1 { + inline_batches: InlineBatches, + opt_batches: OptBatches, + proofs: ProofBatches, execution_limits: PayloadExecutionLimit, } -impl OptQuorumStorePayloadV1 { +impl OptQuorumStorePayloadV1 +where + T: TBatchInfo + Send + Sync + 'static + TDataInfo, +{ pub fn get_all_batch_infos(self) -> Vec { let Self { inline_batches, @@ -323,29 +328,93 @@ impl OptQuorumStorePayloadV1 { "OptQS InlineBatch epoch doesn't match given epoch" ); ensure!( - self.opt_batches.iter().all(|b| b.info().epoch() == epoch), + self.opt_batches.iter().all(|b| b.epoch() == epoch), "OptQS OptBatch epoch doesn't match given epoch" ); ensure!( - self.proofs.iter().all(|b| b.info().epoch() == epoch), + self.proofs.iter().all(|b| b.epoch() == epoch), "OptQS Proof epoch doesn't match given epoch" ); Ok(()) } + + fn extend(mut self, other: Self) -> Self { + self.inline_batches.extend(other.inline_batches.0); + self.opt_batches.extend(other.opt_batches); + self.proofs.extend(other.proofs); + self.execution_limits.extend(other.execution_limits); + self + } + + pub fn inline_batches(&self) -> &InlineBatches { + &self.inline_batches + } + + pub fn proof_with_data(&self) -> &BatchPointer> { + &self.proofs + } + + pub fn opt_batches(&self) -> 
&BatchPointer { + &self.opt_batches + } + + pub fn set_execution_limit(&mut self, execution_limits: PayloadExecutionLimit) { + self.execution_limits = execution_limits; + } + + pub(crate) fn num_txns(&self) -> usize { + self.opt_batches.num_txns() + self.proofs.num_txns() + self.inline_batches.num_txns() + } + + pub(crate) fn is_empty(&self) -> bool { + self.opt_batches.is_empty() && self.proofs.is_empty() && self.inline_batches.is_empty() + } + + pub(crate) fn num_bytes(&self) -> usize { + self.opt_batches.num_bytes() + self.proofs.num_bytes() + self.inline_batches.num_bytes() + } +} + +impl From> for OptQuorumStorePayloadV1 { + fn from(p: OptQuorumStorePayloadV1) -> Self { + OptQuorumStorePayloadV1 { + inline_batches: p + .inline_batches + .0 + .into_iter() + .map(|batch| InlineBatch::new(batch.batch_info.into(), batch.transactions)) + .collect::>() + .into(), + opt_batches: p + .opt_batches + .into_iter() + .map(|batch| batch.into()) + .collect::>() + .into(), + proofs: p + .proofs + .into_iter() + .map(|proof| proof.into()) + .collect::>() + .into(), + execution_limits: p.execution_limits, + } + } } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub enum OptQuorumStorePayload { - V1(OptQuorumStorePayloadV1), + V1(OptQuorumStorePayloadV1), + V2(OptQuorumStorePayloadV1), } impl OptQuorumStorePayload { pub fn new( - inline_batches: InlineBatches, - opt_batches: OptBatches, - proofs: ProofBatches, + inline_batches: InlineBatches, + opt_batches: OptBatches, + proofs: ProofBatches, execution_limits: PayloadExecutionLimit, ) -> Self { Self::V1(OptQuorumStorePayloadV1 { @@ -356,64 +425,56 @@ impl OptQuorumStorePayload { }) } - pub(crate) fn num_txns(&self) -> usize { - self.opt_batches.num_txns() + self.proofs.num_txns() + self.inline_batches.num_txns() - } - - pub(crate) fn is_empty(&self) -> bool { - self.opt_batches.is_empty() && self.proofs.is_empty() && self.inline_batches.is_empty() - } - - pub(crate) fn extend(mut self, other: Self) -> Self { - 
let other: OptQuorumStorePayloadV1 = other.into_inner(); - self.inline_batches.extend(other.inline_batches.0); - self.opt_batches.extend(other.opt_batches); - self.proofs.extend(other.proofs); - self.execution_limits.extend(other.execution_limits); - self - } - - pub(crate) fn num_bytes(&self) -> usize { - self.opt_batches.num_bytes() + self.proofs.num_bytes() + self.inline_batches.num_bytes() + pub(crate) fn extend(self, other: Self) -> Self { + match (self, other) { + (Self::V1(p1), Self::V1(p2)) => Self::V1(p1.extend(p2)), + (Self::V2(p1), Self::V2(p2)) => Self::V2(p1.extend(p2)), + (Self::V1(p1), Self::V2(p2)) => { + Self::V2(OptQuorumStorePayloadV1::::from(p1).extend(p2)) + }, + (Self::V2(p1), Self::V1(p2)) => Self::V2(p1.extend(p2.into())), + } } - pub fn into_inner(self) -> OptQuorumStorePayloadV1 { + pub fn set_execution_limit(&mut self, execution_limits: PayloadExecutionLimit) { match self { - OptQuorumStorePayload::V1(opt_qs_payload) => opt_qs_payload, + OptQuorumStorePayload::V1(p) => p.set_execution_limit(execution_limits), + OptQuorumStorePayload::V2(p) => p.set_execution_limit(execution_limits), } } - pub fn inline_batches(&self) -> &InlineBatches { - &self.inline_batches - } - - pub fn proof_with_data(&self) -> &BatchPointer> { - &self.proofs + pub(crate) fn num_txns(&self) -> usize { + match self { + OptQuorumStorePayload::V1(p) => p.num_txns(), + OptQuorumStorePayload::V2(p) => p.num_txns(), + } } - pub fn opt_batches(&self) -> &BatchPointer { - &self.opt_batches + pub(crate) fn is_empty(&self) -> bool { + match self { + OptQuorumStorePayload::V1(p) => p.is_empty(), + OptQuorumStorePayload::V2(p) => p.is_empty(), + } } - pub fn set_execution_limit(&mut self, execution_limits: PayloadExecutionLimit) { - self.execution_limits = execution_limits; + pub(crate) fn num_bytes(&self) -> usize { + match self { + OptQuorumStorePayload::V1(p) => p.num_bytes(), + OptQuorumStorePayload::V2(p) => p.num_bytes(), + } } -} - -impl Deref for OptQuorumStorePayload { 
- type Target = OptQuorumStorePayloadV1; - fn deref(&self) -> &Self::Target { + pub(crate) fn max_txns_to_execute(&self) -> Option { match self { - OptQuorumStorePayload::V1(opt_qs_payload) => opt_qs_payload, + OptQuorumStorePayload::V1(p) => p.max_txns_to_execute(), + OptQuorumStorePayload::V2(p) => p.max_txns_to_execute(), } } -} -impl DerefMut for OptQuorumStorePayload { - fn deref_mut(&mut self) -> &mut Self::Target { + pub(crate) fn check_epoch(&self, epoch: u64) -> anyhow::Result<()> { match self { - OptQuorumStorePayload::V1(opt_qs_payload) => opt_qs_payload, + OptQuorumStorePayload::V1(p) => p.check_epoch(epoch), + OptQuorumStorePayload::V2(p) => p.check_epoch(epoch), } } } @@ -423,9 +484,9 @@ impl fmt::Display for OptQuorumStorePayload { write!( f, "OptQuorumStorePayload(opt_batches: {}, proofs: {}, limits: {:?})", - self.opt_batches.num_txns(), - self.proofs.num_txns(), - self.execution_limits, + self.num_txns(), + self.num_txns(), + self.is_empty(), ) } } diff --git a/consensus/consensus-types/src/proof_of_store.rs b/consensus/consensus-types/src/proof_of_store.rs index cd9d16eb62844..33e22194a38db 100644 --- a/consensus/consensus-types/src/proof_of_store.rs +++ b/consensus/consensus-types/src/proof_of_store.rs @@ -249,7 +249,25 @@ impl TBatchInfo for BatchInfoExt { } fn size(&self) -> PayloadTxnsSize { - PayloadTxnsSize::new(self.num_txns(), self.num_bytes()) + PayloadTxnsSize::new(self.info().num_txns(), self.info().num_bytes()) + } +} + +impl TDataInfo for BatchInfoExt { + fn num_txns(&self) -> u64 { + self.info().num_txns() + } + + fn num_bytes(&self) -> u64 { + self.info().num_bytes() + } + + fn info(&self) -> &BatchInfo { + self.info() + } + + fn signers(&self, _ordered_authors: &[PeerId]) -> Vec { + vec![self.author()] } } diff --git a/consensus/src/consensus_observer/network/observer_message.rs b/consensus/src/consensus_observer/network/observer_message.rs index fd47d820a31bd..219b76bf26134 100644 --- 
a/consensus/src/consensus_observer/network/observer_message.rs +++ b/consensus/src/consensus_observer/network/observer_message.rs @@ -2,9 +2,10 @@ // SPDX-License-Identifier: Apache-2.0 use crate::consensus_observer::common::error::Error; +use anyhow::bail; use aptos_consensus_types::{ common::{BatchPayload, Payload}, - payload::InlineBatches, + payload::{InlineBatches, OptQuorumStorePayload}, pipelined_block::PipelinedBlock, proof_of_store::{BatchInfo, ProofCache, ProofOfStore}, }; @@ -695,18 +696,20 @@ impl BlockTransactionPayload { // TODO: verify the block gas limit? }, - Payload::OptQuorumStore(opt_qs_payload) => { + Payload::OptQuorumStore(OptQuorumStorePayload::V1(p)) => { // Verify the batches in the requested block - self.verify_batches(opt_qs_payload.proof_with_data())?; + self.verify_batches(p.proof_with_data())?; // Verify optQS and inline batches - self.verify_optqs_and_inline_batches( - opt_qs_payload.opt_batches(), - opt_qs_payload.inline_batches(), - )?; + self.verify_optqs_and_inline_batches(p.opt_batches(), p.inline_batches())?; // Verify the transaction limit - self.verify_transaction_limit(opt_qs_payload.max_txns_to_execute())?; + self.verify_transaction_limit(p.max_txns_to_execute())?; + }, + Payload::OptQuorumStore(OptQuorumStorePayload::V2(_)) => { + return Err(Error::InvalidMessageError( + "OptQuorumStorePayload V2 is not supported".into(), + )); + }, } @@ -771,7 +774,7 @@ impl BlockTransactionPayload { fn verify_optqs_and_inline_batches( &self, expected_opt_batches: &Vec, - expected_inline_batches: &InlineBatches, + expected_inline_batches: &InlineBatches, ) -> Result<(), Error> { let optqs_and_inline_batches: &Vec = match self { BlockTransactionPayload::OptQuorumStore(_, optqs_and_inline_batches) => { @@ -1245,10 +1248,10 @@ mod test { ); // Create a quorum store payload with a single proof - let inline_batches = InlineBatches::from(Vec::::new()); + let inline_batches = InlineBatches::from(Vec::>::new()); let opt_batches: BatchPointer = 
Vec::new().into(); let batch_info = create_batch_info(); - let proof_with_data: ProofBatches = + let proof_with_data: ProofBatches = vec![ProofOfStore::new(batch_info, AggregateSignature::empty())].into(); let ordered_payload = Payload::OptQuorumStore(OptQuorumStorePayload::new( inline_batches.clone(), @@ -1264,7 +1267,7 @@ mod test { assert_matches!(error, Error::InvalidMessageError(_)); // Create a quorum store payload with no transaction limit - let proof_with_data: ProofBatches = Vec::new().into(); + let proof_with_data: ProofBatches = Vec::new().into(); let ordered_payload = Payload::OptQuorumStore(OptQuorumStorePayload::new( inline_batches, opt_batches, @@ -1279,7 +1282,7 @@ mod test { assert_matches!(error, Error::InvalidMessageError(_)); // Create a quorum store payload with a single inline batch - let proof_with_data: ProofBatches = Vec::new().into(); + let proof_with_data: ProofBatches = Vec::new().into(); let ordered_payload = Payload::OptQuorumStore(OptQuorumStorePayload::new( vec![(create_batch_info(), vec![])].into(), Vec::new().into(), @@ -1294,9 +1297,9 @@ mod test { assert_matches!(error, Error::InvalidMessageError(_)); // Create a quorum store payload with a single opt batch - let proof_with_data: ProofBatches = Vec::new().into(); + let proof_with_data: ProofBatches = Vec::new().into(); let ordered_payload = Payload::OptQuorumStore(OptQuorumStorePayload::new( - Vec::::new().into(), + Vec::>::new().into(), vec![create_batch_info()].into(), proof_with_data, PayloadExecutionLimit::None, @@ -1311,7 +1314,7 @@ mod test { // Create an empty opt quorum store payload let proof_with_data = Vec::new().into(); let ordered_payload = Payload::OptQuorumStore(OptQuorumStorePayload::new( - Vec::::new().into(), + Vec::>::new().into(), Vec::new().into(), proof_with_data, PayloadExecutionLimit::MaxTransactionsToExecute(100), @@ -1327,8 +1330,8 @@ mod test { create_batch_info(), AggregateSignature::empty(), )]; - let inline_batches: InlineBatches = 
vec![(create_batch_info(), vec![])].into(); - let opt_batches: OptBatches = vec![create_batch_info()].into(); + let inline_batches: InlineBatches = vec![(create_batch_info(), vec![])].into(); + let opt_batches: OptBatches = vec![create_batch_info()].into(); let opt_and_inline_batches = [opt_batches.deref().clone(), inline_batches.batch_infos()].concat(); diff --git a/consensus/src/payload_manager/quorum_store_payload_manager.rs b/consensus/src/payload_manager/quorum_store_payload_manager.rs index e9d005dc817ca..791c769104c80 100644 --- a/consensus/src/payload_manager/quorum_store_payload_manager.rs +++ b/consensus/src/payload_manager/quorum_store_payload_manager.rs @@ -15,8 +15,8 @@ use aptos_config::config::BlockTransactionFilterConfig; use aptos_consensus_types::{ block::Block, common::{Author, Payload, ProofWithData}, - payload::{BatchPointer, TDataInfo}, - proof_of_store::{BatchInfo, BatchInfoExt}, + payload::{BatchPointer, OptQuorumStorePayload, OptQuorumStorePayloadV1, TDataInfo}, + proof_of_store::{BatchInfo, BatchInfoExt, TBatchInfo}, }; use aptos_crypto::HashValue; use aptos_executor_types::*; @@ -199,9 +199,8 @@ impl TPayloadManager for QuorumStorePayloadManager { ) .collect::>() }, - Payload::OptQuorumStore(opt_quorum_store_payload) => { - opt_quorum_store_payload.into_inner().get_all_batch_infos() - }, + Payload::OptQuorumStore(OptQuorumStorePayload::V1(p)) => p.get_all_batch_infos(), + Payload::OptQuorumStore(OptQuorumStorePayload::V2(p)) => p.get_all_batch_infos(), }) .collect(); @@ -271,16 +270,32 @@ impl TPayloadManager for QuorumStorePayloadManager { Payload::DirectMempool(_) => { unreachable!() }, - Payload::OptQuorumStore(opt_qs_payload) => { + Payload::OptQuorumStore(OptQuorumStorePayload::V1(p)) => { prefetch_helper( - opt_qs_payload.opt_batches(), + p.opt_batches(), self.batch_reader.clone(), Some(author), timestamp, &self.ordered_authors, ); prefetch_helper( - opt_qs_payload.proof_with_data(), + p.proof_with_data(), + 
self.batch_reader.clone(), + None, + timestamp, + &self.ordered_authors, + ) + }, + Payload::OptQuorumStore(OptQuorumStorePayload::V2(p)) => { + prefetch_helper( + p.opt_batches(), + self.batch_reader.clone(), + Some(author), + timestamp, + &self.ordered_authors, + ); + prefetch_helper( + p.proof_with_data(), self.batch_reader.clone(), None, timestamp, @@ -391,9 +406,26 @@ impl TPayloadManager for QuorumStorePayloadManager { // or inlined transactions. Ok(()) }, - Payload::OptQuorumStore(opt_qs_payload) => { + Payload::OptQuorumStore(OptQuorumStorePayload::V1(p)) => { let mut missing_authors = BitVec::with_num_bits(self.ordered_authors.len() as u16); - for batch in opt_qs_payload.opt_batches().deref() { + for batch in p.opt_batches().deref() { + if self.batch_reader.exists(batch.digest()).is_none() { + let index = *self + .address_to_validator_index + .get(&batch.author()) + .expect("Payload author should have been verified"); + missing_authors.set(index as u16); + } + } + if missing_authors.all_zeros() { + Ok(()) + } else { + Err(missing_authors) + } + }, + Payload::OptQuorumStore(OptQuorumStorePayload::V2(p)) => { + let mut missing_authors = BitVec::with_num_bits(self.ordered_authors.len() as u16); + for batch in p.opt_batches().deref() { if self.batch_reader.exists(batch.digest()).is_none() { let index = *self .address_to_validator_index @@ -476,7 +508,7 @@ impl TPayloadManager for QuorumStorePayloadManager { ) .await? 
}, - Payload::OptQuorumStore(opt_qs_payload) => { + Payload::OptQuorumStore(OptQuorumStorePayload::V1(opt_qs_payload)) => { let opt_batch_txns = process_optqs_payload( opt_qs_payload.opt_batches(), self.batch_reader.clone(), @@ -554,7 +586,9 @@ fn get_inline_transactions(block: &Block) -> Vec { .flat_map(|(_batch_info, txns)| txns.clone()) .collect() }, - Payload::OptQuorumStore(opt_qs_payload) => opt_qs_payload.inline_batches().transactions(), + Payload::OptQuorumStore(OptQuorumStorePayload::V1(opt_qs_payload)) => { + opt_qs_payload.inline_batches().transactions() + }, _ => { vec![] // Other payload types do not have inline transactions }, diff --git a/consensus/src/quorum_store/batch_proof_queue.rs b/consensus/src/quorum_store/batch_proof_queue.rs index 0563212ba69b9..d49d9b0dceab5 100644 --- a/consensus/src/quorum_store/batch_proof_queue.rs +++ b/consensus/src/quorum_store/batch_proof_queue.rs @@ -8,7 +8,6 @@ use super::{ use crate::quorum_store::counters; use aptos_consensus_types::{ common::{Author, TxnSummaryWithExpiration}, - payload::TDataInfo, proof_of_store::{BatchInfoExt, ProofOfStore, TBatchInfo}, utils::PayloadTxnsSize, }; diff --git a/consensus/src/util/db_tool.rs b/consensus/src/util/db_tool.rs index 66b7a50bf24e0..229ae5cdcec0a 100644 --- a/consensus/src/util/db_tool.rs +++ b/consensus/src/util/db_tool.rs @@ -11,7 +11,12 @@ use crate::{ }, }; use anyhow::{bail, Result}; -use aptos_consensus_types::{block::Block, common::Payload, proof_of_store::BatchInfo}; +use aptos_consensus_types::{ + block::Block, + common::Payload, + payload::OptQuorumStorePayload, + proof_of_store::{BatchInfo, TBatchInfo}, +}; use aptos_crypto::HashValue; use aptos_types::transaction::{SignedTransaction, Transaction}; use clap::Parser; @@ -113,21 +118,30 @@ pub fn extract_txns_from_block<'a>( } Ok(all_txns) }, - Payload::OptQuorumStore(opt_qs_payload) => { + Payload::OptQuorumStore(OptQuorumStorePayload::V1(p)) => { + let mut all_txns = extract_txns_from_quorum_store( + 
p.proof_with_data().iter().map(|proof| *proof.digest()), + all_batches, + ) + .unwrap(); + all_txns.extend( + extract_txns_from_quorum_store( + p.opt_batches().iter().map(|info| *info.digest()), + all_batches, + ) + .unwrap(), + ); + Ok(all_txns) + }, + Payload::OptQuorumStore(OptQuorumStorePayload::V2(p)) => { let mut all_txns = extract_txns_from_quorum_store( - opt_qs_payload - .proof_with_data() - .iter() - .map(|proof| *proof.digest()), + p.proof_with_data().iter().map(|proof| *proof.digest()), all_batches, ) .unwrap(); all_txns.extend( extract_txns_from_quorum_store( - opt_qs_payload - .opt_batches() - .iter() - .map(|info| *info.digest()), + p.opt_batches().iter().map(|info| *info.digest()), all_batches, ) .unwrap(),