diff --git a/bin/remote-prover/src/server/prover.rs b/bin/remote-prover/src/server/prover.rs index 3a163a190..6ca76794e 100644 --- a/bin/remote-prover/src/server/prover.rs +++ b/bin/remote-prover/src/server/prover.rs @@ -31,8 +31,8 @@ impl Prover { } } - /// Proves a [`ProofRequest`] using the appropriate prover implementation as specified during - /// construction. + /// Proves a [`proto::ProofRequest`] using the appropriate prover implementation as specified + /// during construction. pub fn prove(&self, request: proto::ProofRequest) -> Result { match self { Prover::Transaction(prover) => prover.prove_request(request), diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 40491d4d5..1f0e151ab 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -2,6 +2,7 @@ CREATE TABLE block_headers ( block_num INTEGER NOT NULL, block_header BLOB NOT NULL, signature BLOB NOT NULL, + commitment BLOB NOT NULL, PRIMARY KEY (block_num), CONSTRAINT block_header_block_num_is_u32 CHECK (block_num BETWEEN 0 AND 0xFFFFFFFF) @@ -156,3 +157,9 @@ CREATE TABLE transactions ( CREATE INDEX idx_transactions_account_id ON transactions(account_id); -- Index for joining with block_headers CREATE INDEX idx_transactions_block_num ON transactions(block_num); + +CREATE INDEX idx_vault_cleanup ON account_vault_assets(block_num) WHERE is_latest = 0; +CREATE INDEX idx_storage_cleanup ON account_storage_map_values(block_num) WHERE is_latest = 0; + +CREATE INDEX idx_account_storage_map_latest_by_account_slot_key ON account_storage_map_values(account_id, slot_name, key, is_latest) WHERE is_latest = 1; +CREATE INDEX idx_account_vault_assets_latest_by_account_key ON account_vault_assets(account_id, vault_key, is_latest) WHERE is_latest = 1; diff --git a/crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql 
b/crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql deleted file mode 100644 index 1195d70bd..000000000 --- a/crates/store/src/db/migrations/2026020600000_cleanup_indices/down.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Reverse the cleanup indices migration - -DROP INDEX IF EXISTS idx_vault_cleanup; -DROP INDEX IF EXISTS idx_storage_cleanup; diff --git a/crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql b/crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql deleted file mode 100644 index b98f55c6d..000000000 --- a/crates/store/src/db/migrations/2026020600000_cleanup_indices/up.sql +++ /dev/null @@ -1,9 +0,0 @@ --- Add indices to optimize cleanup queries that delete old non-latest entries. --- --- These partial indices only include rows where is_latest = 0, making them: --- - Smaller (only index rows that will eventually be deleted) --- - Faster for cleanup operations (direct lookup of old entries) --- - No overhead for is_latest = 1 rows (which are never deleted) - -CREATE INDEX idx_vault_cleanup ON account_vault_assets(block_num) WHERE is_latest = 0; -CREATE INDEX idx_storage_cleanup ON account_storage_map_values(block_num) WHERE is_latest = 0; diff --git a/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql b/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql deleted file mode 100644 index 1a15b55c4..000000000 --- a/crates/store/src/db/migrations/20260206163855_add_account_indices/down.sql +++ /dev/null @@ -1,2 +0,0 @@ -DROP INDEX IF EXISTS idx_account_storage_map_latest_by_account_slot_key; -DROP INDEX IF EXISTS idx_account_vault_assets_latest_by_account_key; diff --git a/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql b/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql deleted file mode 100644 index 83233e157..000000000 --- a/crates/store/src/db/migrations/20260206163855_add_account_indices/up.sql +++ /dev/null @@ -1,2 
+0,0 @@ -CREATE INDEX idx_account_storage_map_latest_by_account_slot_key ON account_storage_map_values(account_id, slot_name, key, is_latest) WHERE is_latest = 1; -CREATE INDEX idx_account_vault_assets_latest_by_account_key ON account_vault_assets(account_id, vault_key, is_latest) WHERE is_latest = 1; diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 803d532f0..74aa8ce3b 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -28,12 +28,12 @@ use tracing::{info, instrument}; use crate::COMPONENT; use crate::db::migrations::apply_migrations; use crate::db::models::conv::SqlTypeConvert; -use crate::db::models::queries::StorageMapValuesPage; pub use crate::db::models::queries::{ AccountCommitmentsPage, NullifiersPage, PublicAccountIdsPage, }; +use crate::db::models::queries::{BlockHeaderCommitment, StorageMapValuesPage}; use crate::db::models::{Page, queries}; use crate::errors::{DatabaseError, NoteSyncError}; use crate::genesis::GenesisBlock; @@ -266,7 +266,7 @@ impl Db { /// Open a connection to the DB and apply any pending migrations. #[instrument(target = COMPONENT, skip_all)] - pub async fn load(database_filepath: PathBuf) -> Result { + pub async fn load(database_filepath: PathBuf) -> Result { let db = miden_node_db::Db::new(&database_filepath)?; info!( target: COMPONENT, @@ -359,6 +359,16 @@ impl Db { .await } + /// Loads the commitments of all block headers from the DB. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_block_header_commitments(&self) -> Result> { + self.transact("all block headers", |conn| { + let raw = queries::select_all_block_header_commitments(conn)?; + Ok(raw) + }) + .await + } + + /// Returns a page of account commitments for tree rebuilding.
#[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_account_commitments_paged( diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 3720729b1..2176ea0d4 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -32,12 +32,14 @@ on relevant platforms" )] +use miden_crypto::Word; +use miden_crypto::utils::Deserializable; use miden_protocol::Felt; use miden_protocol::account::{StorageSlotName, StorageSlotType}; -use miden_protocol::block::BlockNumber; +use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::note::NoteTag; -use crate::db::models::queries::NetworkAccountType; +use crate::db::models::queries::{BlockHeaderCommitment, NetworkAccountType}; #[derive(Debug, thiserror::Error)] #[error("failed to convert from database type {from_type} into {into_type}")] @@ -67,6 +69,32 @@ pub trait SqlTypeConvert: Sized { } } +impl SqlTypeConvert for BlockHeaderCommitment { + type Raw = Vec; + fn from_raw_sql( + raw: Self::Raw, + ) -> Result { + let inner = + ::read_from_bytes(raw.as_slice()).map_err(Self::map_err)?; + Ok(BlockHeaderCommitment(inner)) + } + fn to_raw_sql(self) -> Self::Raw { + self.0.as_bytes().to_vec() + } +} + +impl SqlTypeConvert for BlockHeader { + type Raw = Vec; + + fn from_raw_sql(raw: Self::Raw) -> Result { + ::read_from_bytes(raw.as_slice()).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + miden_crypto::utils::Serializable::to_bytes(&self) + } +} + impl SqlTypeConvert for NetworkAccountType { type Raw = i32; diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs index fa1e77e85..dd1ab9748 100644 --- a/crates/store/src/db/models/queries/accounts/tests.rs +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -183,6 +183,7 @@ fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { 
block_headers::block_num.eq(i64::from(block_num.as_u32())), block_headers::block_header.eq(block_header.to_bytes()), block_headers::signature.eq(signature.to_bytes()), + block_headers::commitment.eq(block_header.commitment().to_bytes()), )) .execute(conn) .expect("Failed to insert block header"); diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs index 553430ddb..bfcd34ee7 100644 --- a/crates/store/src/db/models/queries/block_headers.rs +++ b/crates/store/src/db/models/queries/block_headers.rs @@ -11,6 +11,7 @@ use diesel::{ SelectableHelper, SqliteConnection, }; +use miden_crypto::Word; use miden_crypto::dsa::ecdsa_k256_keccak::Signature; use miden_node_utils::limiter::{QueryParamBlockLimit, QueryParamLimiter}; use miden_protocol::block::{BlockHeader, BlockNumber}; @@ -125,6 +126,44 @@ pub fn select_all_block_headers( vec_raw_try_into(raw_block_headers) } +/// Select all block header commitments from the DB using the given [`SqliteConnection`]. +/// +/// # Returns +/// +/// A vector of [`BlockHeaderCommitment`] or an error.
+/// +/// # Raw SQL +/// +/// ```sql +/// SELECT commitment +/// FROM block_headers +/// ORDER BY block_num ASC +/// ``` +pub fn select_all_block_header_commitments( + conn: &mut SqliteConnection, +) -> Result, DatabaseError> { + let raw_commitments = + QueryDsl::select(schema::block_headers::table, schema::block_headers::commitment) + .order(schema::block_headers::block_num.asc()) + .load::>(conn)?; + let commitments = + Result::from_iter(raw_commitments.into_iter().map(BlockHeaderCommitment::from_raw_sql))?; + Ok(commitments) +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[repr(transparent)] +pub struct BlockHeaderCommitment(pub(crate) Word); + +impl BlockHeaderCommitment { + pub fn new(header: &BlockHeader) -> Self { + Self(header.commitment()) + } + pub fn word(self) -> Word { + self.0 + } +} + #[derive(Debug, Clone, Queryable, QueryableByName, Selectable)] #[diesel(table_name = schema::block_headers)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] @@ -133,11 +172,18 @@ pub struct BlockHeaderRawRow { pub block_num: i64, pub block_header: Vec, pub signature: Vec, + pub commitment: Vec, } impl TryInto for BlockHeaderRawRow { type Error = DatabaseError; fn try_into(self) -> Result { - let block_header = BlockHeader::read_from_bytes(&self.block_header[..])?; + let block_header = BlockHeader::from_raw_sql(self.block_header)?; + // we're bust if this invariant doesn't hold + debug_assert_eq!( + BlockHeaderCommitment::new(&block_header), + BlockHeaderCommitment::from_raw_sql(self.commitment) + .expect("Database always contains valid format commitments") + ); Ok(block_header) } } @@ -158,13 +204,15 @@ pub struct BlockHeaderInsert { pub block_num: i64, pub block_header: Vec, pub signature: Vec, + pub commitment: Vec, } impl From<(&BlockHeader, &Signature)> for BlockHeaderInsert { - fn from(from: (&BlockHeader, &Signature)) -> Self { + fn from((header, signature): (&BlockHeader, &Signature)) -> Self { Self { - block_num: from.0.block_num().to_raw_sql(), - 
block_header: from.0.to_bytes(), - signature: from.1.to_bytes(), + block_num: header.block_num().to_raw_sql(), + block_header: header.to_bytes(), + signature: signature.to_bytes(), + commitment: BlockHeaderCommitment::new(header).to_raw_sql(), } } } diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index 72bdcaea1..1095fc189 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -89,10 +89,9 @@ pub(crate) fn insert_transactions( block_num: BlockNumber, transactions: &OrderedTransactionHeaders, ) -> Result { - #[expect(clippy::into_iter_on_ref)] // false positive let rows: Vec<_> = transactions .as_slice() - .into_iter() + .iter() .map(|tx| TransactionSummaryRowInsert::new(tx, block_num)) .collect(); diff --git a/crates/store/src/db/models/utils.rs b/crates/store/src/db/models/utils.rs index 1ace2abaa..ef74e86fa 100644 --- a/crates/store/src/db/models/utils.rs +++ b/crates/store/src/db/models/utils.rs @@ -1,6 +1,6 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use miden_protocol::note::Nullifier; -use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::utils::Serializable; use crate::errors::DatabaseError; @@ -14,16 +14,6 @@ pub(crate) fn vec_raw_try_into>( ) } -#[expect(dead_code)] -/// Deserialize an iterable container full of byte blobs `B` to types `T` -pub(crate) fn deserialize_raw_vec, T: Deserializable>( - raw: impl IntoIterator, -) -> Result, DeserializationError> { - Result::, DeserializationError>::from_iter( - raw.into_iter().map(|raw| T::read_from_bytes(raw.as_ref())), - ) -} - /// Utility to convert an iterable container to a vector of byte blobs pub(crate) fn serialize_vec<'a, D: Serializable + 'a>( raw: impl IntoIterator, diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index ebb8c280f..f93afc16e 100644 --- 
a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -48,6 +48,7 @@ diesel::table! { block_num -> BigInt, block_header -> Binary, signature -> Binary, + commitment -> Binary, } } diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 61bbf3e99..a277f1c68 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -119,7 +119,7 @@ pub enum StateInitializationError { #[error("failed to load block store")] BlockStoreLoadError(#[source] std::io::Error), #[error("failed to load database")] - DatabaseLoadError(#[from] miden_node_db::DatabaseError), + DatabaseLoadError(#[source] DatabaseError), #[error("inner forest error")] InnerForestError(#[from] InnerForestError), #[error( diff --git a/crates/store/src/state/loader.rs b/crates/store/src/state/loader.rs index d237716f3..c8c886148 100644 --- a/crates/store/src/state/loader.rs +++ b/crates/store/src/state/loader.rs @@ -12,9 +12,10 @@ use std::future::Future; use std::num::NonZeroUsize; use std::path::Path; +use miden_crypto::merkle::mmr::Mmr; use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::NullifierTree; -use miden_protocol::block::{BlockHeader, BlockNumber, Blockchain}; +use miden_protocol::block::{BlockNumber, Blockchain}; #[cfg(not(feature = "rocksdb"))] use miden_protocol::crypto::merkle::smt::MemoryStorage; use miden_protocol::crypto::merkle::smt::{LargeSmt, LargeSmtError, SmtStorage}; @@ -30,6 +31,7 @@ use { use crate::COMPONENT; use crate::db::Db; +use crate::db::models::queries::BlockHeaderCommitment; use crate::errors::{DatabaseError, StateInitializationError}; use crate::inner_forest::InnerForest; @@ -331,16 +333,13 @@ pub fn load_smt(storage: S) -> Result, StateInitializ /// Loads the blockchain MMR from all block headers in the database. 
#[instrument(target = COMPONENT, skip_all)] pub async fn load_mmr(db: &mut Db) -> Result { - let block_commitments: Vec = db - .select_all_block_headers() - .await? - .iter() - .map(BlockHeader::commitment) - .collect(); + let block_commitments = db.select_all_block_header_commitments().await?; // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX // entries. - let chain_mmr = Blockchain::from_mmr_unchecked(block_commitments.into()); + let chain_mmr = Blockchain::from_mmr_unchecked(Mmr::from( + block_commitments.iter().copied().map(BlockHeaderCommitment::word), + )); Ok(chain_mmr) }