diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 37bb1d1f21..0f4ba580ee 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -127,3 +127,22 @@ jobs: run: make check - name: Diff check run: git diff --exit-code + + check-features: + name: check all feature combinations + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v4 + - name: Cleanup large tools for build space + uses: ./.github/actions/cleanup-runner + - name: Install RocksDB + uses: ./.github/actions/install-rocksdb + - uses: Swatinem/rust-cache@v2 + with: + save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} + - name: Install rust + run: rustup update --no-self-update + - name: Install cargo-hack + uses: taiki-e/install-action@cargo-hack + - name: Check all feature combinations + run: make check-features diff --git a/.github/workflows/publish-dry-run.yml b/.github/workflows/publish-dry-run.yml index fe6b15e879..c84a08d34e 100644 --- a/.github/workflows/publish-dry-run.yml +++ b/.github/workflows/publish-dry-run.yml @@ -29,7 +29,6 @@ jobs: run: sudo apt-get update && sudo apt-get install -y jq - name: Update Rust toolchain run: rustup update --no-self-update - - uses: Swatinem/rust-cache@v2 - uses: taiki-e/install-action@v2 with: tool: cargo-binstall@1.16.6 diff --git a/CHANGELOG.md b/CHANGELOG.md index 06b5def80a..08cc4ab76f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,7 +58,9 @@ - Fix race condition at DB shutdown in tests ([#1503](https://github.com/0xMiden/miden-node/pull/1503)). - [BREAKING] Updated to new miden-base protocol: removed `aux` and `execution_hint` from `NoteMetadata`, removed `NoteExecutionMode`, and `NoteMetadata::new()` is now infallible ([#1526](https://github.com/0xMiden/miden-node/pull/1526)). - [BREAKING] Network note queries now use full account ID instead of 30-bit prefix ([#1572](https://github.com/0xMiden/miden-node/pull/1572)). 
+- [BREAKING] Renamed `SyncStorageMaps` RPC endpoint to `SyncAccountStorageMaps` for consistency ([#1581](https://github.com/0xMiden/miden-node/pull/1581)). - Removed git information from node's `--version` CLI as it was often incorrect ([#1576](https://github.com/0xMiden/miden-node/pull/1576)). +- [BREAKING] Renamed `GetNetworkAccountDetailsByPrefix` endpoint to `GetNetworkAccountDetailsById` which now accepts full account ID instead of 30-bit prefix ([#1580](https://github.com/0xMiden/miden-node/pull/1580)). ### Fixes diff --git a/Makefile b/Makefile index 72bdbce492..64aa55bf4f 100644 --- a/Makefile +++ b/Makefile @@ -87,6 +87,10 @@ test: ## Runs all tests check: ## Check all targets and features for errors without code generation ${BUILD_PROTO} cargo check --all-features --all-targets --locked --workspace +.PHONY: check-features +check-features: ## Checks all feature combinations compile without warnings using cargo-hack + @scripts/check-features.sh + # --- building ------------------------------------------------------------------------------------ .PHONY: build diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index ac94f20b72..fa63c7b671 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -125,12 +125,12 @@ impl StoreClient { &self, account_id: NetworkAccountId, ) -> Result, StoreError> { - let request = proto::store::AccountIdPrefix { account_id_prefix: account_id.prefix() }; + let request = proto::account::AccountId::from(account_id.inner()); let store_response = self .inner .clone() - .get_network_account_details_by_prefix(request) + .get_network_account_details_by_id(request) .await? 
.into_inner() .details; diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index cf24e253f8..8e06d33699 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -416,12 +416,12 @@ pub struct AccountStorageMapDetails { /// /// When a storage map contains many entries (> [`AccountStorageMapDetails::MAX_RETURN_ENTRIES`]), /// returning all entries in a single RPC response creates performance issues. In such cases, -/// the `LimitExceeded` variant indicates to the client to use the `SyncStorageMaps` endpoint +/// the `LimitExceeded` variant indicates to the client to use the `SyncAccountStorageMaps` endpoint /// instead. #[derive(Debug, Clone, PartialEq, Eq)] pub enum StorageMapEntries { /// The map has too many entries to return inline. - /// Clients must use `SyncStorageMaps` endpoint instead. + /// Clients must use `SyncAccountStorageMaps` endpoint instead. LimitExceeded, /// All storage map entries (key-value pairs) without proofs. @@ -1098,18 +1098,6 @@ pub enum NetworkAccountError { NotNetworkAccount(AccountId), #[error("invalid network account attachment: {0}")] InvalidAttachment(#[source] NetworkAccountTargetError), - #[error("invalid network account prefix: {0}")] - InvalidPrefix(u32), -} - -/// Validates that a u32 represents a valid network account prefix. -/// -/// Network accounts have a 30-bit prefix (top 2 bits must be 0). -pub fn validate_network_account_prefix(prefix: u32) -> Result { - if prefix >> 30 != 0 { - return Err(NetworkAccountError::InvalidPrefix(prefix)); - } - Ok(prefix) } /// Gets the 30-bit prefix of the account ID. 
diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index 3e3ef1d0d6..798a1d18e8 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -234,7 +234,7 @@ pub mod account_storage_details { #[prost(string, tag = "1")] pub slot_name: ::prost::alloc::string::String, /// True when the number of entries exceeds the response limit. - /// When set, clients should use the `SyncStorageMaps` endpoint. + /// When set, clients should use the `SyncAccountStorageMaps` endpoint. #[prost(bool, tag = "2")] pub too_many_entries: bool, /// The map entries (with or without proofs). Empty when too_many_entries is true. @@ -479,7 +479,7 @@ pub struct SyncStateResponse { /// Allows requesters to sync storage map values for specific public accounts within a block range, /// with support for cursor-based pagination to handle large storage maps. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncStorageMapsRequest { +pub struct SyncAccountStorageMapsRequest { /// Block range from which to start synchronizing. /// /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), @@ -491,7 +491,7 @@ pub struct SyncStorageMapsRequest { pub account_id: ::core::option::Option, } #[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStorageMapsResponse { +pub struct SyncAccountStorageMapsResponse { /// Pagination information. #[prost(message, optional, tag = "1")] pub pagination_info: ::core::option::Option, @@ -1041,11 +1041,11 @@ pub mod api_client { self.inner.unary(req, path, codec).await } /// Returns storage map updates for specified account and storage slots within a block range. 
- pub async fn sync_storage_maps( + pub async fn sync_account_storage_maps( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -1057,9 +1057,12 @@ pub mod api_client { ) })?; let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc.Api/SyncStorageMaps"); + let path = http::uri::PathAndQuery::from_static( + "/rpc.Api/SyncAccountStorageMaps", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncStorageMaps")); + req.extensions_mut() + .insert(GrpcMethod::new("rpc.Api", "SyncAccountStorageMaps")); self.inner.unary(req, path, codec).await } /// Returns transactions records for specific accounts within a block range. @@ -1266,11 +1269,11 @@ pub mod api_server { tonic::Status, >; /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_storage_maps( + async fn sync_account_storage_maps( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns transactions records for specific accounts within a block range. 
@@ -1948,25 +1951,25 @@ pub mod api_server { }; Box::pin(fut) } - "/rpc.Api/SyncStorageMaps" => { + "/rpc.Api/SyncAccountStorageMaps" => { #[allow(non_camel_case_types)] - struct SyncStorageMapsSvc(pub Arc); + struct SyncAccountStorageMapsSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService - for SyncStorageMapsSvc { - type Response = super::SyncStorageMapsResponse; + > tonic::server::UnaryService + for SyncAccountStorageMapsSvc { + type Response = super::SyncAccountStorageMapsResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_storage_maps(&inner, request).await + ::sync_account_storage_maps(&inner, request).await }; Box::pin(fut) } @@ -1977,7 +1980,7 @@ pub mod api_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncStorageMapsSvc(inner); + let method = SyncAccountStorageMapsSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index 4892b7b9c9..be9d1d6469 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -151,14 +151,7 @@ pub mod transaction_inputs { pub block_num: u32, } } -/// Account ID prefix. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountIdPrefix { - /// Account ID prefix. - #[prost(fixed32, tag = "1")] - pub account_id_prefix: u32, -} -/// Represents the result of getting network account details by prefix. +/// Represents the result of getting network account details by ID. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct MaybeAccountDetails { /// Account details. 
@@ -697,11 +690,13 @@ pub mod rpc_client { self.inner.unary(req, path, codec).await } /// Returns storage map updates for specified account and storage slots within a block range. - pub async fn sync_storage_maps( + pub async fn sync_account_storage_maps( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest< + super::super::rpc::SyncAccountStorageMapsRequest, + >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -714,10 +709,11 @@ pub mod rpc_client { })?; let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( - "/store.Rpc/SyncStorageMaps", + "/store.Rpc/SyncAccountStorageMaps", ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncStorageMaps")); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "SyncAccountStorageMaps")); self.inner.unary(req, path, codec).await } /// Returns transactions records for specific accounts within a block range. @@ -886,11 +882,11 @@ pub mod rpc_server { tonic::Status, >; /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_storage_maps( + async fn sync_account_storage_maps( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns transactions records for specific accounts within a block range. 
@@ -1480,15 +1476,15 @@ pub mod rpc_server { }; Box::pin(fut) } - "/store.Rpc/SyncStorageMaps" => { + "/store.Rpc/SyncAccountStorageMaps" => { #[allow(non_camel_case_types)] - struct SyncStorageMapsSvc(pub Arc); + struct SyncAccountStorageMapsSvc(pub Arc); impl< T: Rpc, > tonic::server::UnaryService< - super::super::rpc::SyncStorageMapsRequest, - > for SyncStorageMapsSvc { - type Response = super::super::rpc::SyncStorageMapsResponse; + super::super::rpc::SyncAccountStorageMapsRequest, + > for SyncAccountStorageMapsSvc { + type Response = super::super::rpc::SyncAccountStorageMapsResponse; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -1496,12 +1492,12 @@ pub mod rpc_server { fn call( &mut self, request: tonic::Request< - super::super::rpc::SyncStorageMapsRequest, + super::super::rpc::SyncAccountStorageMapsRequest, >, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::sync_storage_maps(&inner, request).await + ::sync_account_storage_maps(&inner, request).await }; Box::pin(fut) } @@ -1512,7 +1508,7 @@ pub mod rpc_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = SyncStorageMapsSvc(inner); + let method = SyncAccountStorageMapsSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( @@ -2406,10 +2402,10 @@ pub mod ntx_builder_client { .insert(GrpcMethod::new("store.NtxBuilder", "GetCurrentBlockchainData")); self.inner.unary(req, path, codec).await } - /// Returns the latest state of a network account with the specified account prefix. - pub async fn get_network_account_details_by_prefix( + /// Returns the latest state of a network account with the specified account ID. 
+ pub async fn get_network_account_details_by_id( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< tonic::Response, tonic::Status, @@ -2424,15 +2420,12 @@ pub mod ntx_builder_client { })?; let codec = tonic_prost::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static( - "/store.NtxBuilder/GetNetworkAccountDetailsByPrefix", + "/store.NtxBuilder/GetNetworkAccountDetailsById", ); let mut req = request.into_request(); req.extensions_mut() .insert( - GrpcMethod::new( - "store.NtxBuilder", - "GetNetworkAccountDetailsByPrefix", - ), + GrpcMethod::new("store.NtxBuilder", "GetNetworkAccountDetailsById"), ); self.inner.unary(req, path, codec).await } @@ -2603,10 +2596,10 @@ pub mod ntx_builder_server { tonic::Response, tonic::Status, >; - /// Returns the latest state of a network account with the specified account prefix. - async fn get_network_account_details_by_prefix( + /// Returns the latest state of a network account with the specified account ID. 
+ async fn get_network_account_details_by_id( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< tonic::Response, tonic::Status, @@ -2882,15 +2875,13 @@ pub mod ntx_builder_server { }; Box::pin(fut) } - "/store.NtxBuilder/GetNetworkAccountDetailsByPrefix" => { + "/store.NtxBuilder/GetNetworkAccountDetailsById" => { #[allow(non_camel_case_types)] - struct GetNetworkAccountDetailsByPrefixSvc( - pub Arc, - ); + struct GetNetworkAccountDetailsByIdSvc(pub Arc); impl< T: NtxBuilder, - > tonic::server::UnaryService - for GetNetworkAccountDetailsByPrefixSvc { + > tonic::server::UnaryService + for GetNetworkAccountDetailsByIdSvc { type Response = super::MaybeAccountDetails; type Future = BoxFuture< tonic::Response, @@ -2898,11 +2889,11 @@ pub mod ntx_builder_server { >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { - ::get_network_account_details_by_prefix( + ::get_network_account_details_by_id( &inner, request, ) @@ -2917,7 +2908,7 @@ pub mod ntx_builder_server { let max_encoding_message_size = self.max_encoding_message_size; let inner = self.inner.clone(); let fut = async move { - let method = GetNetworkAccountDetailsByPrefixSvc(inner); + let method = GetNetworkAccountDetailsByIdSvc(inner); let codec = tonic_prost::ProstCodec::default(); let mut grpc = tonic::server::Grpc::new(codec) .apply_compression_config( diff --git a/crates/rpc/README.md b/crates/rpc/README.md index e3f1a6018d..4d3cf9387f 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -25,7 +25,7 @@ The full gRPC method definitions can be found in the [proto](../proto/README.md) - [SyncAccountVault](#SyncAccountVault) - [SyncNotes](#syncnotes) - [SyncState](#syncstate) -- [SyncStorageMaps](#syncstoragemaps) +- [SyncAccountStorageMaps](#syncaccountstoragemaps) - [SyncTransactions](#synctransactions) @@ -234,7 +234,7 @@ notes, client can make additional filtering 
of that data on its side. --- -### SyncStorageMaps +### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index 45e4bf8950..13d26962eb 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -204,13 +204,13 @@ impl api_server::Api for RpcService { self.store.clone().sync_state(request).await } - async fn sync_storage_maps( + async fn sync_account_storage_maps( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); - self.store.clone().sync_storage_maps(request).await + self.store.clone().sync_account_storage_maps(request).await } async fn sync_notes( diff --git a/crates/store/README.md b/crates/store/README.md index 57c002fe56..ea44889d04 100644 --- a/crates/store/README.md +++ b/crates/store/README.md @@ -55,7 +55,7 @@ The full gRPC API can be found [here](../../proto/proto/store.proto). - [SyncAccountVault](#syncaccountvault) - [SyncNotes](#syncnotes) - [SyncState](#syncstate) -- [SyncStorageMaps](#syncstoragemaps) +- [SyncAccountStorageMaps](#syncaccountstoragemaps) - [SyncTransactions](#synctransactions) @@ -245,7 +245,7 @@ data contains excessive notes and nullifiers, client can make additional filteri --- -### SyncStorageMaps +### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. 
diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index b3ca25d563..0858e71d10 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -13,14 +13,14 @@ CREATE TABLE account_codes ( ) WITHOUT ROWID; CREATE TABLE accounts ( - account_id BLOB NOT NULL, - network_account_id_prefix INTEGER NULL, -- 30-bit account ID prefix, only filled for network accounts + account_id BLOB NOT NULL, + network_account_type INTEGER NOT NULL, -- 0-not a network account, 1-network account block_num INTEGER NOT NULL, - account_commitment BLOB NOT NULL, + account_commitment BLOB NOT NULL, code_commitment BLOB, nonce INTEGER, - storage_header BLOB, -- Serialized AccountStorageHeader from miden-objects - vault_root BLOB, -- Vault root commitment + storage_header BLOB, -- Serialized AccountStorageHeader from miden-objects + vault_root BLOB, -- Vault root commitment is_latest BOOLEAN NOT NULL DEFAULT 0, -- Indicates if this is the latest state for this account_id created_at_block INTEGER NOT NULL, @@ -30,10 +30,11 @@ CREATE TABLE accounts ( (code_commitment IS NOT NULL AND nonce IS NOT NULL AND storage_header IS NOT NULL AND vault_root IS NOT NULL) OR (code_commitment IS NULL AND nonce IS NULL AND storage_header IS NULL AND vault_root IS NULL) - ) + ), + CONSTRAINT accounts_network_account_type_in_enum CHECK (network_account_type BETWEEN 0 AND 1) ) WITHOUT ROWID; -CREATE INDEX idx_accounts_network_prefix ON accounts(network_account_id_prefix) WHERE network_account_id_prefix IS NOT NULL; +CREATE INDEX idx_accounts_network_type ON accounts(network_account_type) WHERE network_account_type = 1; CREATE INDEX idx_accounts_id_block ON accounts(account_id, block_num DESC); CREATE INDEX idx_accounts_latest ON accounts(account_id, is_latest) WHERE is_latest = 1; CREATE INDEX idx_accounts_created_at_block ON accounts(created_at_block); diff --git 
a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index ee7c722c8a..7fc4a5cab4 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -420,14 +420,14 @@ impl Db { .await } - /// Loads public account details from the DB based on the account ID's prefix. + /// Loads public account details for a network account by its full account ID. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_network_account_by_prefix( + pub async fn select_network_account_by_id( &self, - id_prefix: u32, + account_id: AccountId, ) -> Result> { - self.transact("Get account by id prefix", move |conn| { - queries::select_account_by_id_prefix(conn, id_prefix) + self.transact("Get network account by id", move |conn| { + queries::select_network_account_by_id(conn, account_id) }) .await } diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 4dcd012efa..2e6313bf61 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -32,12 +32,13 @@ on relevant platforms" )] -use miden_node_proto::domain::account::NetworkAccountId; use miden_protocol::Felt; use miden_protocol::account::{StorageSlotName, StorageSlotType}; use miden_protocol::block::BlockNumber; use miden_protocol::note::NoteTag; +use crate::db::models::queries::NetworkAccountType; + #[derive(Debug, thiserror::Error)] #[error("failed to convert from database type {from_type} into {into_type}")] pub struct DatabaseTypeConversionError { @@ -66,6 +67,29 @@ pub(crate) trait SqlTypeConvert: Sized { } } +impl SqlTypeConvert for NetworkAccountType { + type Raw = i32; + + fn to_raw_sql(self) -> Self::Raw { + match self { + NetworkAccountType::None => 0, + NetworkAccountType::Network => 1, + } + } + + fn from_raw_sql(raw: Self::Raw) -> Result { + #[derive(Debug, thiserror::Error)] + #[error("invalid network account type value {0}")] + struct ValueError(i32); + + match raw { + 0 => Ok(Self::None), + 
1 => Ok(Self::Network), + other => Err(Self::map_err(ValueError(other))), + } + } +} + impl SqlTypeConvert for BlockNumber { type Raw = i64; @@ -78,12 +102,6 @@ impl SqlTypeConvert for BlockNumber { } } -/// Converts a network account ID to its 30-bit prefix for database indexing. -#[inline(always)] -pub(crate) fn network_account_id_to_prefix_sql(id: NetworkAccountId) -> i64 { - i64::from(id.prefix()) -} - impl SqlTypeConvert for NoteTag { type Raw = i32; diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 6568d5723e..1f4f675335 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -16,7 +16,6 @@ use diesel::{ SelectableHelper, SqliteConnection, }; -use miden_node_proto as proto; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; use miden_node_utils::limiter::{ MAX_RESPONSE_PAYLOAD_BYTES, @@ -44,12 +43,7 @@ use miden_protocol::block::{BlockAccountUpdate, BlockNumber}; use miden_protocol::utils::{Deserializable, Serializable}; use crate::COMPONENT; -use crate::db::models::conv::{ - SqlTypeConvert, - network_account_id_to_prefix_sql, - nonce_to_raw_sql, - raw_sql_to_nonce, -}; +use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, raw_sql_to_nonce}; use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; @@ -65,6 +59,18 @@ mod tests; type StorageMapValueRow = (i64, String, Vec, Vec); +// NETWORK ACCOUNT TYPE +// ================================================================================================ + +/// Classifies accounts for database storage based on whether they are network accounts. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum NetworkAccountType { + /// Not a network account. + None, + /// A network account. 
+ Network, +} + // ACCOUNT CODE // ================================================================================================ @@ -204,11 +210,7 @@ fn select_full_account( Ok(Account::new(account_id, vault, storage, code, nonce, None)?) } -/// Select the latest account info by account ID prefix from the DB using the given -/// [`SqliteConnection`]. Meant to be used by the network transaction builder. -/// Because network notes get matched through accounts through the account's 30-bit prefix, it is -/// possible that multiple accounts match against a single prefix. In this scenario, the first -/// account is returned. +/// Select the latest account info for a network account by its full account ID. /// /// # Returns /// @@ -224,16 +226,18 @@ fn select_full_account( /// FROM /// accounts /// WHERE -/// network_account_id_prefix = ?1 +/// account_id = ?1 +/// AND network_account_type = 1 /// AND is_latest = 1 /// ``` -pub(crate) fn select_account_by_id_prefix( +pub(crate) fn select_network_account_by_id( conn: &mut SqliteConnection, - id_prefix: u32, + account_id: AccountId, ) -> Result, DatabaseError> { let maybe_summary = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::network_account_type.eq(NetworkAccountType::Network.to_raw_sql())) .filter(schema::accounts::is_latest.eq(true)) - .filter(schema::accounts::network_account_id_prefix.eq(Some(i64::from(id_prefix)))) .get_result::(conn) .optional() .map_err(DatabaseError::Diesel)?; @@ -536,7 +540,10 @@ pub(crate) fn select_all_network_account_ids( let account_ids_raw: Vec<(Vec, i64)> = Box::new( QueryDsl::select( schema::accounts::table - .filter(schema::accounts::network_account_id_prefix.is_not_null()) + .filter( + schema::accounts::network_account_type + .eq(NetworkAccountType::Network.to_raw_sql()), + ) .filter(schema::accounts::is_latest.eq(true)), (schema::accounts::account_id, 
schema::accounts::created_at_block), ) @@ -923,18 +930,16 @@ pub(crate) fn upsert_accounts( accounts: &[BlockAccountUpdate], block_num: BlockNumber, ) -> Result { - use proto::domain::account::NetworkAccountId; - let mut count = 0; for update in accounts { let account_id = update.account_id(); let account_id_bytes = account_id.to_bytes(); let block_num_raw = block_num.to_raw_sql(); - let network_account_id = if account_id.is_network() { - Some(NetworkAccountId::try_from(account_id)?) + let network_account_type = if account_id.is_network() { + NetworkAccountType::Network } else { - None + NetworkAccountType::None }; // Preserve the original creation block when updating existing accounts. @@ -1062,7 +1067,7 @@ pub(crate) fn upsert_accounts( let account_value = AccountRowInsert { account_id: account_id_bytes, - network_account_id_prefix: network_account_id.map(network_account_id_to_prefix_sql), + network_account_type: network_account_type.to_raw_sql(), account_commitment: update.final_state_commitment().to_bytes(), block_num: block_num_raw, nonce: full_account.as_ref().map(|account| nonce_to_raw_sql(account.nonce())), @@ -1127,7 +1132,7 @@ pub(crate) struct AccountCodeRowInsert { #[diesel(table_name = schema::accounts)] pub(crate) struct AccountRowInsert { pub(crate) account_id: Vec, - pub(crate) network_account_id_prefix: Option, + pub(crate) network_account_type: i32, pub(crate) block_num: i64, pub(crate) account_commitment: Vec, pub(crate) code_commitment: Option>, diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 0132848929..0ae4b8e1e1 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -24,7 +24,7 @@ diesel::table! { diesel::table! 
{ accounts (account_id, block_num) { account_id -> Binary, - network_account_id_prefix -> Nullable, + network_account_type -> Integer, account_commitment -> Binary, code_commitment -> Nullable, nonce -> Nullable, diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index df1f0fa653..6796505808 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -438,7 +438,7 @@ pub enum SyncAccountVaultError { // ================================================================================================ #[derive(Debug, Error, GrpcError)] -pub enum SyncStorageMapsError { +pub enum SyncAccountStorageMapsError { #[error("database error")] #[grpc(internal)] DatabaseError(#[from] DatabaseError), diff --git a/crates/store/src/inner_forest/mod.rs b/crates/store/src/inner_forest/mod.rs index 0a4bd00d62..b27b8ff3cf 100644 --- a/crates/store/src/inner_forest/mod.rs +++ b/crates/store/src/inner_forest/mod.rs @@ -89,59 +89,6 @@ impl InnerForest { *EmptySubtreeRoots::entry(SMT_DEPTH, 0) } - /// Retrieves the most recent vault SMT root for an account. - /// - /// Returns the latest vault root entry regardless of block number. - /// Used when applying incremental deltas where we always want the previous state. - /// - /// If no vault root is found for the account, returns an empty SMT root. - /// - /// # Arguments - /// - /// * `is_full_state` - If `true`, returns an empty SMT root (for new accounts or DB - /// reconstruction where delta values are absolute). If `false`, looks up the previous state - /// (for incremental updates where delta values are relative changes). 
- fn get_latest_vault_root(&self, account_id: AccountId, is_full_state: bool) -> Word { - if is_full_state { - return Self::empty_smt_root(); - } - self.vault_roots - .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) - .next_back() - .map_or_else(Self::empty_smt_root, |(_, root)| *root) - } - - /// Retrieves the most recent storage map SMT root for an account slot. - /// - /// Returns the latest storage root entry regardless of block number. - /// Used when applying incremental deltas where we always want the previous state. - /// - /// If no storage root is found for the slot, returns an empty SMT root. - /// - /// # Arguments - /// - /// * `is_full_state` - If `true`, returns an empty SMT root (for new accounts or DB - /// reconstruction where delta values are absolute). If `false`, looks up the previous state - /// (for incremental updates where delta values are relative changes). - fn get_latest_storage_map_root( - &self, - account_id: AccountId, - slot_name: &StorageSlotName, - is_full_state: bool, - ) -> Word { - if is_full_state { - return Self::empty_smt_root(); - } - - self.storage_map_roots - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), - ) - .next_back() - .map_or_else(Self::empty_smt_root, |(_, root)| *root) - } - /// Retrieves a vault root for the specified account at or before the specified block. 
pub(crate) fn get_vault_root( &self, @@ -321,29 +268,94 @@ impl InnerForest { let account_id = delta.id(); let is_full_state = delta.is_full_state(); - if !delta.vault().is_empty() { - self.update_account_vault(block_num, account_id, delta.vault(), is_full_state)?; + if is_full_state { + self.insert_account_vault(block_num, account_id, delta.vault()); + } else if !delta.vault().is_empty() { + self.update_account_vault(block_num, account_id, delta.vault())?; } - if !delta.storage().is_empty() { - self.update_account_storage(block_num, account_id, delta.storage(), is_full_state); + if is_full_state { + self.insert_account_storage(block_num, account_id, delta.storage()); + } else if !delta.storage().is_empty() { + self.update_account_storage(block_num, account_id, delta.storage()); } + Ok(()) } - // PRIVATE METHODS + // ASSET VAULT DELTA PROCESSING // -------------------------------------------------------------------------------------------- - /// Updates the forest with vault changes from a delta. + /// Retrieves the most recent vault SMT root for an account. If no vault root is found for the + /// account, returns an empty SMT root. + fn get_latest_vault_root(&self, account_id: AccountId) -> Word { + self.vault_roots + .range((account_id, BlockNumber::GENESIS)..=(account_id, BlockNumber::from(u32::MAX))) + .next_back() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) + } + + /// Inserts asset vault data into the forest for the specified account. Assumes that asset + /// vault for this account does not yet exist in the forest. 
+ fn insert_account_vault( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + delta: &AccountVaultDelta, + ) { + // get the current vault root for the account, and make sure it is empty + let prev_root = self.get_latest_vault_root(account_id); + assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); + + // if there are no assets in the vault, add a root of an empty SMT to the vault roots map + // so that the map has entries for all accounts, and then return (i.e., no need to insert + // anything into the forest) + if delta.is_empty() { + self.vault_roots.insert((account_id, block_num), prev_root); + return; + } + + let mut entries: Vec<(Word, Word)> = Vec::new(); + + // process fungible assets + for (faucet_id, amount_delta) in delta.fungible().iter() { + let amount = + (*amount_delta).try_into().expect("full-state amount should be non-negative"); + let asset = FungibleAsset::new(*faucet_id, amount).expect("valid faucet id"); + entries.push((asset.vault_key().into(), asset.into())); + } + + // process non-fungible assets + for (&asset, _action) in delta.non_fungible().iter() { + // TODO: assert that action is addition + entries.push((asset.vault_key().into(), asset.into())); + } + + assert!(!entries.is_empty(), "non-empty delta should contain entries"); + let num_entries = entries.len(); + + let new_root = self + .forest + .batch_insert(prev_root, entries) + .expect("forest insertion should succeed"); + + self.vault_roots.insert((account_id, block_num), new_root); + + tracing::debug!( + target: crate::COMPONENT, + %account_id, + %block_num, + vault_entries = num_entries, + "Inserted vault into forest" + ); + } + + /// Updates the forest with vault changes from a delta. The vault delta is assumed to be + /// non-empty. /// /// Processes both fungible and non-fungible asset changes, building entries for the vault SMT /// and tracking the new root. 
/// - /// # Arguments - /// - /// * `is_full_state` - If `true`, delta values are absolute (new account or DB reconstruction). - /// If `false`, delta values are relative changes applied to previous state. - /// /// # Errors /// /// Returns an error if applying a delta results in a negative balance. @@ -351,26 +363,25 @@ impl InnerForest { &mut self, block_num: BlockNumber, account_id: AccountId, - vault_delta: &AccountVaultDelta, - is_full_state: bool, + delta: &AccountVaultDelta, ) -> Result<(), InnerForestError> { - let prev_root = self.get_latest_vault_root(account_id, is_full_state); + assert!(!delta.is_empty(), "expected the delta not to be empty"); - let mut entries = Vec::new(); + // get the previous vault root; the root could be for an empty or non-empty SMT + let prev_root = self.get_latest_vault_root(account_id); + + let mut entries: Vec<(Word, Word)> = Vec::new(); // Process fungible assets - for (faucet_id, amount_delta) in vault_delta.fungible().iter() { + for (faucet_id, amount_delta) in delta.fungible().iter() { let key: Word = FungibleAsset::new(*faucet_id, 0).expect("valid faucet id").vault_key().into(); - let new_amount = if is_full_state { - // For full-state deltas, amount is the absolute value - (*amount_delta).try_into().expect("full-state amount should be non-negative") - } else { - // For partial deltas, amount is a change that must be applied to previous balance. + let new_amount = { + // amount delta is a change that must be applied to previous balance. // - // TODO: SmtForest only exposes `fn open()` which computes a full Merkle - // proof. We only need the leaf, so a direct `fn get()` method would be faster. + // TODO: SmtForest only exposes `fn open()` which computes a full Merkle proof. We + // only need the leaf, so a direct `fn get()` method would be faster. 
let prev_amount = self .forest .open(prev_root, key) @@ -391,16 +402,13 @@ impl InnerForest { let value = if new_amount == 0 { EMPTY_WORD } else { - let asset: Asset = FungibleAsset::new(*faucet_id, new_amount) - .expect("valid fungible asset") - .into(); - Word::from(asset) + FungibleAsset::new(*faucet_id, new_amount).expect("valid fungible asset").into() }; entries.push((key, value)); } // Process non-fungible assets - for (asset, action) in vault_delta.non_fungible().iter() { + for (asset, action) in delta.non_fungible().iter() { let value = match action { NonFungibleDeltaAction::Add => Word::from(Asset::NonFungible(*asset)), NonFungibleDeltaAction::Remove => EMPTY_WORD, @@ -408,86 +416,174 @@ impl InnerForest { entries.push((asset.vault_key().into(), value)); } - if entries.is_empty() { - return Ok(()); - } + assert!(!entries.is_empty(), "non-empty delta should contain entries"); + let num_entries = entries.len(); - let updated_root = self + let new_root = self .forest - .batch_insert(prev_root, entries.iter().copied()) + .batch_insert(prev_root, entries) .expect("forest insertion should succeed"); - self.vault_roots.insert((account_id, block_num), updated_root); + self.vault_roots.insert((account_id, block_num), new_root); tracing::debug!( target: crate::COMPONENT, %account_id, %block_num, - vault_entries = entries.len(), + vault_entries = num_entries, "Updated vault in forest" ); Ok(()) } - /// Updates the forest with storage map changes from a delta. - /// - /// Processes storage map slot deltas, building SMTs for each modified slot - /// and tracking the new roots and accumulated entries. + // STORAGE MAP DELTA PROCESSING + // -------------------------------------------------------------------------------------------- + + /// Retrieves the most recent storage map SMT root for an account slot. If no storage root is + /// found for the slot, returns an empty SMT root. 
+ fn get_latest_storage_map_root( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + ) -> Word { + self.storage_map_roots + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ) + .next_back() + .map_or_else(Self::empty_smt_root, |(_, root)| *root) + } + + /// Retrieves the most recent entries in the specified storage map. If no storage map exists + /// returns an empty map. + fn get_latest_storage_map_entries( + &self, + account_id: AccountId, + slot_name: &StorageSlotName, + ) -> BTreeMap { + self.storage_entries + .range( + (account_id, slot_name.clone(), BlockNumber::GENESIS) + ..=(account_id, slot_name.clone(), BlockNumber::from(u32::MAX)), + ) + .next_back() + .map(|(_, entries)| entries.clone()) + .unwrap_or_default() + } + + /// Inserts all storage maps from the provided storage delta into the forest. /// - /// # Arguments + /// Assumes that storage maps for the provided account are not in the forest already. 
+ fn insert_account_storage( + &mut self, + block_num: BlockNumber, + account_id: AccountId, + delta: &AccountStorageDelta, + ) { + for (slot_name, map_delta) in delta.maps() { + // get the latest root for this map, and make sure the root is for an empty tree + let prev_root = self.get_latest_storage_map_root(account_id, slot_name); + assert_eq!(prev_root, Self::empty_smt_root(), "account should not be in the forest"); + + // build a vector of entries and filter out any empty values; such values shouldn't + // be present in full-state deltas, but it is good to exclude them explicitly + let map_entries: Vec<(Word, Word)> = map_delta + .entries() + .iter() + .filter_map(|(&key, &value)| { + if value == EMPTY_WORD { + None + } else { + Some((Word::from(key), value)) + } + }) + .collect(); + + // if the delta is empty, make sure we create an entry in the storage map roots map, + // but no need to do anything else + if map_entries.is_empty() { + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), prev_root); + + continue; + } + + // insert the updates into the forest and update storage map roots map + let new_root = self + .forest + .batch_insert(prev_root, map_entries.iter().copied()) + .expect("forest insertion should succeed"); + + self.storage_map_roots + .insert((account_id, slot_name.clone(), block_num), new_root); + + assert!(!map_entries.is_empty(), "a non-empty delta should have entries"); + let num_entries = map_entries.len(); + + // keep track of the state of storage map entries + // TODO: this is a temporary solution until the LargeSmtForest is implemented as + // tracking multiple versions of all storage maps will be prohibitively expensive + let map_entries = BTreeMap::from_iter(map_entries); + self.storage_entries + .insert((account_id, slot_name.clone(), block_num), map_entries); + + tracing::debug!( + target: crate::COMPONENT, + %account_id, + %block_num, + ?slot_name, + delta_entries = num_entries, + "Inserted storage map into 
forest" + ); + } + } + + /// Updates the forest with storage map changes from a delta. /// - /// * `is_full_state` - If `true`, delta values are absolute (new account or DB reconstruction). - /// If `false`, delta values are relative changes applied to previous state. + /// Processes storage map slot deltas, building SMTs for each modified slot and tracking the + /// new roots and accumulated entries. fn update_account_storage( &mut self, block_num: BlockNumber, account_id: AccountId, - storage_delta: &AccountStorageDelta, - is_full_state: bool, + delta: &AccountStorageDelta, ) { - for (slot_name, map_delta) in storage_delta.maps() { - let prev_root = self.get_latest_storage_map_root(account_id, slot_name, is_full_state); - - let delta_entries: Vec<_> = - map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); + assert!(!delta.is_empty(), "expected the delta not to be empty"); - if delta_entries.is_empty() { + for (slot_name, map_delta) in delta.maps() { + // map delta shouldn't be empty, but if it is for some reason, there is nothing to do + if map_delta.is_empty() { continue; } - let updated_root = self + // update the storage map tree in the forest and add an entry to the storage map roots + let prev_root = self.get_latest_storage_map_root(account_id, slot_name); + let delta_entries: Vec<(Word, Word)> = + map_delta.entries().iter().map(|(key, value)| ((*key).into(), *value)).collect(); + + let new_root = self .forest .batch_insert(prev_root, delta_entries.iter().copied()) .expect("forest insertion should succeed"); self.storage_map_roots - .insert((account_id, slot_name.clone(), block_num), updated_root); - - // Accumulate entries: start from previous block's entries or empty for full state - let mut accumulated_entries = if is_full_state { - BTreeMap::new() - } else { - self.storage_entries - .range( - (account_id, slot_name.clone(), BlockNumber::GENESIS) - ..(account_id, slot_name.clone(), block_num), - ) - .next_back() - .map(|(_, 
entries)| entries.clone()) - .unwrap_or_default() - }; + .insert((account_id, slot_name.clone(), block_num), new_root); - // Apply delta entries (insert or remove if value is EMPTY_WORD) + // merge the delta with the latest entries in the map + // TODO: this is a temporary solution until the LargeSmtForest is implemented as + // tracking multiple versions of all storage maps will be prohibitively expensive + let mut latest_entries = self.get_latest_storage_map_entries(account_id, slot_name); for (key, value) in &delta_entries { if *value == EMPTY_WORD { - accumulated_entries.remove(key); + latest_entries.remove(key); } else { - accumulated_entries.insert(*key, *value); + latest_entries.insert(*key, *value); } } self.storage_entries - .insert((account_id, slot_name.clone(), block_num), accumulated_entries); + .insert((account_id, slot_name.clone(), block_num), latest_entries); tracing::debug!( target: crate::COMPONENT, diff --git a/crates/store/src/inner_forest/tests.rs b/crates/store/src/inner_forest/tests.rs index 216ef42061..5991eb9e4b 100644 --- a/crates/store/src/inner_forest/tests.rs +++ b/crates/store/src/inner_forest/tests.rs @@ -164,39 +164,6 @@ fn test_incremental_vault_updates() { assert_ne!(root_1, root_2); } -#[test] -fn test_full_state_delta_starts_from_empty_root() { - let mut forest = InnerForest::new(); - let account_id = dummy_account(); - let faucet_id = dummy_faucet(); - let block_num = BlockNumber::GENESIS.child(); - - // Simulate a pre-existing vault state that should be ignored for full-state deltas - let mut vault_delta_pre = AccountVaultDelta::default(); - vault_delta_pre.add_asset(dummy_fungible_asset(faucet_id, 999)).unwrap(); - let delta_pre = - dummy_partial_delta(account_id, vault_delta_pre, AccountStorageDelta::default()); - forest.update_account(block_num, &delta_pre).unwrap(); - assert!(forest.vault_roots.contains_key(&(account_id, block_num))); - - // Now create a full-state delta at the same block - // A full-state delta should 
start from an empty root, not from the previous state - let asset = dummy_fungible_asset(faucet_id, 100); - let full_delta = dummy_full_state_delta(account_id, &[asset]); - - // Create a fresh forest to compare - let mut fresh_forest = InnerForest::new(); - fresh_forest.update_account(block_num, &full_delta).unwrap(); - let fresh_root = fresh_forest.vault_roots[&(account_id, block_num)]; - - // Update the original forest with the full-state delta - forest.update_account(block_num, &full_delta).unwrap(); - let updated_root = forest.vault_roots[&(account_id, block_num)]; - - // The full-state delta should produce the same root regardless of prior state - assert_eq!(updated_root, fresh_root); -} - #[test] fn test_vault_state_persists_across_blocks_without_changes() { // Regression test for issue #7: vault state should persist across blocks @@ -377,6 +344,55 @@ fn test_update_storage_map() { assert_ne!(storage_root, InnerForest::empty_smt_root()); } +#[test] +fn test_full_state_delta_with_empty_vault_records_root() { + // Regression test for issue #1581: full-state deltas with empty vaults must still record + // the vault root so that subsequent `get_vault_asset_witnesses` calls succeed. + // + // The network counter account from the network monitor has an empty vault (it only uses + // storage slots). Without this fix, `get_vault_asset_witnesses` fails with "root not found" + // because no vault root was ever recorded for the account. + use miden_protocol::account::{Account, AccountStorage}; + + let mut forest = InnerForest::new(); + let account_id = dummy_account(); + let block_num = BlockNumber::GENESIS.child(); + + // Create a full-state delta with an empty vault (like the network counter account). 
+ let vault = AssetVault::new(&[]).unwrap(); + let storage = AccountStorage::new(vec![]).unwrap(); + let code = AccountCode::mock(); + let nonce = Felt::ONE; + let account = Account::new(account_id, vault, storage, code, nonce, None).unwrap(); + let full_delta = AccountDelta::try_from(account).unwrap(); + + // Sanity check: the vault delta should be empty. + assert!(full_delta.vault().is_empty()); + assert!(full_delta.is_full_state()); + + forest.update_account(block_num, &full_delta).unwrap(); + + // The vault root must be recorded even though the vault is empty. + assert!( + forest.vault_roots.contains_key(&(account_id, block_num)), + "vault root should be recorded for full-state deltas with empty vaults" + ); + + // Verify the recorded root is the empty SMT root. + let recorded_root = forest.vault_roots[&(account_id, block_num)]; + assert_eq!( + recorded_root, + InnerForest::empty_smt_root(), + "empty vault should have the empty SMT root" + ); + + // Verify `get_vault_asset_witnesses` succeeds (returns empty witnesses for empty keys). 
+ let witnesses = forest + .get_vault_asset_witnesses(account_id, block_num, std::collections::BTreeSet::new()) + .expect("get_vault_asset_witnesses should succeed for accounts with empty vaults"); + assert!(witnesses.is_empty()); +} + #[test] fn test_storage_map_incremental_updates() { use std::collections::BTreeMap; diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 5f0fd764de..a0fefa0e7a 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -2,7 +2,7 @@ use std::collections::BTreeSet; use std::num::{NonZero, TryFromIntError}; use miden_crypto::merkle::smt::SmtProof; -use miden_node_proto::domain::account::{AccountInfo, validate_network_account_prefix}; +use miden_node_proto::domain::account::AccountInfo; use miden_node_proto::generated as proto; use miden_node_proto::generated::rpc::BlockRange; use miden_node_proto::generated::store::ntx_builder_server; @@ -71,20 +71,14 @@ impl ntx_builder_server::NtxBuilder for StoreApi { Ok(Response::new(response)) } - async fn get_network_account_details_by_prefix( + async fn get_network_account_details_by_id( &self, - request: Request, + request: Request, ) -> Result, Status> { - let request = request.into_inner(); + let account_id = read_account_id::(Some(request.into_inner()))?; - // Validate that the call is for a valid network account prefix - let prefix = validate_network_account_prefix(request.account_id_prefix).map_err(|err| { - Status::invalid_argument( - err.as_report_context("request does not contain a valid network account prefix"), - ) - })?; let account_info: Option = - self.state.get_network_account_details_by_prefix(prefix).await?; + self.state.get_network_account_details_by_id(account_id).await?; Ok(Response::new(proto::store::MaybeAccountDetails { details: account_info.map(|acc| (&acc).into()), diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index 845855aa7d..1f9f19aecc 100644 
--- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -21,9 +21,9 @@ use crate::errors::{ GetNoteScriptByRootError, GetNotesByIdError, NoteSyncError, + SyncAccountStorageMapsError, SyncAccountVaultError, SyncNullifiersError, - SyncStorageMapsError, SyncTransactionsError, }; use crate::server::api::{ @@ -306,30 +306,30 @@ impl rpc_server::Rpc for StoreApi { /// Returns storage map updates for the specified account within a block range. /// /// Supports cursor-based pagination for large storage maps. - async fn sync_storage_maps( + async fn sync_account_storage_maps( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); - let account_id = read_account_id::(request.account_id)?; + let account_id = read_account_id::(request.account_id)?; if !account_id.is_public() { - Err(SyncStorageMapsError::AccountNotPublic(account_id))?; + Err(SyncAccountStorageMapsError::AccountNotPublic(account_id))?; } let chain_tip = self.state.latest_block_num().await; - let block_range = read_block_range::( + let block_range = read_block_range::( request.block_range, - "SyncStorageMapsRequest", + "SyncAccountStorageMapsRequest", )? 
- .into_inclusive_range::(&chain_tip)?; + .into_inclusive_range::(&chain_tip)?; let storage_maps_page = self .state .get_storage_map_sync_values(account_id, block_range) .await - .map_err(SyncStorageMapsError::from)?; + .map_err(SyncAccountStorageMapsError::from)?; let updates = storage_maps_page .values @@ -342,7 +342,7 @@ impl rpc_server::Rpc for StoreApi { }) .collect(); - Ok(Response::new(proto::rpc::SyncStorageMapsResponse { + Ok(Response::new(proto::rpc::SyncAccountStorageMapsResponse { pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: storage_maps_page.last_block_included.as_u32(), diff --git a/crates/store/src/state/mod.rs b/crates/store/src/state/mod.rs index b275f400a2..b584f37b4a 100644 --- a/crates/store/src/state/mod.rs +++ b/crates/store/src/state/mod.rs @@ -961,12 +961,12 @@ impl State { self.db.select_account(id).await } - /// Returns details for public (on-chain) network accounts. - pub async fn get_network_account_details_by_prefix( + /// Returns details for public (on-chain) network accounts by full account ID. + pub async fn get_network_account_details_by_id( &self, - id_prefix: u32, + account_id: AccountId, ) -> Result, DatabaseError> { - self.db.select_network_account_by_prefix(id_prefix).await + self.db.select_network_account_by_id(account_id).await } /// Returns network account IDs within the specified block range (based on account creation diff --git a/crates/utils/src/lru_cache.rs b/crates/utils/src/lru_cache.rs index 7e67515296..fd4feadb11 100644 --- a/crates/utils/src/lru_cache.rs +++ b/crates/utils/src/lru_cache.rs @@ -3,7 +3,8 @@ use std::num::NonZeroUsize; use std::sync::Arc; use lru::LruCache as InnerCache; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, MutexGuard}; +use tracing::instrument; /// A newtype wrapper around an LRU cache. Ensures that the cache lock is not held across /// await points. @@ -22,11 +23,16 @@ where /// Retrieves a value from the cache. 
pub async fn get(&self, key: &K) -> Option { - self.0.lock().await.get(key).cloned() + self.lock().await.get(key).cloned() } /// Puts a value into the cache. pub async fn put(&self, key: K, value: V) { - self.0.lock().await.put(key, value); + self.lock().await.put(key, value); + } + + #[instrument(name = "lru.lock", skip_all)] + async fn lock(&self) -> MutexGuard<'_, InnerCache> { + self.0.lock().await } } diff --git a/crates/validator/src/block_validation/mod.rs b/crates/validator/src/block_validation/mod.rs index 416b2beb92..c1cab190bd 100644 --- a/crates/validator/src/block_validation/mod.rs +++ b/crates/validator/src/block_validation/mod.rs @@ -4,6 +4,7 @@ use miden_protocol::block::{BlockNumber, BlockSigner, ProposedBlock}; use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_protocol::errors::ProposedBlockError; use miden_protocol::transaction::TransactionId; +use tracing::{Instrument, info_span}; use crate::server::ValidatedTransactions; @@ -31,9 +32,16 @@ pub async fn validate_block( validated_transactions: Arc, ) -> Result { // Check that all transactions in the proposed block have been validated + let verify_span = info_span!("verify_transactions"); for tx_header in proposed_block.transactions() { let tx_id = tx_header.id(); - if validated_transactions.get(&tx_id).await.is_none() { + // TODO: LruCache is a poor abstraction since it locks many times. + if validated_transactions + .get(&tx_id) + .instrument(verify_span.clone()) + .await + .is_none() + { return Err(BlockValidationError::TransactionNotValidated( tx_id, proposed_block.block_num(), @@ -45,7 +53,7 @@ pub async fn validate_block( let (header, _) = proposed_block.into_header_and_body()?; // Sign the header. 
- let signature = signer.sign(&header); + let signature = info_span!("sign_block").in_scope(|| signer.sign(&header)); Ok(signature) } diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs index bab8b5d628..89d28d25de 100644 --- a/crates/validator/src/server/mod.rs +++ b/crates/validator/src/server/mod.rs @@ -10,6 +10,7 @@ use miden_node_proto_build::validator_api_descriptor; use miden_node_utils::ErrorReport; use miden_node_utils::lru_cache::LruCache; use miden_node_utils::panic::catch_panic_layer_fn; +use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_node_utils::tracing::grpc::grpc_trace_fn; use miden_protocol::block::{BlockSigner, ProposedBlock}; use miden_protocol::transaction::{ @@ -24,6 +25,7 @@ use tokio_stream::wrappers::TcpListenerStream; use tonic::Status; use tower_http::catch_panic::CatchPanicLayer; use tower_http::trace::TraceLayer; +use tracing::{Instrument, info_span}; use crate::COMPONENT; use crate::block_validation::validate_block; @@ -130,30 +132,34 @@ impl api_server::Api for ValidatorServer &self, request: tonic::Request, ) -> Result, tonic::Status> { - let request = request.into_inner(); - // Deserialize the transaction. - let proven_tx = - ProvenTransaction::read_from_bytes(&request.transaction).map_err(|err| { + let (tx, inputs) = info_span!("deserialize").in_scope(|| { + let request = request.into_inner(); + let tx = ProvenTransaction::read_from_bytes(&request.transaction).map_err(|err| { Status::invalid_argument(err.as_report_context("Invalid proven transaction")) })?; + let inputs = request + .transaction_inputs + .ok_or(Status::invalid_argument("Missing transaction inputs"))?; + let inputs = TransactionInputs::read_from_bytes(&inputs).map_err(|err| { + Status::invalid_argument(err.as_report_context("Invalid transaction inputs")) + })?; - // Deserialize the transaction inputs. 
- let Some(tx_inputs) = request.transaction_inputs else { - return Err(Status::invalid_argument("Missing transaction inputs")); - }; - let tx_inputs = TransactionInputs::read_from_bytes(&tx_inputs).map_err(|err| { - Status::invalid_argument(err.as_report_context("Invalid transaction inputs")) + Result::<_, tonic::Status>::Ok((tx, inputs)) })?; + tracing::Span::current().set_attribute("transaction.id", tx.id()); + // Validate the transaction. - let validated_tx_header = - validate_transaction(proven_tx, tx_inputs).await.map_err(|err| { - Status::invalid_argument(err.as_report_context("Invalid transaction")) - })?; + let validated_tx_header = validate_transaction(tx, inputs).await.map_err(|err| { + Status::invalid_argument(err.as_report_context("Invalid transaction")) + })?; // Register the validated transaction. let tx_id = validated_tx_header.id(); - self.validated_transactions.put(tx_id, validated_tx_header).await; + self.validated_transactions + .put(tx_id, validated_tx_header) + .instrument(info_span!("validated_txs.insert")) + .await; Ok(tonic::Response::new(())) } @@ -163,15 +169,15 @@ impl api_server::Api for ValidatorServer &self, request: tonic::Request, ) -> Result, tonic::Status> { - let proposed_block_bytes = request.into_inner().proposed_block; + let proposed_block = info_span!("deserialize").in_scope(|| { + let proposed_block_bytes = request.into_inner().proposed_block; - // Deserialize the proposed block. - let proposed_block = ProposedBlock::read_from_bytes(&proposed_block_bytes).map_err(|err| { tonic::Status::invalid_argument(format!( "Failed to deserialize proposed block: {err}", )) - })?; + }) + })?; // Validate the block. let signature = @@ -182,7 +188,9 @@ impl api_server::Api for ValidatorServer })?; // Send the signature. 
- let response = proto::blockchain::BlockSignature { signature: signature.to_bytes() }; - Ok(tonic::Response::new(response)) + info_span!("serialize").in_scope(|| { + let response = proto::blockchain::BlockSignature { signature: signature.to_bytes() }; + Ok(tonic::Response::new(response)) + }) } } diff --git a/crates/validator/src/tx_validation/mod.rs b/crates/validator/src/tx_validation/mod.rs index 95419c3927..20d610acaa 100644 --- a/crates/validator/src/tx_validation/mod.rs +++ b/crates/validator/src/tx_validation/mod.rs @@ -5,6 +5,7 @@ use miden_protocol::MIN_PROOF_SECURITY_LEVEL; use miden_protocol::transaction::{ProvenTransaction, TransactionHeader, TransactionInputs}; use miden_tx::auth::UnreachableAuth; use miden_tx::{TransactionExecutor, TransactionExecutorError, TransactionVerifier}; +use tracing::{Instrument, info_span}; // TRANSACTION VALIDATION ERROR // ================================================================================================ @@ -34,8 +35,10 @@ pub async fn validate_transaction( tx_inputs: TransactionInputs, ) -> Result { // First, verify the transaction proof - let tx_verifier = TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL); - tx_verifier.verify(&proven_tx)?; + info_span!("verify").in_scope(|| { + let tx_verifier = TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL); + tx_verifier.verify(&proven_tx) + })?; // Create a DataStore from the transaction inputs. let data_store = TransactionInputsDataStore::new(tx_inputs.clone()); @@ -46,6 +49,7 @@ pub async fn validate_transaction( TransactionExecutor::new(&data_store); let executed_tx = executor .execute_transaction(account.id(), block_header.block_num(), input_notes, tx_args) + .instrument(info_span!("execute")) .await?; // Validate that the executed transaction matches the submitted transaction. 
diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md index 47706de3b3..b26e881313 100644 --- a/docs/external/src/rpc.md +++ b/docs/external/src/rpc.md @@ -23,7 +23,7 @@ The gRPC service definition can be found in the Miden node's `proto` [directory] - [SyncAccountVault](#syncaccountvault) - [SyncNotes](#syncnotes) - [SyncState](#syncstate) -- [SyncStorageMaps](#syncstoragemaps) +- [SyncAccountStorageMaps](#syncaccountstoragemaps) - [SyncTransactions](#synctransactions) - [Status](#status) @@ -206,7 +206,7 @@ The low part of note tags are redacted to preserve some degree of privacy. Retur **Limits:** `account_id` (1000), `note_tag` (1000) -### SyncStorageMaps +### SyncAccountStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index c68e7b30a5..001dc40986 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -84,7 +84,7 @@ service Rpc { rpc SyncAccountVault(rpc.SyncAccountVaultRequest) returns (rpc.SyncAccountVaultResponse) {} // Returns storage map updates for specified account and storage slots within a block range. - rpc SyncStorageMaps(rpc.SyncStorageMapsRequest) returns (rpc.SyncStorageMapsResponse) {} + rpc SyncAccountStorageMaps(rpc.SyncAccountStorageMapsRequest) returns (rpc.SyncAccountStorageMapsResponse) {} // Returns transactions records for specific accounts within a block range. rpc SyncTransactions(rpc.SyncTransactionsRequest) returns (rpc.SyncTransactionsResponse) {} @@ -261,8 +261,8 @@ service NtxBuilder { // header and peaks will be retrieved. rpc GetCurrentBlockchainData(blockchain.MaybeBlockNumber) returns (CurrentBlockchainData) {} - // Returns the latest state of a network account with the specified account prefix. 
- rpc GetNetworkAccountDetailsByPrefix(AccountIdPrefix) returns (MaybeAccountDetails) {} + // Returns the latest state of a network account with the specified account ID. + rpc GetNetworkAccountDetailsById(account.AccountId) returns (MaybeAccountDetails) {} // Returns a list of all network account ids. rpc GetNetworkAccountIds(rpc.BlockRange) returns (NetworkAccountIdList) {} @@ -280,16 +280,10 @@ service NtxBuilder { rpc GetStorageMapWitness(StorageMapWitnessRequest) returns (StorageMapWitnessResponse) {} } -// GET NETWORK ACCOUNT DETAILS BY PREFIX +// GET NETWORK ACCOUNT DETAILS BY ID // ================================================================================================ -// Account ID prefix. -message AccountIdPrefix { - // Account ID prefix. - fixed32 account_id_prefix = 1; -} - -// Represents the result of getting network account details by prefix. +// Represents the result of getting network account details by ID. message MaybeAccountDetails { // Account details. optional account.AccountDetails details = 1; diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index b0f1046f59..f521fc1c5f 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -100,7 +100,7 @@ service Api { rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} // Returns storage map updates for specified account and storage slots within a block range. - rpc SyncStorageMaps(SyncStorageMapsRequest) returns (SyncStorageMapsResponse) {} + rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} // Returns transactions records for specific accounts within a block range. rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} @@ -344,7 +344,7 @@ message AccountStorageDetails { string slot_name = 1; // True when the number of entries exceeds the response limit. - // When set, clients should use the `SyncStorageMaps` endpoint. 
+ // When set, clients should use the `SyncAccountStorageMaps` endpoint. bool too_many_entries = 2; // The map entries (with or without proofs). Empty when too_many_entries is true. @@ -533,14 +533,14 @@ message SyncStateResponse { repeated note.NoteSyncRecord notes = 7; } -// SYNC STORAGE MAP +// SYNC ACCOUNT STORAGE MAP // ================================================================================================ // Storage map synchronization request. // // Allows requesters to sync storage map values for specific public accounts within a block range, // with support for cursor-based pagination to handle large storage maps. -message SyncStorageMapsRequest { +message SyncAccountStorageMapsRequest { // Block range from which to start synchronizing. // // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), @@ -551,7 +551,7 @@ message SyncStorageMapsRequest { account.AccountId account_id = 3; } -message SyncStorageMapsResponse { +message SyncAccountStorageMapsResponse { // Pagination information. PaginationInfo pagination_info = 1; diff --git a/scripts/check-features.sh b/scripts/check-features.sh new file mode 100755 index 0000000000..0b128a1855 --- /dev/null +++ b/scripts/check-features.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -euo pipefail + +# Script to check all feature combinations compile without warnings +# This script ensures that warnings are treated as errors for CI + +echo "Checking all feature combinations with cargo-hack..." + +# Set environment variables to treat warnings as errors and build protos +export RUSTFLAGS="-D warnings" +export BUILD_PROTO=1 + +# Run cargo-hack with comprehensive feature checking +cargo hack check \ + --workspace \ + --each-feature \ + --exclude-features default \ + --all-targets + +echo "All feature combinations compiled successfully!"